349 files changed, 12930 insertions, 2847 deletions
@@ -164,6 +164,8 @@ Casey Connolly <casey.connolly@linaro.org> <caleb@connolly.tech> Casey Connolly <casey.connolly@linaro.org> <caleb@postmarketos.org> Can Guo <quic_cang@quicinc.com> <cang@codeaurora.org> Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org> +Carl Vanderlip <carl.vanderlip@oss.qualcomm.com> <carlv@codeaurora.org> +Carl Vanderlip <carl.vanderlip@oss.qualcomm.com> <quic_carlv@quicinc.com> Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao@amd.com> Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao.osdev@gmail.com> Carlos Bilbao <carlos.bilbao@kernel.org> <bilbao@vt.edu> diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block index 803f578dc023..0ddffc9133d0 100644 --- a/Documentation/ABI/stable/sysfs-block +++ b/Documentation/ABI/stable/sysfs-block @@ -731,7 +731,7 @@ Contact: linux-block@vger.kernel.org Description: [RW] If the device is registered for writeback throttling, then this file shows the target minimum read latency. If this latency - is exceeded in a given window of time (see wb_window_usec), then + is exceeded in a given window of time (see curr_win_nsec), then the writeback throttling will start scaling back writes. Writing a value of '0' to this file disables the feature. Writing a value of '-1' to this file resets the value to the default diff --git a/Documentation/accel/index.rst b/Documentation/accel/index.rst index bc85f26533d8..d8fa332d60a8 100644 --- a/Documentation/accel/index.rst +++ b/Documentation/accel/index.rst @@ -10,6 +10,7 @@ Compute Accelerators introduction amdxdna/index qaic/index + rocket/index .. only:: subproject and html diff --git a/Documentation/accel/rocket/index.rst b/Documentation/accel/rocket/index.rst new file mode 100644 index 000000000000..70f97bccf100 --- /dev/null +++ b/Documentation/accel/rocket/index.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +===================================== + accel/rocket Rockchip NPU driver +===================================== + +The accel/rocket driver supports the Neural Processing Units (NPUs) inside some +Rockchip SoCs such as the RK3588. Rockchip calls it RKNN and sometimes RKNPU. + +The hardware is described in chapter 36 in the RK3588 TRM. + +This driver just powers the hardware on and off, allocates and maps buffers to +the device and submits jobs to the frontend unit. Everything else is done in +userspace, as a Gallium driver (also called rocket) that is part of the Mesa3D +project. + +Hardware currently supported: + +* RK3588 diff --git a/Documentation/admin-guide/blockdev/zoned_loop.rst b/Documentation/admin-guide/blockdev/zoned_loop.rst index 9c7aa3b482f3..64dcfde7450a 100644 --- a/Documentation/admin-guide/blockdev/zoned_loop.rst +++ b/Documentation/admin-guide/blockdev/zoned_loop.rst @@ -79,7 +79,7 @@ zone_capacity_mb Device zone capacity (must always be equal to or lower than the zone size. Default: zone size. conv_zones Total number of conventioanl zones starting from sector 0. Default: 8. -base_dir Path to the base directoy where to create the directory +base_dir Path to the base directory where to create the directory containing the zone files of the device. Default=/var/local/zloop. 
The device directory containing the zone files is always diff --git a/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst b/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst index b4de16f5ec44..6dd0800146f6 100644 --- a/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst +++ b/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst @@ -214,7 +214,7 @@ Spectre_v1 X Spectre_v2 X X Spectre_v2_user X X * (Note 1) SRBDS X X X X -SRSO X X +SRSO X X X X SSB (Note 4) TAA X X X X * (Note 2) TSA X X X X diff --git a/Documentation/devicetree/bindings/display/bridge/solomon,ssd2825.yaml b/Documentation/devicetree/bindings/display/bridge/solomon,ssd2825.yaml new file mode 100644 index 000000000000..e2d293d623b8 --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/solomon,ssd2825.yaml @@ -0,0 +1,141 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/solomon,ssd2825.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Solomon SSD2825 RGB to MIPI-DSI bridge + +maintainers: + - Svyatoslav Ryhel <clamor95@gmail.com> + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + const: solomon,ssd2825 + + reg: + maxItems: 1 + + reset-gpios: true + + dvdd-supply: + description: Regulator for 1.2V digital power supply. + + avdd-supply: + description: Regulator for 1.2V analog power supply. + + vddio-supply: + description: Regulator for 1.8V IO power supply. + + spi-max-frequency: + maximum: 1000000 + + spi-cpha: true + spi-cpol: true + + clocks: + maxItems: 1 + description: Reference TX_CLK used before PLL is locked. + + solomon,hs-zero-delay-ns: + description: + HS zero delay period + minimum: 0 + maximum: 1700 + default: 133 + + solomon,hs-prep-delay-ns: + description: + HS prep delay period + minimum: 0 + maximum: 1728 + default: 40 + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/$defs/port-base + unevaluatedProperties: false + description: + Video port for RGB input + + properties: + endpoint: + $ref: /schemas/graph.yaml#/$defs/endpoint-base + unevaluatedProperties: false + + properties: + bus-width: + enum: [ 16, 18, 24 ] + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: + Video port for DSI output (panel or connector) + + required: + - port@0 + - port@1 + +required: + - compatible + - ports + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + spi { + #address-cells = <1>; + #size-cells = <0>; + + dsi@2 { + compatible = "solomon,ssd2825"; + reg = <2>; + + spi-max-frequency = <1000000>; + + spi-cpha; + spi-cpol; + + reset-gpios = <&gpio 114 GPIO_ACTIVE_LOW>; + + dvdd-supply = <&vdd_1v2>; + avdd-supply = <&vdd_1v2>; + vddio-supply = <&vdd_1v8_io>; + + solomon,hs-zero-delay-ns = <300>; + solomon,hs-prep-delay-ns = <65>; + + clocks = <&ssd2825_tx_clk>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + bridge_input: endpoint { + remote-endpoint = <&dpi_output>; + bus-width = <24>; + }; + }; + + port@1 { + reg = <1>; + + bridge_output: endpoint { + remote-endpoint = <&panel_input>; + }; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/bridge/waveshare,dsi2dpi.yaml b/Documentation/devicetree/bindings/display/bridge/waveshare,dsi2dpi.yaml new file mode 100644 index 000000000000..5e8498c8303d --- /dev/null +++ 
b/Documentation/devicetree/bindings/display/bridge/waveshare,dsi2dpi.yaml @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/waveshare,dsi2dpi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Waveshare MIPI-DSI to DPI Converter bridge + +maintainers: + - Joseph Guo <qijian.guo@nxp.com> + +description: + Waveshare bridge board is part of Waveshare panel which converts DSI to DPI. + +properties: + compatible: + const: waveshare,dsi2dpi + + reg: + maxItems: 1 + description: base I2C address of the device + + power-supply: true + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/$defs/port-base + unevaluatedProperties: false + description: + Video port for MIPI DSI input + + properties: + endpoint: + $ref: /schemas/media/video-interfaces.yaml# + unevaluatedProperties: false + + properties: + data-lanes: + description: array of physical DSI data lane indexes. + items: + - const: 1 + - const: 2 + + required: + - data-lanes + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: + Video port for MIPI DPI output panel. + + required: + - port@0 + - port@1 + +required: + - compatible + - reg + - ports + - power-supply + +additionalProperties: false + +examples: + - | + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + bridge@45 { + compatible = "waveshare,dsi2dpi"; + reg = <0x45>; + power-supply = <®_3p3v>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + waveshare_from_dsim: endpoint { + data-lanes = <1 2>; + remote-endpoint = <&dsim_to_waveshare>; + }; + }; + + port@1 { + reg = <1>; + + waveshare_to_panel: endpoint { + remote-endpoint = <&panel_to_waveshare>; + }; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/panel/hydis,hv101hd1.yaml b/Documentation/devicetree/bindings/display/panel/hydis,hv101hd1.yaml new file mode 100644 index 000000000000..f429e84ee65d --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/hydis,hv101hd1.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/hydis,hv101hd1.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Hydis HV101HD1 DSI Display Panel + +maintainers: + - Svyatoslav Ryhel <clamor95@gmail.com> + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: hydis,hv101hd1 + + reg: + maxItems: 1 + + vdd-supply: true + vio-supply: true + + backlight: true + port: true + +required: + - compatible + - vdd-supply + - vio-supply + - backlight + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "hydis,hv101hd1"; + reg = <0>; + + vdd-supply = <&vdd_lcd>; + vio-supply = <&vddio_lcd>; + + backlight = <&backlight>; + + port { + panel_in: endpoint { + remote-endpoint = <&dsi_out>; + }; + }; + }; + }; +... 
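As a consumer-side illustration of the hydis,hv101hd1 binding above (required vdd-/vio-supplies, then DSI attachment), here is a hypothetical probe sketch. It uses only standard regulator and MIPI-DSI helpers; the struct, function name, and lane/format values are illustrative and are not taken from the in-tree driver::

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>
    #include <drm/drm_mipi_dsi.h>

    struct hv101hd1 {
    	struct mipi_dsi_device *dsi;
    	struct regulator_bulk_data supplies[2];
    };

    static int hv101hd1_probe(struct mipi_dsi_device *dsi)
    {
    	struct hv101hd1 *hv;
    	int ret;

    	hv = devm_kzalloc(&dsi->dev, sizeof(*hv), GFP_KERNEL);
    	if (!hv)
    		return -ENOMEM;

    	/* Names match the vdd-supply/vio-supply properties in the binding. */
    	hv->supplies[0].supply = "vdd";
    	hv->supplies[1].supply = "vio";
    	ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(hv->supplies),
    				      hv->supplies);
    	if (ret)
    		return ret;

    	hv->dsi = dsi;
    	mipi_dsi_set_drvdata(dsi, hv);

    	/* Lane count and pixel format would come from the panel datasheet. */
    	dsi->lanes = 4;
    	dsi->format = MIPI_DSI_FMT_RGB888;
    	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

    	return mipi_dsi_attach(dsi);
    }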
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index 1ac1f0219079..5e8dc9afa1fd 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -236,6 +236,8 @@ properties: - okaya,rs800480t-7x0gp # Olimex 4.3" TFT LCD panel - olimex,lcd-olinuxino-43-ts + # Olimex 5.0" TFT LCD panel + - olimex,lcd-olinuxino-5-cts # On Tat Industrial Company 5" DPI TFT panel. - ontat,kd50g21-40nt-a1 # On Tat Industrial Company 7" DPI TFT panel. @@ -321,6 +323,10 @@ properties: - vivax,tpc9150-panel # VXT 800x480 color TFT LCD panel - vxt,vl050-8048nt-c01 + # Waveshare 13.3" FHD (1920x1080) LCD panel + - waveshare,13.3inch-panel + # Waveshare 7.0" WSVGA (1024x600) LCD panel + - waveshare,7.0inch-c-panel # Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel - winstar,wf35ltiacd # Yes Optoelectronics YTC700TLAG-05-201C 7" TFT LCD panel diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml index e36659340ef3..ccb574caed28 100644 --- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml +++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml @@ -21,6 +21,10 @@ properties: - enum: # Samsung 13" 3K (2880×1920 pixels) eDP AMOLED panel - samsung,atna30dw01 + # Samsung 14" FHD+ (1920x1200 pixels) eDP AMOLED panel + - samsung,atna40ct06 + # Samsung 14" WQXGA+ (2880x1800 pixels) eDP AMOLED panel + - samsung,atna40cu11 # Samsung 14" WQXGA+ (2880×1800 pixels) eDP AMOLED panel - samsung,atna40yk20 # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa5x01-ams561ra01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa5x01-ams561ra01.yaml new file mode 100644 index 000000000000..eccfc66d7fe2 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e8aa5x01-ams561ra01.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/samsung,s6e8aa5x01-ams561ra01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Samsung AMS561RA01 panel with S6E8AA5X01 controller + +maintainers: + - Kaustabh Chakraborty <kauschluss@disroot.org> + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: samsung,s6e8aa5x01-ams561ra01 + + reg: + maxItems: 1 + + vdd-supply: + description: core voltage supply + + vci-supply: + description: voltage supply for analog circuits + + reset-gpios: true + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "samsung,s6e8aa5x01-ams561ra01"; + reg = <0>; + + vdd-supply = <&panel_vdd_reg>; + vci-supply = <&panel_vci_reg>; + + reset-gpios = <&gpd3 4 GPIO_ACTIVE_HIGH>; + }; + }; + +... 
diff --git a/Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml b/Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml index 6d9de3303762..b3492a9aa4ef 100644 --- a/Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml +++ b/Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml @@ -62,11 +62,13 @@ properties: items: - description: GMAC main clock - description: Peripheral registers interface clock + - description: APB glue registers interface clock clock-names: items: - const: stmmaceth - const: pclk + - const: apb interrupts: items: @@ -88,8 +90,8 @@ examples: compatible = "thead,th1520-gmac", "snps,dwmac-3.70a"; reg = <0xe7070000 0x2000>, <0xec003000 0x1000>; reg-names = "dwmac", "apb"; - clocks = <&clk 1>, <&clk 2>; - clock-names = "stmmaceth", "pclk"; + clocks = <&clk 1>, <&clk 2>, <&clk 3>; + clock-names = "stmmaceth", "pclk", "apb"; interrupts = <66>; interrupt-names = "macirq"; phy-mode = "rgmii-id"; diff --git a/Documentation/devicetree/bindings/npu/rockchip,rk3588-rknn-core.yaml b/Documentation/devicetree/bindings/npu/rockchip,rk3588-rknn-core.yaml new file mode 100644 index 000000000000..caca2a4903cd --- /dev/null +++ b/Documentation/devicetree/bindings/npu/rockchip,rk3588-rknn-core.yaml @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/npu/rockchip,rk3588-rknn-core.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Neural Processing Unit IP from Rockchip + +maintainers: + - Tomeu Vizoso <tomeu@tomeuvizoso.net> + +description: + Rockchip IP for accelerating inference of neural networks. + + There is to be a node per each NPU core in the SoC, and each core should reference all the + resources that it needs to function, such as clocks, power domains, and resets. 
+ +properties: + $nodename: + pattern: '^npu@[a-f0-9]+$' + + compatible: + enum: + - rockchip,rk3588-rknn-core + + reg: + maxItems: 3 + + reg-names: + items: + - const: pc # Program Control-related registers + - const: cna # Convolution Neural Network Accelerator registers + - const: core # Main NPU core processing unit registers + + clocks: + maxItems: 4 + + clock-names: + items: + - const: aclk + - const: hclk + - const: npu + - const: pclk + + interrupts: + maxItems: 1 + + iommus: + maxItems: 1 + + npu-supply: true + + power-domains: + maxItems: 1 + + resets: + maxItems: 2 + + reset-names: + items: + - const: srst_a + - const: srst_h + + sram-supply: true + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - interrupts + - iommus + - power-domains + - resets + - reset-names + - npu-supply + - sram-supply + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/rockchip,rk3588-cru.h> + #include <dt-bindings/interrupt-controller/irq.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/power/rk3588-power.h> + #include <dt-bindings/reset/rockchip,rk3588-cru.h> + + bus { + #address-cells = <2>; + #size-cells = <2>; + + npu@fdab0000 { + compatible = "rockchip,rk3588-rknn-core"; + reg = <0x0 0xfdab0000 0x0 0x1000>, + <0x0 0xfdab1000 0x0 0x1000>, + <0x0 0xfdab3000 0x0 0x1000>; + reg-names = "pc", "cna", "core"; + clocks = <&cru ACLK_NPU0>, <&cru HCLK_NPU0>, + <&scmi_clk SCMI_CLK_NPU>, <&cru PCLK_NPU_ROOT>; + clock-names = "aclk", "hclk", "npu", "pclk"; + interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>; + iommus = <&rknn_mmu_0>; + npu-supply = <&vdd_npu_s0>; + power-domains = <&power RK3588_PD_NPUTOP>; + resets = <&cru SRST_A_RKNN0>, <&cru SRST_H_RKNN0>; + reset-names = "srst_a", "srst_h"; + sram-supply = <&vdd_npu_mem_s0>; + }; + }; +... diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index be8637da3fe9..92db80793bba 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -497,19 +497,19 @@ Contact: Douglas Anderson <dianders@chromium.org> Level: Intermediate -Transition away from using mipi_dsi_*_write_seq() -------------------------------------------------- - -The macros mipi_dsi_generic_write_seq() and mipi_dsi_dcs_write_seq() are -non-intuitive because, if there are errors, they return out of the *caller's* -function. We should move all callers to use mipi_dsi_generic_write_seq_multi() -and mipi_dsi_dcs_write_seq_multi() macros instead. - -Once all callers are transitioned, the macros and the functions that they call, -mipi_dsi_generic_write_chatty() and mipi_dsi_dcs_write_buffer_chatty(), can -probably be removed. Alternatively, if people feel like the _multi() variants -are overkill for some use cases, we could keep the mipi_dsi_*_write_seq() -variants but change them not to return out of the caller. +Transition away from using deprecated MIPI DSI functions +-------------------------------------------------------- + +There are many functions defined in ``drm_mipi_dsi.c`` which have been +deprecated. Each deprecated function was deprecated in favor of its `multi` +variant (e.g. `mipi_dsi_generic_write()` and `mipi_dsi_generic_write_multi()`). +The `multi` variant of a function includes improved error handling and logic +which makes it more convenient to make several calls in a row, as most MIPI +drivers do. + +Drivers should be updated to use undeprecated functions. 
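For illustration, a minimal sketch of the `multi` pattern described above, using struct mipi_dsi_multi_context from ``include/drm/drm_mipi_dsi.h``; the panel function name and command bytes are hypothetical::

    /* Errors accumulate in ctx.accum_err; once an error is recorded,
     * subsequent _multi() calls become no-ops, so an init sequence needs
     * a single error check at the end instead of one per call.
     */
    static int example_panel_on(struct mipi_dsi_device *dsi)
    {
    	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

    	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
    	mipi_dsi_msleep(&ctx, 120);
    	mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x11, 0x22); /* vendor cmd */
    	mipi_dsi_dcs_set_display_on_multi(&ctx);

    	return ctx.accum_err;
    }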
Once all usages of the +deprecated MIPI DSI functions have been removed, their definitions may be +removed from ``drm_mipi_dsi.c``. Contact: Douglas Anderson <dianders@chromium.org> diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index bb620f554598..9756d16e3df1 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -1420,7 +1420,7 @@ udp_hash_entries - INTEGER A negative value means the networking namespace does not own its hash buckets and shares the initial networking namespace's one. -udp_child_ehash_entries - INTEGER +udp_child_hash_entries - INTEGER Control the number of hash buckets for UDP sockets in the child networking namespace, which must be set before clone() or unshare(). diff --git a/MAINTAINERS b/MAINTAINERS index b97760467f09..62298a601bbe 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7494,12 +7494,23 @@ F: include/linux/power/smartreflex.h DRM ACCEL DRIVERS FOR INTEL VPU M: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> M: Maciej Falkowski <maciej.falkowski@linux.intel.com> +M: Karol Wachowski <karol.wachowski@linux.intel.com> L: dri-devel@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: drivers/accel/ivpu/ F: include/uapi/drm/ivpu_accel.h +DRM ACCEL DRIVER FOR ROCKCHIP NPU +M: Tomeu Vizoso <tomeu@tomeuvizoso.net> +L: dri-devel@lists.freedesktop.org +S: Supported +T: git https://gitlab.freedesktop.org/drm/misc/kernel.git +F: Documentation/accel/rocket/ +F: Documentation/devicetree/bindings/npu/rockchip,rknn-core.yaml +F: drivers/accel/rocket/ +F: include/uapi/drm/rocket_accel.h + DRM COMPUTE ACCELERATORS DRIVERS AND FRAMEWORK M: Oded Gabbay <ogabbay@kernel.org> L: dri-devel@lists.freedesktop.org @@ -11442,6 +11453,7 @@ F: drivers/tty/hvc/ HUNG TASK DETECTOR M: Andrew Morton <akpm@linux-foundation.org> R: Lance Yang <lance.yang@linux.dev> +R: Masami Hiramatsu <mhiramat@kernel.org> L: linux-kernel@vger.kernel.org S: Maintained F: include/linux/hung_task.h @@ -12587,10 +12599,9 @@ S: Supported F: drivers/cpufreq/intel_pstate.c INTEL PTP DFL ToD DRIVER -M: Tianfei Zhang <tianfei.zhang@intel.com> L: linux-fpga@vger.kernel.org L: netdev@vger.kernel.org -S: Maintained +S: Orphan F: drivers/ptp/ptp_dfl_tod.c INTEL QUADRATURE ENCODER PERIPHERAL DRIVER @@ -12728,9 +12739,8 @@ S: Maintained F: drivers/platform/x86/intel/wmi/thunderbolt.c INTEL WWAN IOSM DRIVER -M: M Chetan Kumar <m.chetan.kumar@intel.com> L: netdev@vger.kernel.org -S: Maintained +S: Orphan F: drivers/net/wwan/iosm/ INTEL(R) FLEXIBLE RETURN AND EVENT DELIVERY @@ -13690,7 +13700,6 @@ F: scripts/Makefile.kmsan KPROBES M: Naveen N Rao <naveen@kernel.org> -M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> M: "David S. 
Miller" <davem@davemloft.net> M: Masami Hiramatsu <mhiramat@kernel.org> L: linux-kernel@vger.kernel.org @@ -15678,7 +15687,6 @@ MEDIATEK T7XX 5G WWAN MODEM DRIVER M: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com> R: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com> R: Liu Haijun <haijun.liu@mediatek.com> -R: M Chetan Kumar <m.chetan.kumar@linux.intel.com> R: Ricardo Martinez <ricardo.martinez@linux.intel.com> L: netdev@vger.kernel.org S: Supported @@ -17455,6 +17463,7 @@ F: drivers/net/ethernet/neterion/ NETFILTER M: Pablo Neira Ayuso <pablo@netfilter.org> M: Jozsef Kadlecsik <kadlec@netfilter.org> +M: Florian Westphal <fw@strlen.de> L: netfilter-devel@vger.kernel.org L: coreteam@netfilter.org S: Maintained @@ -20667,7 +20676,7 @@ F: include/dt-bindings/clock/qcom,* QUALCOMM CLOUD AI (QAIC) DRIVER M: Jeff Hugo <jeff.hugo@oss.qualcomm.com> -R: Carl Vanderlip <quic_carlv@quicinc.com> +R: Carl Vanderlip <carl.vanderlip@oss.qualcomm.com> L: linux-arm-msm@vger.kernel.org L: dri-devel@lists.freedesktop.org S: Supported @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 17 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi index 42724bf7e90e..03f1d7319049 100644 --- a/arch/riscv/boot/dts/thead/th1520.dtsi +++ b/arch/riscv/boot/dts/thead/th1520.dtsi @@ -297,8 +297,9 @@ reg-names = "dwmac", "apb"; interrupts = <67 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "macirq"; - clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>; - clock-names = "stmmaceth", "pclk"; + clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>, + <&clk CLK_PERISYS_APB4_HCLK>; + clock-names = "stmmaceth", "pclk", "apb"; snps,pbl = <32>; snps,fixed-burst; snps,multicast-filter-bins = <64>; @@ -319,8 +320,9 @@ reg-names = "dwmac", "apb"; interrupts = <66 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "macirq"; - clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>; - clock-names = "stmmaceth", "pclk"; + clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>, + <&clk CLK_PERISYS_APB4_HCLK>; + clock-names = "stmmaceth", "pclk", "apb"; snps,pbl = <32>; snps,fixed-burst; snps,multicast-filter-bins = <64>; diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c index 916bac09b464..63e037e94e4c 100644 --- a/arch/x86/boot/cpuflags.c +++ b/arch/x86/boot/cpuflags.c @@ -106,5 +106,18 @@ void get_cpuflags(void) cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6], &cpu.flags[1]); } + + if (max_amd_level >= 0x8000001f) { + u32 ebx; + + /* + * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in + * the virtualization flags entry (word 8) and set by + * scattered.c, so the bit needs to be explicitly set. 
+ */ + cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored); + if (ebx & BIT(31)) + set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags); + } } } diff --git a/arch/x86/boot/startup/sev-shared.c b/arch/x86/boot/startup/sev-shared.c index 7a706db87b93..a34cd19796f9 100644 --- a/arch/x86/boot/startup/sev-shared.c +++ b/arch/x86/boot/startup/sev-shared.c @@ -785,6 +785,7 @@ static void __head svsm_pval_4k_page(unsigned long paddr, bool validate) pc->entry[0].page_size = RMP_PG_SIZE_4K; pc->entry[0].action = validate; pc->entry[0].ignore_cf = 0; + pc->entry[0].rsvd = 0; pc->entry[0].pfn = paddr >> PAGE_SHIFT; /* Protocol 0, Call ID 1 */ @@ -810,6 +811,13 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, if (ret) sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE); } + + /* + * If validating memory (making it private) and affected by the + * cache-coherency vulnerability, perform the cache eviction mitigation. + */ + if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO)) + sev_evict_cache((void *)vaddr, 1); } /* diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c index fc59ce78c477..14ef5908fb27 100644 --- a/arch/x86/coco/sev/core.c +++ b/arch/x86/coco/sev/core.c @@ -227,6 +227,7 @@ static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action, pe->page_size = RMP_PG_SIZE_4K; pe->action = action; pe->ignore_cf = 0; + pe->rsvd = 0; pe->pfn = pfn; pe++; @@ -257,6 +258,7 @@ static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int d pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; pe->ignore_cf = 0; + pe->rsvd = 0; pe->pfn = e->gfn; pe++; @@ -358,10 +360,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc) static void pvalidate_pages(struct snp_psc_desc *desc) { + struct psc_entry *e; + unsigned int i; + if (snp_vmpl) svsm_pval_pages(desc); else pval_pages(desc); + + /* + * If not affected by the cache-coherency vulnerability there is no need + * to perform the cache eviction mitigation. + */ + if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO)) + return; + + for (i = 0; i <= desc->hdr.end_entry; i++) { + e = &desc->entries[i]; + + /* + * If validating memory (making it private) perform the cache + * eviction mitigation. + */ + if (e->operation == SNP_PAGE_STATE_PRIVATE) + sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1); + } } static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc) diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c index faf1fce89ed4..c3b4acbde0d8 100644 --- a/arch/x86/coco/sev/vc-handle.c +++ b/arch/x86/coco/sev/vc-handle.c @@ -371,29 +371,30 @@ static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write) * executing with Secure TSC enabled, so special handling is required for * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ. */ -static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write) +static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool write) { + struct pt_regs *regs = ctxt->regs; u64 tsc; /* - * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled. - * Terminate the SNP guest when the interception is enabled. + * Writing to MSR_IA32_TSC can cause subsequent reads of the TSC to + * return undefined values, and GUEST_TSC_FREQ is read-only. Generate + * a #GP on all writes. 
*/ - if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ) - return ES_VMM_ERROR; + if (write) { + ctxt->fi.vector = X86_TRAP_GP; + ctxt->fi.error_code = 0; + return ES_EXCEPTION; + } /* - * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC - * to return undefined values, so ignore all writes. - * - * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use - * the value returned by rdtsc_ordered(). + * GUEST_TSC_FREQ read should not be intercepted when Secure TSC is + * enabled. Terminate the guest if a read is attempted. */ - if (write) { - WARN_ONCE(1, "TSC MSR writes are verboten!\n"); - return ES_OK; - } + if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ) + return ES_VMM_ERROR; + /* Reads of MSR_IA32_TSC should return the current TSC value. */ tsc = rdtsc_ordered(); regs->ax = lower_32_bits(tsc); regs->dx = upper_32_bits(tsc); @@ -416,7 +417,7 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt) case MSR_IA32_TSC: case MSR_AMD64_GUEST_TSC_FREQ: if (sev_status & MSR_AMD64_SNP_SECURE_TSC) - return __vc_handle_secure_tsc_msrs(regs, write); + return __vc_handle_secure_tsc_msrs(ctxt, write); break; default: break; diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 602957dd2609..06fc0479a23f 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -218,6 +218,7 @@ #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */ #define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */ #define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */ +#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 4) /* SNP cache coherency software work around not needed */ #define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */ #define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */ diff --git a/arch/x86/include/asm/cpuid.h b/arch/x86/include/asm/cpuid.h deleted file mode 100644 index d5749b25fa10..000000000000 --- a/arch/x86/include/asm/cpuid.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef _ASM_X86_CPUID_H -#define _ASM_X86_CPUID_H - -#include <asm/cpuid/api.h> - -#endif /* _ASM_X86_CPUID_H */ diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h index 89075ff19afa..02236962fdb1 100644 --- a/arch/x86/include/asm/sev.h +++ b/arch/x86/include/asm/sev.h @@ -619,6 +619,24 @@ int rmp_make_shared(u64 pfn, enum pg_level level); void snp_leak_pages(u64 pfn, unsigned int npages); void kdump_sev_callback(void); void snp_fixup_e820_tables(void); + +static inline void sev_evict_cache(void *va, int npages) +{ + volatile u8 val __always_unused; + u8 *bytes = va; + int page_idx; + + /* + * For SEV guests, a read from the first/last cache-lines of a 4K page + * using the guest key is sufficient to cause a flush of all cache-lines + * associated with that 4K page without incurring all the overhead of a + * full CLFLUSH sequence. 
+ */ + for (page_idx = 0; page_idx < npages; page_idx++) { + val = bytes[page_idx * PAGE_SIZE]; + val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1]; + } +} #else static inline bool snp_probe_rmptable_info(void) { return false; } static inline int snp_rmptable_init(void) { return -ENOSYS; } @@ -634,6 +652,7 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV static inline void snp_leak_pages(u64 pfn, unsigned int npages) {} static inline void kdump_sev_callback(void) { } static inline void snp_fixup_e820_tables(void) {} +static inline void sev_evict_cache(void *va, int npages) {} #endif #endif diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index b74bf937cd9f..2186a771b9fc 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -386,7 +386,6 @@ static bool __init should_mitigate_vuln(unsigned int bug) case X86_BUG_SPECTRE_V2: case X86_BUG_RETBLEED: - case X86_BUG_SRSO: case X86_BUG_L1TF: case X86_BUG_ITS: return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || @@ -3184,8 +3183,18 @@ static void __init srso_select_mitigation(void) } if (srso_mitigation == SRSO_MITIGATION_AUTO) { - if (should_mitigate_vuln(X86_BUG_SRSO)) { + /* + * Use safe-RET if user->kernel or guest->host protection is + * required. Otherwise the 'microcode' mitigation is sufficient + * to protect the user->user and guest->guest vectors. + */ + if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || + (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) && + !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) { srso_mitigation = SRSO_MITIGATION_SAFE_RET; + } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || + cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { + srso_mitigation = SRSO_MITIGATION_MICROCODE; } else { srso_mitigation = SRSO_MITIGATION_NONE; return; diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index b4a1f6732a3a..6b868afb26c3 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -48,6 +48,7 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, { X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 }, { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, + { X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 }, { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 }, { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 }, { X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 }, diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 12ed75c1b567..28e4fd65c9da 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1881,19 +1881,20 @@ long fpu_xstate_prctl(int option, unsigned long arg2) #ifdef CONFIG_PROC_PID_ARCH_STATUS /* * Report the amount of time elapsed in millisecond since last AVX512 - * use in the task. + * use in the task. Report -1 if no AVX-512 usage. */ static void avx512_status(struct seq_file *m, struct task_struct *task) { - unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp); - long delta; + unsigned long timestamp; + long delta = -1; - if (!timestamp) { - /* - * Report -1 if no AVX512 usage - */ - delta = -1; - } else { + /* AVX-512 usage is not tracked for kernel threads. Don't report anything. 
*/ + if (task->flags & (PF_KTHREAD | PF_USER_WORKER)) + return; + + timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp); + + if (timestamp) { delta = (long)(jiffies - timestamp); /* * Cap to LONG_MAX if time difference > LONG_MAX diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 3bf76902f07f..50e51047e1fe 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -5847,8 +5847,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, goto out; } - bfqq = kmem_cache_alloc_node(bfq_pool, - GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, + bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO, bfqd->queue->node); if (bfqq) { diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 5936db7f8475..fe9ebd6a2e14 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -394,7 +394,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk, /* allocate */ if (!new_blkg) { - new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN); + new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT); if (unlikely(!new_blkg)) { ret = -ENOMEM; goto err_put_css; @@ -1467,7 +1467,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) spin_lock_init(&blkcg->lock); refcount_set(&blkcg->online_pin, 1); - INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN); + INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT); INIT_HLIST_HEAD(&blkcg->blkg_list); #ifdef CONFIG_CGROUP_WRITEBACK INIT_LIST_HEAD(&blkcg->cgwb_list); @@ -1630,7 +1630,7 @@ retry: pd_prealloc = NULL; } else { pd = pol->pd_alloc_fn(disk, blkg->blkcg, - GFP_NOWAIT | __GFP_NOWARN); + GFP_NOWAIT); } if (!pd) { diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 396cded255ea..4a7f1a349998 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -847,7 +847,7 @@ static void blk_queue_release(struct kobject *kobj) /* nothing to do here, all data is associated with the parent gendisk */ } -static const struct kobj_type blk_queue_ktype = { +const struct kobj_type blk_queue_ktype = { .default_groups = blk_queue_attr_groups, .sysfs_ops = &queue_sysfs_ops, .release = blk_queue_release, @@ -875,15 +875,14 @@ int blk_register_queue(struct gendisk *disk) struct request_queue *q = disk->queue; int ret; - kobject_init(&disk->queue_kobj, &blk_queue_ktype); ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue"); if (ret < 0) - goto out_put_queue_kobj; + return ret; if (queue_is_mq(q)) { ret = blk_mq_sysfs_register(disk); if (ret) - goto out_put_queue_kobj; + goto out_del_queue_kobj; } mutex_lock(&q->sysfs_lock); @@ -903,9 +902,9 @@ int blk_register_queue(struct gendisk *disk) if (queue_is_mq(q)) elevator_set_default(q); - wbt_enable_default(disk); blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); + wbt_enable_default(disk); /* Now everything is ready and send out KOBJ_ADD uevent */ kobject_uevent(&disk->queue_kobj, KOBJ_ADD); @@ -934,8 +933,8 @@ out_debugfs_remove: mutex_unlock(&q->sysfs_lock); if (queue_is_mq(q)) blk_mq_sysfs_unregister(disk); -out_put_queue_kobj: - kobject_put(&disk->queue_kobj); +out_del_queue_kobj: + kobject_del(&disk->queue_kobj); return ret; } @@ -986,5 +985,4 @@ void blk_unregister_queue(struct gendisk *disk) elevator_set_none(q); blk_debugfs_remove(disk); - kobject_put(&disk->queue_kobj); } diff --git a/block/blk-wbt.c b/block/blk-wbt.c index a50d4cd55f41..eb8037bae0bd 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -85,8 +85,8 @@ struct rq_wb { u64 sync_issue; void *sync_cookie; - unsigned long last_issue; /* last non-throttled issue */ - unsigned long 
last_comp; /* last non-throttled comp */ + unsigned long last_issue; /* issue time of last read rq */ + unsigned long last_comp; /* completion time of last read rq */ unsigned long min_lat_nsec; struct rq_qos rqos; struct rq_wait rq_wait[WBT_NUM_RWQ]; @@ -248,13 +248,14 @@ static void wbt_done(struct rq_qos *rqos, struct request *rq) struct rq_wb *rwb = RQWB(rqos); if (!wbt_is_tracked(rq)) { - if (rwb->sync_cookie == rq) { - rwb->sync_issue = 0; - rwb->sync_cookie = NULL; - } + if (wbt_is_read(rq)) { + if (rwb->sync_cookie == rq) { + rwb->sync_issue = 0; + rwb->sync_cookie = NULL; + } - if (wbt_is_read(rq)) wb_timestamp(rwb, &rwb->last_comp); + } } else { WARN_ON_ONCE(rq == rwb->sync_cookie); __wbt_done(rqos, wbt_flags(rq)); diff --git a/block/blk.h b/block/blk.h index 0a2eccf28ca4..46f566f9b126 100644 --- a/block/blk.h +++ b/block/blk.h @@ -29,6 +29,7 @@ struct elevator_tags; /* Max future timer expiry for timeouts */ #define BLK_MAX_TIMEOUT (5 * HZ) +extern const struct kobj_type blk_queue_ktype; extern struct dentry *blk_debugfs_root; struct blk_flush_queue { diff --git a/block/genhd.c b/block/genhd.c index c26733f6324b..9bbc38d12792 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1303,6 +1303,7 @@ static void disk_release(struct device *dev) disk_free_zone_resources(disk); xa_destroy(&disk->part_tbl); + kobject_put(&disk->queue_kobj); disk->queue->disk = NULL; blk_put_queue(disk->queue); @@ -1486,6 +1487,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, INIT_LIST_HEAD(&disk->slave_bdevs); #endif mutex_init(&disk->rqos_state_mutex); + kobject_init(&disk->queue_kobj, &blk_queue_ktype); return disk; out_erase_part0: diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig index 5b9490367a39..bb01cebc42bf 100644 --- a/drivers/accel/Kconfig +++ b/drivers/accel/Kconfig @@ -28,5 +28,6 @@ source "drivers/accel/amdxdna/Kconfig" source "drivers/accel/habanalabs/Kconfig" source "drivers/accel/ivpu/Kconfig" source "drivers/accel/qaic/Kconfig" +source "drivers/accel/rocket/Kconfig" endif diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile index a301fb6089d4..ffc3fa588666 100644 --- a/drivers/accel/Makefile +++ b/drivers/accel/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/ obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/ obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/ obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/ +obj-$(CONFIG_DRM_ACCEL_ROCKET) += rocket/
\ No newline at end of file diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile index 0e9adf6890a0..6797dac65efa 100644 --- a/drivers/accel/amdxdna/Makefile +++ b/drivers/accel/amdxdna/Makefile @@ -15,6 +15,7 @@ amdxdna-y := \ amdxdna_mailbox_helper.o \ amdxdna_pci_drv.o \ amdxdna_sysfs.o \ + amdxdna_ubuf.o \ npu1_regs.o \ npu2_regs.o \ npu4_regs.o \ diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c index 2cff5419bd2f..420467a5325c 100644 --- a/drivers/accel/amdxdna/aie2_ctx.c +++ b/drivers/accel/amdxdna/aie2_ctx.c @@ -46,6 +46,17 @@ static void aie2_job_put(struct amdxdna_sched_job *job) kref_put(&job->refcnt, aie2_job_release); } +static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx) +{ + hwctx->old_status = hwctx->status; + hwctx->status = HWCTX_STAT_STOP; +} + +static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx) +{ + hwctx->status = hwctx->old_status; +} + /* The bad_job is used in aie2_sched_job_timedout, otherwise, set it to NULL */ static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx, struct drm_sched_job *bad_job) @@ -89,25 +100,6 @@ out: return ret; } -void aie2_restart_ctx(struct amdxdna_client *client) -{ - struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; - - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - mutex_lock(&client->hwctx_lock); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { - if (hwctx->status != HWCTX_STAT_STOP) - continue; - - hwctx->status = hwctx->old_status; - XDNA_DBG(xdna, "Resetting %s", hwctx->name); - aie2_hwctx_restart(xdna, hwctx); - } - mutex_unlock(&client->hwctx_lock); -} - static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq) { struct dma_fence *fence, *out_fence = NULL; @@ -141,34 +133,49 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx) dma_fence_put(fence); } -void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx) +static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg) { struct amdxdna_dev *xdna = hwctx->client->xdna; + aie2_hwctx_wait_for_idle(hwctx); + aie2_hwctx_stop(xdna, hwctx, NULL); + aie2_hwctx_status_shift_stop(hwctx); + + return 0; +} + +void aie2_hwctx_suspend(struct amdxdna_client *client) +{ + struct amdxdna_dev *xdna = client->xdna; + /* * Command timeout is unlikely. But if it happens, it doesn't * break the system. aie2_hwctx_stop() will destroy mailbox * and abort all commands. */ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - aie2_hwctx_wait_for_idle(hwctx); - aie2_hwctx_stop(xdna, hwctx, NULL); - hwctx->old_status = hwctx->status; - hwctx->status = HWCTX_STAT_STOP; + amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb); } -void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx) +static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg) { struct amdxdna_dev *xdna = hwctx->client->xdna; + aie2_hwctx_status_restore(hwctx); + return aie2_hwctx_restart(xdna, hwctx); +} + +int aie2_hwctx_resume(struct amdxdna_client *client) +{ + struct amdxdna_dev *xdna = client->xdna; + /* * The resume path cannot guarantee that mailbox channel can be * regenerated. If this happen, when submit message to this * mailbox channel, error will return. 
*/ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - hwctx->status = hwctx->old_status; - aie2_hwctx_restart(xdna, hwctx); + return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb); } static void diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c index 82412eec9a4b..9caad083543d 100644 --- a/drivers/accel/amdxdna/aie2_message.c +++ b/drivers/accel/amdxdna/aie2_message.c @@ -290,18 +290,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6 return 0; } +static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg) +{ + u32 *bitmap = arg; + + *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col); + + return 0; +} + int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled) { DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS); struct amdxdna_dev *xdna = ndev->xdna; struct amdxdna_client *client; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; dma_addr_t dma_addr; u32 aie_bitmap = 0; u8 *buff_addr; - int ret, idx; + int ret; buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr, DMA_FROM_DEVICE, GFP_KERNEL); @@ -309,12 +316,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, return -ENOMEM; /* Go through each hardware context and mark the AIE columns that are active */ - list_for_each_entry(client, &xdna->client_list, node) { - idx = srcu_read_lock(&client->hwctx_srcu); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) - aie_bitmap |= amdxdna_hwctx_col_map(hwctx); - srcu_read_unlock(&client->hwctx_srcu, idx); - } + list_for_each_entry(client, &xdna->client_list, node) + amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map); *cols_filled = 0; req.dump_buff_addr = dma_addr; diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c index c6cf7068d23c..16ac0cab4f44 100644 --- a/drivers/accel/amdxdna/aie2_pci.c +++ b/drivers/accel/amdxdna/aie2_pci.c @@ -10,6 +10,7 @@ #include <drm/drm_managed.h> #include <drm/drm_print.h> #include <drm/gpu_scheduler.h> +#include <linux/cleanup.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/iommu.h> @@ -440,6 +441,40 @@ disable_dev: return ret; } +static int aie2_hw_suspend(struct amdxdna_dev *xdna) +{ + struct amdxdna_client *client; + + guard(mutex)(&xdna->dev_lock); + list_for_each_entry(client, &xdna->client_list, node) + aie2_hwctx_suspend(client); + + aie2_hw_stop(xdna); + + return 0; +} + +static int aie2_hw_resume(struct amdxdna_dev *xdna) +{ + struct amdxdna_client *client; + int ret; + + guard(mutex)(&xdna->dev_lock); + ret = aie2_hw_start(xdna); + if (ret) { + XDNA_ERR(xdna, "Start hardware failed, %d", ret); + return ret; + } + + list_for_each_entry(client, &xdna->client_list, node) { + ret = aie2_hwctx_resume(client); + if (ret) + break; + } + + return ret; +} + static int aie2_init(struct amdxdna_dev *xdna) { struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev); @@ -520,14 +555,14 @@ static int aie2_init(struct amdxdna_dev *xdna) if (!ndev->psp_hdl) { XDNA_ERR(xdna, "failed to create psp"); ret = -ENOMEM; - goto free_irq; + goto release_fw; } xdna->dev_handle = ndev; ret = aie2_hw_start(xdna); if (ret) { XDNA_ERR(xdna, "start npu failed, ret %d", ret); - goto free_irq; + goto release_fw; } ret = aie2_mgmt_fw_query(ndev); @@ -578,8 +613,6 @@ async_event_free: aie2_error_async_events_free(ndev); stop_hw: aie2_hw_stop(xdna); -free_irq: - pci_free_irq_vectors(pdev); release_fw: 
release_firmware(fw); @@ -588,12 +621,10 @@ release_fw: static void aie2_fini(struct amdxdna_dev *xdna) { - struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev); struct amdxdna_dev_hdl *ndev = xdna->dev_handle; aie2_hw_stop(xdna); aie2_error_async_events_free(ndev); - pci_free_irq_vectors(pdev); } static int aie2_get_aie_status(struct amdxdna_client *client, @@ -752,65 +783,56 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client, return ret; } +static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg) +{ + struct amdxdna_drm_query_hwctx __user *buf, *tmp __free(kfree) = NULL; + struct amdxdna_drm_get_info *get_info_args = arg; + + if (get_info_args->buffer_size < sizeof(*tmp)) + return -EINVAL; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp->pid = hwctx->client->pid; + tmp->context_id = hwctx->id; + tmp->start_col = hwctx->start_col; + tmp->num_col = hwctx->num_col; + tmp->command_submissions = hwctx->priv->seq; + tmp->command_completions = hwctx->priv->completed; + + buf = u64_to_user_ptr(get_info_args->buffer); + + if (copy_to_user(buf, tmp, sizeof(*tmp))) + return -EFAULT; + + get_info_args->buffer += sizeof(*tmp); + get_info_args->buffer_size -= sizeof(*tmp); + + return 0; +} + static int aie2_get_hwctx_status(struct amdxdna_client *client, struct amdxdna_drm_get_info *args) { - struct amdxdna_drm_query_hwctx __user *buf; struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_drm_query_hwctx *tmp; + struct amdxdna_drm_get_info info_args; struct amdxdna_client *tmp_client; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; - bool overflow = false; - u32 req_bytes = 0; - u32 hw_i = 0; - int ret = 0; - int idx; + int ret; drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; + info_args.buffer = args->buffer; + info_args.buffer_size = args->buffer_size; - buf = u64_to_user_ptr(args->buffer); list_for_each_entry(tmp_client, &xdna->client_list, node) { - idx = srcu_read_lock(&tmp_client->hwctx_srcu); - amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) { - req_bytes += sizeof(*tmp); - if (args->buffer_size < req_bytes) { - /* Continue iterating to get the required size */ - overflow = true; - continue; - } - - memset(tmp, 0, sizeof(*tmp)); - tmp->pid = tmp_client->pid; - tmp->context_id = hwctx->id; - tmp->start_col = hwctx->start_col; - tmp->num_col = hwctx->num_col; - tmp->command_submissions = hwctx->priv->seq; - tmp->command_completions = hwctx->priv->completed; - - if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) { - ret = -EFAULT; - srcu_read_unlock(&tmp_client->hwctx_srcu, idx); - goto out; - } - hw_i++; - } - srcu_read_unlock(&tmp_client->hwctx_srcu, idx); - } - - if (overflow) { - XDNA_ERR(xdna, "Invalid buffer size. 
Given: %u Need: %u.", - args->buffer_size, req_bytes); - ret = -EINVAL; + ret = amdxdna_hwctx_walk(tmp_client, &info_args, aie2_hwctx_status_cb); + if (ret) + break; } -out: - kfree(tmp); - args->buffer_size = req_bytes; + args->buffer_size = (u32)(info_args.buffer - args->buffer); return ret; } @@ -905,8 +927,8 @@ static int aie2_set_state(struct amdxdna_client *client, const struct amdxdna_dev_ops aie2_ops = { .init = aie2_init, .fini = aie2_fini, - .resume = aie2_hw_start, - .suspend = aie2_hw_stop, + .resume = aie2_hw_resume, + .suspend = aie2_hw_suspend, .get_aie_info = aie2_get_info, .set_aie_state = aie2_set_state, .hwctx_init = aie2_hwctx_init, @@ -914,6 +936,4 @@ const struct amdxdna_dev_ops aie2_ops = { .hwctx_config = aie2_hwctx_config, .cmd_submit = aie2_cmd_submit, .hmm_invalidate = aie2_hmm_invalidate, - .hwctx_suspend = aie2_hwctx_suspend, - .hwctx_resume = aie2_hwctx_resume, }; diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h index 385914840eaa..91a8e948f82a 100644 --- a/drivers/accel/amdxdna/aie2_pci.h +++ b/drivers/accel/amdxdna/aie2_pci.h @@ -288,10 +288,9 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int aie2_hwctx_init(struct amdxdna_hwctx *hwctx); void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx); int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size); -void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx); -void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx); +void aie2_hwctx_suspend(struct amdxdna_client *client); +int aie2_hwctx_resume(struct amdxdna_client *client); int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq); void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq); -void aie2_restart_ctx(struct amdxdna_client *client); #endif /* _AIE2_PCI_H_ */ diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c index be073224bd69..4bfe4ef20550 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.c +++ b/drivers/accel/amdxdna/amdxdna_ctx.c @@ -60,32 +60,6 @@ static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx) return &fence->base; } -void amdxdna_hwctx_suspend(struct amdxdna_client *client) -{ - struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; - - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - mutex_lock(&client->hwctx_lock); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) - xdna->dev_info->ops->hwctx_suspend(hwctx); - mutex_unlock(&client->hwctx_lock); -} - -void amdxdna_hwctx_resume(struct amdxdna_client *client) -{ - struct amdxdna_dev *xdna = client->xdna; - struct amdxdna_hwctx *hwctx; - unsigned long hwctx_id; - - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - mutex_lock(&client->hwctx_lock); - amdxdna_for_each_hwctx(client, hwctx_id, hwctx) - xdna->dev_info->ops->hwctx_resume(hwctx); - mutex_unlock(&client->hwctx_lock); -} - static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx, struct srcu_struct *ss) { @@ -94,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx, synchronize_srcu(ss); /* At this point, user is not able to submit new commands */ - mutex_lock(&xdna->dev_lock); xdna->dev_info->ops->hwctx_fini(hwctx); - mutex_unlock(&xdna->dev_lock); kfree(hwctx->name); kfree(hwctx); } +int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg, + int (*walk)(struct amdxdna_hwctx *hwctx, void *arg)) +{ + struct amdxdna_hwctx *hwctx; + 
unsigned long hwctx_id; + int ret = 0, idx; + + idx = srcu_read_lock(&client->hwctx_srcu); + amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { + ret = walk(hwctx, arg); + if (ret) + break; + } + srcu_read_unlock(&client->hwctx_srcu, idx); + + return ret; +} + void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size) { struct amdxdna_cmd *cmd = abo->mem.kva; @@ -152,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client) struct amdxdna_hwctx *hwctx; unsigned long hwctx_id; - mutex_lock(&client->hwctx_lock); amdxdna_for_each_hwctx(client, hwctx_id, hwctx) { XDNA_DBG(client->xdna, "PID %d close HW context %d", client->pid, hwctx->id); xa_erase(&client->hwctx_xa, hwctx->id); - mutex_unlock(&client->hwctx_lock); amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu); - mutex_lock(&client->hwctx_lock); } - mutex_unlock(&client->hwctx_lock); } int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) @@ -251,6 +237,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d if (!drm_dev_enter(dev, &idx)) return -ENODEV; + mutex_lock(&xdna->dev_lock); hwctx = xa_erase(&client->hwctx_xa, args->handle); if (!hwctx) { ret = -EINVAL; @@ -267,6 +254,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle); out: + mutex_unlock(&xdna->dev_lock); drm_dev_exit(idx); return ret; } diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h index f0a4a8586d85..7cd7a55936f0 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.h +++ b/drivers/accel/amdxdna/amdxdna_ctx.h @@ -139,16 +139,10 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo) void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size); int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo); -static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx) -{ - return GENMASK(hwctx->start_col + hwctx->num_col - 1, - hwctx->start_col); -} - void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job); void amdxdna_hwctx_remove_all(struct amdxdna_client *client); -void amdxdna_hwctx_suspend(struct amdxdna_client *client); -void amdxdna_hwctx_resume(struct amdxdna_client *client); +int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg, + int (*walk)(struct amdxdna_hwctx *hwctx, void *arg)); int amdxdna_cmd_submit(struct amdxdna_client *client, u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt, diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c index 0f85a0105178..d407a36eb412 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.c +++ b/drivers/accel/amdxdna/amdxdna_gem.c @@ -18,6 +18,7 @@ #include "amdxdna_ctx.h" #include "amdxdna_gem.h" #include "amdxdna_pci_drv.h" +#include "amdxdna_ubuf.h" #define XDNA_MAX_CMD_BO_SIZE SZ_32K @@ -296,7 +297,7 @@ static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo, vma->vm_private_data = NULL; vma->vm_ops = NULL; - ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0); + ret = dma_buf_mmap(abo->dma_buf, vma, 0); if (ret) { XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret); return ret; @@ -391,10 +392,47 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = { .vunmap = drm_gem_dmabuf_vunmap, }; +static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct amdxdna_gem_obj *abo = to_xdna_obj(obj); + + iosys_map_clear(map); + + dma_resv_assert_held(obj->resv); + + if (is_import_bo(abo)) + 
dma_buf_vmap(abo->dma_buf, map); + else + drm_gem_shmem_object_vmap(obj, map); + + if (!map->vaddr) + return -ENOMEM; + + return 0; +} + +static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct amdxdna_gem_obj *abo = to_xdna_obj(obj); + + dma_resv_assert_held(obj->resv); + + if (is_import_bo(abo)) + dma_buf_vunmap(abo->dma_buf, map); + else + drm_gem_shmem_object_vunmap(obj, map); +} + static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags) { + struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + if (abo->dma_buf) { + get_dma_buf(abo->dma_buf); + return abo->dma_buf; + } + exp_info.ops = &amdxdna_dmabuf_ops; exp_info.size = gobj->size; exp_info.flags = flags; @@ -451,8 +489,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = { .pin = drm_gem_shmem_object_pin, .unpin = drm_gem_shmem_object_unpin, .get_sg_table = drm_gem_shmem_object_get_sg_table, - .vmap = drm_gem_shmem_object_vmap, - .vunmap = drm_gem_shmem_object_vunmap, + .vmap = amdxdna_gem_obj_vmap, + .vunmap = amdxdna_gem_obj_vunmap, .mmap = amdxdna_gem_obj_mmap, .vm_ops = &drm_gem_shmem_vm_ops, .export = amdxdna_gem_prime_export, @@ -494,6 +532,68 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size) return to_gobj(abo); } +static struct amdxdna_gem_obj * +amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size) +{ + struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size); + + if (IS_ERR(shmem)) + return ERR_CAST(shmem); + + shmem->map_wc = false; + return to_xdna_obj(&shmem->base); +} + +static struct amdxdna_gem_obj * +amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args) +{ + struct amdxdna_dev *xdna = to_xdna_dev(dev); + enum amdxdna_ubuf_flag flags = 0; + struct amdxdna_drm_va_tbl va_tbl; + struct drm_gem_object *gobj; + struct dma_buf *dma_buf; + + if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) { + XDNA_DBG(xdna, "Access va table failed"); + return ERR_PTR(-EINVAL); + } + + if (va_tbl.num_entries) { + if (args->type == AMDXDNA_BO_CMD) + flags |= AMDXDNA_UBUF_FLAG_MAP_DMA; + + dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries, + u64_to_user_ptr(args->vaddr + sizeof(va_tbl))); + } else { + dma_buf = dma_buf_get(va_tbl.dmabuf_fd); + } + + if (IS_ERR(dma_buf)) + return ERR_CAST(dma_buf); + + gobj = amdxdna_gem_prime_import(dev, dma_buf); + if (IS_ERR(gobj)) { + dma_buf_put(dma_buf); + return ERR_CAST(gobj); + } + + dma_buf_put(dma_buf); + + return to_xdna_obj(gobj); +} + +static struct amdxdna_gem_obj * +amdxdna_gem_create_object(struct drm_device *dev, + struct amdxdna_drm_create_bo *args) +{ + size_t aligned_sz = PAGE_ALIGN(args->size); + + if (args->vaddr) + return amdxdna_gem_create_ubuf_object(dev, args); + + return amdxdna_gem_create_shmem_object(dev, aligned_sz); +} + struct drm_gem_object * amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { @@ -545,16 +645,12 @@ amdxdna_drm_alloc_shmem(struct drm_device *dev, struct drm_file *filp) { struct amdxdna_client *client = filp->driver_priv; - struct drm_gem_shmem_object *shmem; struct amdxdna_gem_obj *abo; - shmem = drm_gem_shmem_create(dev, args->size); - if (IS_ERR(shmem)) - return ERR_CAST(shmem); - - shmem->map_wc = false; + abo = amdxdna_gem_create_object(dev, args); + if (IS_ERR(abo)) + return ERR_CAST(abo); - abo = to_xdna_obj(&shmem->base); abo->client = client; abo->type = AMDXDNA_BO_SHMEM; @@ -569,7 +665,6 
@@ amdxdna_drm_create_dev_heap(struct drm_device *dev, struct amdxdna_client *client = filp->driver_priv; struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); struct amdxdna_dev *xdna = to_xdna_dev(dev); - struct drm_gem_shmem_object *shmem; struct amdxdna_gem_obj *abo; int ret; @@ -586,14 +681,12 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev, goto mm_unlock; } - shmem = drm_gem_shmem_create(dev, args->size); - if (IS_ERR(shmem)) { - ret = PTR_ERR(shmem); + abo = amdxdna_gem_create_object(dev, args); + if (IS_ERR(abo)) { + ret = PTR_ERR(abo); goto mm_unlock; } - shmem->map_wc = false; - abo = to_xdna_obj(&shmem->base); abo->type = AMDXDNA_BO_DEV_HEAP; abo->client = client; abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base; @@ -657,7 +750,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, { struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); struct amdxdna_dev *xdna = to_xdna_dev(dev); - struct drm_gem_shmem_object *shmem; struct amdxdna_gem_obj *abo; int ret; @@ -671,12 +763,9 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, return ERR_PTR(-EINVAL); } - shmem = drm_gem_shmem_create(dev, args->size); - if (IS_ERR(shmem)) - return ERR_CAST(shmem); - - shmem->map_wc = false; - abo = to_xdna_obj(&shmem->base); + abo = amdxdna_gem_create_object(dev, args); + if (IS_ERR(abo)) + return ERR_CAST(abo); abo->type = AMDXDNA_BO_CMD; abo->client = filp->driver_priv; @@ -691,7 +780,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, return abo; release_obj: - drm_gem_shmem_free(shmem); + drm_gem_object_put(to_gobj(abo)); return ERR_PTR(ret); } @@ -702,7 +791,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f struct amdxdna_gem_obj *abo; int ret; - if (args->flags || args->vaddr || !args->size) + if (args->flags) return -EINVAL; XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx", diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c index f2bf1d374cc7..8ef5e4f27f5e 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.c +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c @@ -81,7 +81,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp) ret = -ENODEV; goto unbind_sva; } - mutex_init(&client->hwctx_lock); init_srcu_struct(&client->hwctx_srcu); xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC); mutex_init(&client->mm_lock); @@ -116,7 +115,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp) xa_destroy(&client->hwctx_xa); cleanup_srcu_struct(&client->hwctx_srcu); - mutex_destroy(&client->hwctx_lock); mutex_destroy(&client->mm_lock); if (client->dev_heap) drm_gem_object_put(to_gobj(client->dev_heap)); @@ -142,8 +140,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id) mutex_lock(&xdna->dev_lock); list_del_init(&client->node); - mutex_unlock(&xdna->dev_lock); amdxdna_hwctx_remove_all(client); + mutex_unlock(&xdna->dev_lock); drm_dev_exit(idx); return 0; @@ -330,11 +328,8 @@ static void amdxdna_remove(struct pci_dev *pdev) struct amdxdna_client, node); while (client) { list_del_init(&client->node); - mutex_unlock(&xdna->dev_lock); - amdxdna_hwctx_remove_all(client); - mutex_lock(&xdna->dev_lock); client = list_first_entry_or_null(&xdna->client_list, struct amdxdna_client, node); } @@ -343,89 +338,29 @@ static void amdxdna_remove(struct pci_dev *pdev) mutex_unlock(&xdna->dev_lock); } -static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna) -{ - if (xdna->dev_info->ops->suspend) - xdna->dev_info->ops->suspend(xdna); - - return 0; -} - 
-static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna) -{ - if (xdna->dev_info->ops->resume) - return xdna->dev_info->ops->resume(xdna); - - return 0; -} - static int amdxdna_pmops_suspend(struct device *dev) { struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); - struct amdxdna_client *client; - - mutex_lock(&xdna->dev_lock); - list_for_each_entry(client, &xdna->client_list, node) - amdxdna_hwctx_suspend(client); - amdxdna_dev_suspend_nolock(xdna); - mutex_unlock(&xdna->dev_lock); + if (!xdna->dev_info->ops->suspend) + return -EOPNOTSUPP; - return 0; + return xdna->dev_info->ops->suspend(xdna); } static int amdxdna_pmops_resume(struct device *dev) { struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); - struct amdxdna_client *client; - int ret; - XDNA_INFO(xdna, "firmware resuming..."); - mutex_lock(&xdna->dev_lock); - ret = amdxdna_dev_resume_nolock(xdna); - if (ret) { - XDNA_ERR(xdna, "resume NPU firmware failed"); - mutex_unlock(&xdna->dev_lock); - return ret; - } - - XDNA_INFO(xdna, "hardware context resuming..."); - list_for_each_entry(client, &xdna->client_list, node) - amdxdna_hwctx_resume(client); - mutex_unlock(&xdna->dev_lock); - - return 0; -} - -static int amdxdna_rpmops_suspend(struct device *dev) -{ - struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); - int ret; - - mutex_lock(&xdna->dev_lock); - ret = amdxdna_dev_suspend_nolock(xdna); - mutex_unlock(&xdna->dev_lock); - - XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret); - return ret; -} - -static int amdxdna_rpmops_resume(struct device *dev) -{ - struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); - int ret; - - mutex_lock(&xdna->dev_lock); - ret = amdxdna_dev_resume_nolock(xdna); - mutex_unlock(&xdna->dev_lock); + if (!xdna->dev_info->ops->resume) + return -EOPNOTSUPP; - XDNA_DBG(xdna, "Runtime resume done ret: %d", ret); - return ret; + return xdna->dev_info->ops->resume(xdna); } static const struct dev_pm_ops amdxdna_pm_ops = { SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume) - RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL) + RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL) }; static struct pci_driver amdxdna_pci_driver = { diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h index ab79600911aa..b6b3b424d1d5 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.h +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h @@ -50,13 +50,11 @@ struct amdxdna_dev_ops { int (*init)(struct amdxdna_dev *xdna); void (*fini)(struct amdxdna_dev *xdna); int (*resume)(struct amdxdna_dev *xdna); - void (*suspend)(struct amdxdna_dev *xdna); + int (*suspend)(struct amdxdna_dev *xdna); int (*hwctx_init)(struct amdxdna_hwctx *hwctx); void (*hwctx_fini)(struct amdxdna_hwctx *hwctx); int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size); void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq); - void (*hwctx_suspend)(struct amdxdna_hwctx *hwctx); - void (*hwctx_resume)(struct amdxdna_hwctx *hwctx); int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq); int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args); int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args); @@ -118,8 +116,6 @@ struct amdxdna_device_id { struct amdxdna_client { struct list_head node; pid_t pid; - struct mutex hwctx_lock; /* protect hwctx */ - /* do NOT wait this srcu when hwctx_lock is held 
*/ struct srcu_struct hwctx_srcu; struct xarray hwctx_xa; u32 next_hwctxid; diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c new file mode 100644 index 000000000000..077b2261cf2a --- /dev/null +++ b/drivers/accel/amdxdna/amdxdna_ubuf.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025, Advanced Micro Devices, Inc. + */ + +#include <drm/amdxdna_accel.h> +#include <drm/drm_device.h> +#include <drm/drm_print.h> +#include <linux/dma-buf.h> +#include <linux/pagemap.h> +#include <linux/vmalloc.h> + +#include "amdxdna_pci_drv.h" +#include "amdxdna_ubuf.h" + +struct amdxdna_ubuf_priv { + struct page **pages; + u64 nr_pages; + enum amdxdna_ubuf_flag flags; + struct mm_struct *mm; +}; + +static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach, + enum dma_data_direction direction) +{ + struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv; + struct sg_table *sg; + int ret; + + sg = kzalloc(sizeof(*sg), GFP_KERNEL); + if (!sg) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0, + ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL); + if (ret) { + kfree(sg); + return ERR_PTR(ret); + } + + if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) { + ret = dma_map_sgtable(attach->dev, sg, direction, 0); + if (ret) { + sg_free_table(sg); + kfree(sg); + return ERR_PTR(ret); + } + } + + return sg; +} + +static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach, + struct sg_table *sg, + enum dma_data_direction direction) +{ + struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv; + + if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) + dma_unmap_sgtable(attach->dev, sg, direction, 0); + + sg_free_table(sg); + kfree(sg); +} + +static void amdxdna_ubuf_release(struct dma_buf *dbuf) +{ + struct amdxdna_ubuf_priv *ubuf = dbuf->priv; + + unpin_user_pages(ubuf->pages, ubuf->nr_pages); + kvfree(ubuf->pages); + atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm); + mmdrop(ubuf->mm); + kfree(ubuf); +} + +static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct amdxdna_ubuf_priv *ubuf; + unsigned long pfn; + pgoff_t pgoff; + + ubuf = vma->vm_private_data; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + + pfn = page_to_pfn(ubuf->pages[pgoff]); + return vmf_insert_pfn(vma, vmf->address, pfn); +} + +static const struct vm_operations_struct amdxdna_ubuf_vm_ops = { + .fault = amdxdna_ubuf_vm_fault, +}; + +static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) +{ + struct amdxdna_ubuf_priv *ubuf = dbuf->priv; + + vma->vm_ops = &amdxdna_ubuf_vm_ops; + vma->vm_private_data = ubuf; + vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + + return 0; +} + +static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map) +{ + struct amdxdna_ubuf_priv *ubuf = dbuf->priv; + void *kva; + + kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL); + if (!kva) + return -EINVAL; + + iosys_map_set_vaddr(map, kva); + return 0; +} + +static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map) +{ + vunmap(map->vaddr); +} + +static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = { + .map_dma_buf = amdxdna_ubuf_map, + .unmap_dma_buf = amdxdna_ubuf_unmap, + .release = amdxdna_ubuf_release, + .mmap = amdxdna_ubuf_mmap, + .vmap = amdxdna_ubuf_vmap, + .vunmap = amdxdna_ubuf_vunmap, +}; + +struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev, + enum amdxdna_ubuf_flag flags, + u32 num_entries, void __user *va_entries) +{ + struct amdxdna_dev *xdna =
to_xdna_dev(dev); + unsigned long lock_limit, new_pinned; + struct amdxdna_drm_va_entry *va_ent; + struct amdxdna_ubuf_priv *ubuf; + u32 npages, start = 0; + struct dma_buf *dbuf; + int i, ret; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + if (!can_do_mlock()) + return ERR_PTR(-EPERM); + + ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL); + if (!ubuf) + return ERR_PTR(-ENOMEM); + + ubuf->flags = flags; + ubuf->mm = current->mm; + mmgrab(ubuf->mm); + + va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL); + if (!va_ent) { + ret = -ENOMEM; + goto free_ubuf; + } + + if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) { + XDNA_DBG(xdna, "Access va entries failed"); + ret = -EINVAL; + goto free_ent; + } + + for (i = 0, exp_info.size = 0; i < num_entries; i++) { + if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) || + !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) { + XDNA_ERR(xdna, "Invalid address or len %llx, %llx", + va_ent[i].vaddr, va_ent[i].len); + ret = -EINVAL; + goto free_ent; + } + + exp_info.size += va_ent[i].len; + } + + ubuf->nr_pages = exp_info.size >> PAGE_SHIFT; + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm); + if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) { + XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d", + new_pinned, lock_limit, capable(CAP_IPC_LOCK)); + ret = -ENOMEM; + goto sub_pin_cnt; + } + + ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL); + if (!ubuf->pages) { + ret = -ENOMEM; + goto sub_pin_cnt; + } + + for (i = 0; i < num_entries; i++) { + npages = va_ent[i].len >> PAGE_SHIFT; + + ret = pin_user_pages_fast(va_ent[i].vaddr, npages, + FOLL_WRITE | FOLL_LONGTERM, + &ubuf->pages[start]); + if (ret < 0 || ret != npages) { + ret = -ENOMEM; + XDNA_ERR(xdna, "Failed to pin pages ret %d", ret); + goto destroy_pages; + } + + start += ret; + } + + exp_info.ops = &amdxdna_ubuf_dmabuf_ops; + exp_info.priv = ubuf; + exp_info.flags = O_RDWR | O_CLOEXEC; + + dbuf = dma_buf_export(&exp_info); + if (IS_ERR(dbuf)) { + ret = PTR_ERR(dbuf); + goto destroy_pages; + } + kvfree(va_ent); + + return dbuf; + +destroy_pages: + if (start) + unpin_user_pages(ubuf->pages, start); + kvfree(ubuf->pages); +sub_pin_cnt: + atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm); +free_ent: + kvfree(va_ent); +free_ubuf: + mmdrop(ubuf->mm); + kfree(ubuf); + return ERR_PTR(ret); +} diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.h b/drivers/accel/amdxdna/amdxdna_ubuf.h new file mode 100644 index 000000000000..e5cb3bdb3ec9 --- /dev/null +++ b/drivers/accel/amdxdna/amdxdna_ubuf.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2025, Advanced Micro Devices, Inc. 
+ */ +#ifndef _AMDXDNA_UBUF_H_ +#define _AMDXDNA_UBUF_H_ + +#include <drm/drm_device.h> +#include <linux/dma-buf.h> + +enum amdxdna_ubuf_flag { + AMDXDNA_UBUF_FLAG_MAP_DMA = 1, +}; + +struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev, + enum amdxdna_ubuf_flag flags, + u32 num_entries, void __user *va_entries); + +#endif /* _AMDXDNA_UBUF_H_ */ diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c index 601fdbe70179..61472a381904 100644 --- a/drivers/accel/habanalabs/common/memory.c +++ b/drivers/accel/habanalabs/common/memory.c @@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf) struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv; struct hl_ctx *ctx; - if (!hl_dmabuf) - return; - ctx = hl_dmabuf->ctx; if (hl_dmabuf->memhash_hnode) @@ -1859,7 +1856,12 @@ static int export_dmabuf(struct hl_ctx *ctx, { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct hl_device *hdev = ctx->hdev; - int rc, fd; + CLASS(get_unused_fd, fd)(flags); + + if (fd < 0) { + dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd); + return fd; + } exp_info.ops = &habanalabs_dmabuf_ops; exp_info.size = total_size; @@ -1872,13 +1874,6 @@ static int export_dmabuf(struct hl_ctx *ctx, return PTR_ERR(hl_dmabuf->dmabuf); } - fd = dma_buf_fd(hl_dmabuf->dmabuf, flags); - if (fd < 0) { - dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd); - rc = fd; - goto err_dma_buf_put; - } - hl_dmabuf->ctx = ctx; hl_ctx_get(hl_dmabuf->ctx); atomic_inc(&ctx->hdev->dmabuf_export_cnt); @@ -1890,13 +1885,9 @@ static int export_dmabuf(struct hl_ctx *ctx, get_file(ctx->hpriv->file_priv->filp); *dmabuf_fd = fd; + fd_install(take_fd(fd), hl_dmabuf->dmabuf->file); return 0; - -err_dma_buf_put: - hl_dmabuf->dmabuf->priv = NULL; - dma_buf_put(hl_dmabuf->dmabuf); - return rc; } static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 39f83225c181..5f00809d448a 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -141,7 +141,6 @@ ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct ivpu_ipc_rx_msg *rx_msg; lockdep_assert_held(&ipc->cons_lock); - lockdep_assert_irqs_disabled(); rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC); if (!rx_msg) { diff --git a/drivers/accel/rocket/Kconfig b/drivers/accel/rocket/Kconfig new file mode 100644 index 000000000000..43d6cd98ec8e --- /dev/null +++ b/drivers/accel/rocket/Kconfig @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config DRM_ACCEL_ROCKET + tristate "Rocket (support for Rockchip NPUs)" + depends on DRM + depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST + depends on ROCKCHIP_IOMMU || COMPILE_TEST + depends on MMU + select DRM_SCHED + select DRM_GEM_SHMEM_HELPER + help + Choose this option if you have a Rockchip SoC that contains a + compatible Neural Processing Unit (NPU), such as the RK3588. Rockchip + calls the unit either RKNN or RKNPU; it accelerates inference of + neural networks. + + The interface exposed to userspace is described in + include/uapi/drm/rocket_accel.h and is used by the Rocket userspace + driver in Mesa3D. + + To compile this driver as a module, choose M here: the + module will be called rocket. + + If unsure, say N.
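The help text above points at include/uapi/drm/rocket_accel.h for the userspace interface. That header is not part of this excerpt, so the sketch below only illustrates the intended usage flow; the struct layout and the ioctl command index are assumptions inferred from rocket_gem.c further down, not copied from the real header.

/* Minimal client sketch for the rocket accel node (hypothetical names). */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>

/* Assumed layout; the real one lives in include/uapi/drm/rocket_accel.h. */
struct drm_rocket_create_bo {
	uint32_t size;        /* in: size of the buffer in bytes */
	uint32_t handle;      /* out: GEM handle */
	uint64_t offset;      /* out: fake offset for mmap() */
	uint64_t dma_address; /* out: address of the buffer in the NPU's VA space */
};

/* Assumed command index 0; DRM driver ioctls start at DRM_COMMAND_BASE. */
#define DRM_IOCTL_ROCKET_CREATE_BO \
	DRM_IOWR(DRM_COMMAND_BASE + 0x00, struct drm_rocket_create_bo)

int rocket_demo(void)
{
	/* Accel drivers register under /dev/accel, not /dev/dri. */
	int fd = open("/dev/accel/accel0", O_RDWR);
	struct drm_rocket_create_bo args = { .size = 4096 };
	void *cpu;

	if (fd < 0)
		return -1;
	if (ioctl(fd, DRM_IOCTL_ROCKET_CREATE_BO, &args)) {
		close(fd);
		return -1;
	}

	/* CPU view of the BO through the shmem helper's mmap path. */
	cpu = mmap(NULL, args.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, args.offset);
	if (cpu == MAP_FAILED) {
		close(fd);
		return -1;
	}

	/* A submit ioctl would reference args.dma_address from the tasks'
	 * register command buffers. */
	munmap(cpu, args.size);
	close(fd);
	return 0;
}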
diff --git a/drivers/accel/rocket/Makefile b/drivers/accel/rocket/Makefile new file mode 100644 index 000000000000..3713dfe223d6 --- /dev/null +++ b/drivers/accel/rocket/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_DRM_ACCEL_ROCKET) := rocket.o + +rocket-y := \ + rocket_core.o \ + rocket_device.o \ + rocket_drv.o \ + rocket_gem.o \ + rocket_job.o diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c new file mode 100644 index 000000000000..72fb5e5798fa --- /dev/null +++ b/drivers/accel/rocket/rocket_core.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dev_printk.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/iommu.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include "rocket_core.h" +#include "rocket_job.h" + +int rocket_core_init(struct rocket_core *core) +{ + struct device *dev = core->dev; + struct platform_device *pdev = to_platform_device(dev); + u32 version; + int err = 0; + + core->resets[0].id = "srst_a"; + core->resets[1].id = "srst_h"; + err = devm_reset_control_bulk_get_exclusive(&pdev->dev, ARRAY_SIZE(core->resets), + core->resets); + if (err) + return dev_err_probe(dev, err, "failed to get resets for core %d\n", core->index); + + err = devm_clk_bulk_get(dev, ARRAY_SIZE(core->clks), core->clks); + if (err) + return dev_err_probe(dev, err, "failed to get clocks for core %d\n", core->index); + + core->pc_iomem = devm_platform_ioremap_resource_byname(pdev, "pc"); + if (IS_ERR(core->pc_iomem)) { + dev_err(dev, "couldn't find PC registers %ld\n", PTR_ERR(core->pc_iomem)); + return PTR_ERR(core->pc_iomem); + } + + core->cna_iomem = devm_platform_ioremap_resource_byname(pdev, "cna"); + if (IS_ERR(core->cna_iomem)) { + dev_err(dev, "couldn't find CNA registers %ld\n", PTR_ERR(core->cna_iomem)); + return PTR_ERR(core->cna_iomem); + } + + core->core_iomem = devm_platform_ioremap_resource_byname(pdev, "core"); + if (IS_ERR(core->core_iomem)) { + dev_err(dev, "couldn't find CORE registers %ld\n", PTR_ERR(core->core_iomem)); + return PTR_ERR(core->core_iomem); + } + + dma_set_max_seg_size(dev, UINT_MAX); + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (err) + return err; + + core->iommu_group = iommu_group_get(dev); + + err = rocket_job_init(core); + if (err) + return err; + + pm_runtime_use_autosuspend(dev); + + /* + * As this NPU will be most often used as part of a media pipeline that + * ends presenting in a display, choose 50 ms (~3 frames at 60Hz) as an + * autosuspend delay as that will keep the device powered up while the + * pipeline is running. 
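+ * Every completed job calls pm_runtime_mark_last_busy() before pm_runtime_put_autosuspend(), so this 50 ms window restarts after each inference and back-to-back jobs do not pay a resume cost.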
+ */ + pm_runtime_set_autosuspend_delay(dev, 50); + + pm_runtime_enable(dev); + + err = pm_runtime_get_sync(dev); + if (err < 0) { + pm_runtime_put_noidle(dev); + rocket_job_fini(core); + return err; + } + + version = rocket_pc_readl(core, VERSION); + version += rocket_pc_readl(core, VERSION_NUM) & 0xffff; + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + dev_info(dev, "Rockchip NPU core %d version: %d\n", core->index, version); + + return 0; +} + +void rocket_core_fini(struct rocket_core *core) +{ + pm_runtime_dont_use_autosuspend(core->dev); + pm_runtime_disable(core->dev); + iommu_group_put(core->iommu_group); + core->iommu_group = NULL; + rocket_job_fini(core); +} + +void rocket_core_reset(struct rocket_core *core) +{ + reset_control_bulk_assert(ARRAY_SIZE(core->resets), core->resets); + + udelay(10); + + reset_control_bulk_deassert(ARRAY_SIZE(core->resets), core->resets); +} diff --git a/drivers/accel/rocket/rocket_core.h b/drivers/accel/rocket/rocket_core.h new file mode 100644 index 000000000000..f6d7382854ca --- /dev/null +++ b/drivers/accel/rocket/rocket_core.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#ifndef __ROCKET_CORE_H__ +#define __ROCKET_CORE_H__ + +#include <drm/gpu_scheduler.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/mutex_types.h> +#include <linux/reset.h> + +#include "rocket_registers.h" + +#define rocket_pc_readl(core, reg) \ readl((core)->pc_iomem + (REG_PC_##reg)) +#define rocket_pc_writel(core, reg, value) \ writel(value, (core)->pc_iomem + (REG_PC_##reg)) + +#define rocket_cna_readl(core, reg) \ readl((core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS) +#define rocket_cna_writel(core, reg, value) \ writel(value, (core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS) + +#define rocket_core_readl(core, reg) \ readl((core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS) +#define rocket_core_writel(core, reg, value) \ writel(value, (core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS) + +struct rocket_core { + struct device *dev; + struct rocket_device *rdev; + unsigned int index; + + int irq; + void __iomem *pc_iomem; + void __iomem *cna_iomem; + void __iomem *core_iomem; + struct clk_bulk_data clks[4]; + struct reset_control_bulk_data resets[2]; + + struct iommu_group *iommu_group; + + struct mutex job_lock; + struct rocket_job *in_flight_job; + + spinlock_t fence_lock; + + struct { + struct workqueue_struct *wq; + struct work_struct work; + atomic_t pending; + } reset; + + struct drm_gpu_scheduler sched; + u64 fence_context; + u64 emit_seqno; +}; + +int rocket_core_init(struct rocket_core *core); +void rocket_core_fini(struct rocket_core *core); +void rocket_core_reset(struct rocket_core *core); + +#endif diff --git a/drivers/accel/rocket/rocket_device.c b/drivers/accel/rocket/rocket_device.c new file mode 100644 index 000000000000..46e6ee1e72c5 --- /dev/null +++ b/drivers/accel/rocket/rocket_device.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#include <drm/drm_drv.h> +#include <linux/array_size.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/of.h> + +#include "rocket_device.h" + +struct rocket_device *rocket_device_init(struct platform_device *pdev, + const struct drm_driver *rocket_drm_driver) +{ + struct device *dev = &pdev->dev; + struct device_node *core_node; + struct rocket_device *rdev;
+ struct drm_device *ddev; + unsigned int num_cores = 0; + int err; + + rdev = devm_drm_dev_alloc(dev, rocket_drm_driver, struct rocket_device, ddev); + if (IS_ERR(rdev)) + return rdev; + + ddev = &rdev->ddev; + dev_set_drvdata(dev, rdev); + + for_each_compatible_node(core_node, NULL, "rockchip,rk3588-rknn-core") + if (of_device_is_available(core_node)) + num_cores++; + + rdev->cores = devm_kcalloc(dev, num_cores, sizeof(*rdev->cores), GFP_KERNEL); + if (!rdev->cores) + return ERR_PTR(-ENOMEM); + + dma_set_max_seg_size(dev, UINT_MAX); + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (err) + return ERR_PTR(err); + + err = devm_mutex_init(dev, &rdev->sched_lock); + if (err) + return ERR_PTR(-ENOMEM); + + err = drm_dev_register(ddev, 0); + if (err) + return ERR_PTR(err); + + return rdev; +} + +void rocket_device_fini(struct rocket_device *rdev) +{ + WARN_ON(rdev->num_cores > 0); + + drm_dev_unregister(&rdev->ddev); +} diff --git a/drivers/accel/rocket/rocket_device.h b/drivers/accel/rocket/rocket_device.h new file mode 100644 index 000000000000..ce662abc01d3 --- /dev/null +++ b/drivers/accel/rocket/rocket_device.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#ifndef __ROCKET_DEVICE_H__ +#define __ROCKET_DEVICE_H__ + +#include <drm/drm_device.h> +#include <linux/clk.h> +#include <linux/container_of.h> +#include <linux/iommu.h> +#include <linux/platform_device.h> + +#include "rocket_core.h" + +struct rocket_device { + struct drm_device ddev; + + struct mutex sched_lock; + + struct rocket_core *cores; + unsigned int num_cores; +}; + +struct rocket_device *rocket_device_init(struct platform_device *pdev, + const struct drm_driver *rocket_drm_driver); +void rocket_device_fini(struct rocket_device *rdev); +#define to_rocket_device(drm_dev) \ + ((struct rocket_device *)(container_of((drm_dev), struct rocket_device, ddev))) + +#endif /* __ROCKET_DEVICE_H__ */ diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c new file mode 100644 index 000000000000..5c0b63f0a8f0 --- /dev/null +++ b/drivers/accel/rocket/rocket_drv.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#include <drm/drm_accel.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem.h> +#include <drm/drm_ioctl.h> +#include <drm/rocket_accel.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/iommu.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "rocket_drv.h" +#include "rocket_gem.h" +#include "rocket_job.h" + +/* + * Facade device, used to expose a single DRM device to userspace, that + * schedules jobs to any RKNN cores in the system. 
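+ * Each RKNN core is probed as its own platform device; the first probe allocates this single DRM device and later probes only register their core with it (see rocket_probe() below).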
+ */ +static struct platform_device *drm_dev; +static struct rocket_device *rdev; + +static void +rocket_iommu_domain_destroy(struct kref *kref) +{ + struct rocket_iommu_domain *domain = container_of(kref, struct rocket_iommu_domain, kref); + + iommu_domain_free(domain->domain); + domain->domain = NULL; + kfree(domain); +} + +static struct rocket_iommu_domain* +rocket_iommu_domain_create(struct device *dev) +{ + struct rocket_iommu_domain *domain = kmalloc(sizeof(*domain), GFP_KERNEL); + void *err; + + if (!domain) + return ERR_PTR(-ENOMEM); + + domain->domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(domain->domain)) { + err = ERR_CAST(domain->domain); + kfree(domain); + return err; + } + kref_init(&domain->kref); + + return domain; +} + +struct rocket_iommu_domain * +rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv) +{ + kref_get(&rocket_priv->domain->kref); + return rocket_priv->domain; +} + +void +rocket_iommu_domain_put(struct rocket_iommu_domain *domain) +{ + kref_put(&domain->kref, rocket_iommu_domain_destroy); +} + +static int +rocket_open(struct drm_device *dev, struct drm_file *file) +{ + struct rocket_device *rdev = to_rocket_device(dev); + struct rocket_file_priv *rocket_priv; + u64 start, end; + int ret; + + if (!try_module_get(THIS_MODULE)) + return -EINVAL; + + rocket_priv = kzalloc(sizeof(*rocket_priv), GFP_KERNEL); + if (!rocket_priv) { + ret = -ENOMEM; + goto err_put_mod; + } + + rocket_priv->rdev = rdev; + rocket_priv->domain = rocket_iommu_domain_create(rdev->cores[0].dev); + if (IS_ERR(rocket_priv->domain)) { + ret = PTR_ERR(rocket_priv->domain); + goto err_free; + } + + file->driver_priv = rocket_priv; + + start = rocket_priv->domain->domain->geometry.aperture_start; + end = rocket_priv->domain->domain->geometry.aperture_end; + drm_mm_init(&rocket_priv->mm, start, end - start + 1); + mutex_init(&rocket_priv->mm_lock); + + ret = rocket_job_open(rocket_priv); + if (ret) + goto err_mm_takedown; + + return 0; + +err_mm_takedown: + mutex_destroy(&rocket_priv->mm_lock); + drm_mm_takedown(&rocket_priv->mm); + rocket_iommu_domain_put(rocket_priv->domain); +err_free: + kfree(rocket_priv); +err_put_mod: + module_put(THIS_MODULE); + return ret; +} + +static void +rocket_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct rocket_file_priv *rocket_priv = file->driver_priv; + + rocket_job_close(rocket_priv); + mutex_destroy(&rocket_priv->mm_lock); + drm_mm_takedown(&rocket_priv->mm); + rocket_iommu_domain_put(rocket_priv->domain); + kfree(rocket_priv); + module_put(THIS_MODULE); +} + +static const struct drm_ioctl_desc rocket_drm_driver_ioctls[] = { +#define ROCKET_IOCTL(n, func) \ + DRM_IOCTL_DEF_DRV(ROCKET_##n, rocket_ioctl_##func, 0) + + ROCKET_IOCTL(CREATE_BO, create_bo), + ROCKET_IOCTL(SUBMIT, submit), + ROCKET_IOCTL(PREP_BO, prep_bo), + ROCKET_IOCTL(FINI_BO, fini_bo), +}; + +DEFINE_DRM_ACCEL_FOPS(rocket_accel_driver_fops); + +/* + * Rocket driver version: + * - 1.0 - initial interface + */ +static const struct drm_driver rocket_drm_driver = { + .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM, + .open = rocket_open, + .postclose = rocket_postclose, + .gem_create_object = rocket_gem_create_object, + .ioctls = rocket_drm_driver_ioctls, + .num_ioctls = ARRAY_SIZE(rocket_drm_driver_ioctls), + .fops = &rocket_accel_driver_fops, + .name = "rocket", + .desc = "rocket DRM", +}; + +static int rocket_probe(struct platform_device *pdev) +{ + if (rdev == NULL) { + /* First core probing, initialize DRM device. 
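+ * The DRM device is anchored to the module-level "rknn" platform device (drm_dev) rather than to any core's device, so it stays valid while individual cores come and go.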
*/ + rdev = rocket_device_init(drm_dev, &rocket_drm_driver); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, "failed to initialize rocket device\n"); + return PTR_ERR(rdev); + } + } + + unsigned int core = rdev->num_cores; + + dev_set_drvdata(&pdev->dev, rdev); + + rdev->cores[core].rdev = rdev; + rdev->cores[core].dev = &pdev->dev; + rdev->cores[core].index = core; + + rdev->num_cores++; + + return rocket_core_init(&rdev->cores[core]); +} + +static void rocket_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + for (unsigned int core = 0; core < rdev->num_cores; core++) { + if (rdev->cores[core].dev == dev) { + rocket_core_fini(&rdev->cores[core]); + rdev->num_cores--; + break; + } + } + + if (rdev->num_cores == 0) { + /* Last core removed, deinitialize DRM device. */ + rocket_device_fini(rdev); + rdev = NULL; + } +} + +static const struct of_device_id dt_match[] = { + { .compatible = "rockchip,rk3588-rknn-core" }, + {} +}; +MODULE_DEVICE_TABLE(of, dt_match); + +static int find_core_for_dev(struct device *dev) +{ + struct rocket_device *rdev = dev_get_drvdata(dev); + + for (unsigned int core = 0; core < rdev->num_cores; core++) { + if (dev == rdev->cores[core].dev) + return core; + } + + return -1; +} + +static int rocket_device_runtime_resume(struct device *dev) +{ + struct rocket_device *rdev = dev_get_drvdata(dev); + int core = find_core_for_dev(dev); + int err = 0; + + if (core < 0) + return -ENODEV; + + err = clk_bulk_prepare_enable(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks); + if (err) { + dev_err(dev, "failed to enable (%d) clocks for core %d\n", err, core); + return err; + } + + return 0; +} + +static int rocket_device_runtime_suspend(struct device *dev) +{ + struct rocket_device *rdev = dev_get_drvdata(dev); + int core = find_core_for_dev(dev); + + if (core < 0) + return -ENODEV; + + if (!rocket_job_is_idle(&rdev->cores[core])) + return -EBUSY; + + clk_bulk_disable_unprepare(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks); + + return 0; +} + +EXPORT_GPL_DEV_PM_OPS(rocket_pm_ops) = { + RUNTIME_PM_OPS(rocket_device_runtime_suspend, rocket_device_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) +}; + +static struct platform_driver rocket_driver = { + .probe = rocket_probe, + .remove = rocket_remove, + .driver = { + .name = "rocket", + .pm = pm_ptr(&rocket_pm_ops), + .of_match_table = dt_match, + }, +}; + +static int __init rocket_register(void) +{ + drm_dev = platform_device_register_simple("rknn", -1, NULL, 0); + if (IS_ERR(drm_dev)) + return PTR_ERR(drm_dev); + + return platform_driver_register(&rocket_driver); +} + +static void __exit rocket_unregister(void) +{ + platform_driver_unregister(&rocket_driver); + + platform_device_unregister(drm_dev); +} + +module_init(rocket_register); +module_exit(rocket_unregister); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("DRM driver for the Rockchip NPU IP"); +MODULE_AUTHOR("Tomeu Vizoso"); diff --git a/drivers/accel/rocket/rocket_drv.h b/drivers/accel/rocket/rocket_drv.h new file mode 100644 index 000000000000..2c673bb99ccc --- /dev/null +++ b/drivers/accel/rocket/rocket_drv.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#ifndef __ROCKET_DRV_H__ +#define __ROCKET_DRV_H__ + +#include <drm/drm_mm.h> +#include <drm/gpu_scheduler.h> + +#include "rocket_device.h" + +extern const struct dev_pm_ops rocket_pm_ops; + +struct rocket_iommu_domain { + struct iommu_domain 
*domain; + struct kref kref; +}; + +struct rocket_file_priv { + struct rocket_device *rdev; + + struct rocket_iommu_domain *domain; + struct drm_mm mm; + struct mutex mm_lock; + + struct drm_sched_entity sched_entity; +}; + +struct rocket_iommu_domain *rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv); +void rocket_iommu_domain_put(struct rocket_iommu_domain *domain); + +#endif diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c new file mode 100644 index 000000000000..0551e11cc184 --- /dev/null +++ b/drivers/accel/rocket/rocket_gem.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#include <drm/drm_device.h> +#include <drm/drm_utils.h> +#include <drm/rocket_accel.h> +#include <linux/dma-mapping.h> +#include <linux/iommu.h> + +#include "rocket_drv.h" +#include "rocket_gem.h" + +static void rocket_gem_bo_free(struct drm_gem_object *obj) +{ + struct rocket_gem_object *bo = to_rocket_bo(obj); + struct rocket_file_priv *rocket_priv = bo->driver_priv; + size_t unmapped; + + drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1); + + unmapped = iommu_unmap(bo->domain->domain, bo->mm.start, bo->size); + drm_WARN_ON(obj->dev, unmapped != bo->size); + + mutex_lock(&rocket_priv->mm_lock); + drm_mm_remove_node(&bo->mm); + mutex_unlock(&rocket_priv->mm_lock); + + rocket_iommu_domain_put(bo->domain); + bo->domain = NULL; + + drm_gem_shmem_free(&bo->base); +} + +static const struct drm_gem_object_funcs rocket_gem_funcs = { + .free = rocket_gem_bo_free, + .print_info = drm_gem_shmem_object_print_info, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, +}; + +struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size) +{ + struct rocket_gem_object *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + obj->base.base.funcs = &rocket_gem_funcs; + + return &obj->base.base; +} + +int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct rocket_file_priv *rocket_priv = file->driver_priv; + struct drm_rocket_create_bo *args = data; + struct drm_gem_shmem_object *shmem_obj; + struct rocket_gem_object *rkt_obj; + struct drm_gem_object *gem_obj; + struct sg_table *sgt; + int ret; + + shmem_obj = drm_gem_shmem_create(dev, args->size); + if (IS_ERR(shmem_obj)) + return PTR_ERR(shmem_obj); + + gem_obj = &shmem_obj->base; + rkt_obj = to_rocket_bo(gem_obj); + + rkt_obj->driver_priv = rocket_priv; + rkt_obj->domain = rocket_iommu_domain_get(rocket_priv); + rkt_obj->size = args->size; + rkt_obj->offset = 0; + + ret = drm_gem_handle_create(file, gem_obj, &args->handle); + drm_gem_object_put(gem_obj); + if (ret) + goto err; + + sgt = drm_gem_shmem_get_pages_sgt(shmem_obj); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto err; + } + + mutex_lock(&rocket_priv->mm_lock); + ret = drm_mm_insert_node_generic(&rocket_priv->mm, &rkt_obj->mm, + rkt_obj->size, PAGE_SIZE, + 0, 0); + mutex_unlock(&rocket_priv->mm_lock); + if (ret) + goto err; + + ret = iommu_map_sgtable(rocket_priv->domain->domain, + rkt_obj->mm.start, + shmem_obj->sgt, + IOMMU_READ | IOMMU_WRITE); + if (ret < 0 || ret < args->size) { + drm_err(dev, "failed to map buffer: size=%d request_size=%u\n", + ret, args->size); + ret =
-ENOMEM; + goto err_remove_node; + } + + /* iommu_map_sgtable might have aligned the size */ + rkt_obj->size = ret; + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); + args->dma_address = rkt_obj->mm.start; + + return 0; + +err_remove_node: + mutex_lock(&rocket_priv->mm_lock); + drm_mm_remove_node(&rkt_obj->mm); + mutex_unlock(&rocket_priv->mm_lock); + +err: + drm_gem_shmem_object_free(gem_obj); + + return ret; +} + +int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_rocket_prep_bo *args = data; + unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns); + struct drm_gem_object *gem_obj; + struct drm_gem_shmem_object *shmem_obj; + long ret = 0; + + if (args->reserved != 0) { + drm_dbg(dev, "Reserved field in drm_rocket_prep_bo struct should be 0.\n"); + return -EINVAL; + } + + gem_obj = drm_gem_object_lookup(file, args->handle); + if (!gem_obj) + return -ENOENT; + + ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout); + if (!ret) + ret = timeout ? -ETIMEDOUT : -EBUSY; + else if (ret > 0) + ret = 0; + + shmem_obj = &to_rocket_bo(gem_obj)->base; + + dma_sync_sgtable_for_cpu(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL); + + drm_gem_object_put(gem_obj); + + return ret; +} + +int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_rocket_fini_bo *args = data; + struct drm_gem_shmem_object *shmem_obj; + struct rocket_gem_object *rkt_obj; + struct drm_gem_object *gem_obj; + + if (args->reserved != 0) { + drm_dbg(dev, "Reserved field in drm_rocket_fini_bo struct should be 0.\n"); + return -EINVAL; + } + + gem_obj = drm_gem_object_lookup(file, args->handle); + if (!gem_obj) + return -ENOENT; + + rkt_obj = to_rocket_bo(gem_obj); + shmem_obj = &rkt_obj->base; + + dma_sync_sgtable_for_device(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL); + + drm_gem_object_put(gem_obj); + + return 0; +} diff --git a/drivers/accel/rocket/rocket_gem.h b/drivers/accel/rocket/rocket_gem.h new file mode 100644 index 000000000000..240430334509 --- /dev/null +++ b/drivers/accel/rocket/rocket_gem.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#ifndef __ROCKET_GEM_H__ +#define __ROCKET_GEM_H__ + +#include <drm/drm_gem_shmem_helper.h> + +struct rocket_gem_object { + struct drm_gem_shmem_object base; + + struct rocket_file_priv *driver_priv; + + struct rocket_iommu_domain *domain; + struct drm_mm_node mm; + size_t size; + u32 offset; +}; + +struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size); + +int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file); + +int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file); + +int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file); + +static inline +struct rocket_gem_object *to_rocket_bo(struct drm_gem_object *obj) +{ + return container_of(to_drm_gem_shmem_obj(obj), struct rocket_gem_object, base); +} + +#endif diff --git a/drivers/accel/rocket/rocket_job.c b/drivers/accel/rocket/rocket_job.c new file mode 100644 index 000000000000..5d4afd692306 --- /dev/null +++ b/drivers/accel/rocket/rocket_job.c @@ -0,0 +1,636 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */ +/* Copyright 2019 Collabora ltd.
*/ +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#include <drm/drm_print.h> +#include <drm/drm_file.h> +#include <drm/drm_gem.h> +#include <drm/rocket_accel.h> +#include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "rocket_core.h" +#include "rocket_device.h" +#include "rocket_drv.h" +#include "rocket_job.h" +#include "rocket_registers.h" + +#define JOB_TIMEOUT_MS 500 + +static struct rocket_job * +to_rocket_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct rocket_job, base); +} + +static const char *rocket_fence_get_driver_name(struct dma_fence *fence) +{ + return "rocket"; +} + +static const char *rocket_fence_get_timeline_name(struct dma_fence *fence) +{ + return "rockchip-npu"; +} + +static const struct dma_fence_ops rocket_fence_ops = { + .get_driver_name = rocket_fence_get_driver_name, + .get_timeline_name = rocket_fence_get_timeline_name, +}; + +static struct dma_fence *rocket_fence_create(struct rocket_core *core) +{ + struct dma_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return ERR_PTR(-ENOMEM); + + dma_fence_init(fence, &rocket_fence_ops, &core->fence_lock, + core->fence_context, ++core->emit_seqno); + + return fence; +} + +static int +rocket_copy_tasks(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_rocket_job *job, + struct rocket_job *rjob) +{ + int ret = 0; + + if (job->task_struct_size < sizeof(struct drm_rocket_task)) + return -EINVAL; + + rjob->task_count = job->task_count; + + if (!rjob->task_count) + return 0; + + rjob->tasks = kvmalloc_array(job->task_count, sizeof(*rjob->tasks), GFP_KERNEL); + if (!rjob->tasks) { + drm_dbg(dev, "Failed to allocate task array\n"); + return -ENOMEM; + } + + for (int i = 0; i < rjob->task_count; i++) { + struct drm_rocket_task task = {0}; + + if (copy_from_user(&task, + u64_to_user_ptr(job->tasks) + i * job->task_struct_size, + sizeof(task))) { + drm_dbg(dev, "Failed to copy incoming tasks\n"); + ret = -EFAULT; + goto fail; + } + + if (task.regcmd_count == 0) { + drm_dbg(dev, "regcmd_count field in drm_rocket_task should be > 0.\n"); + ret = -EINVAL; + goto fail; + } + + rjob->tasks[i].regcmd = task.regcmd; + rjob->tasks[i].regcmd_count = task.regcmd_count; + } + + return 0; + +fail: + kvfree(rjob->tasks); + return ret; +} + +static void rocket_job_hw_submit(struct rocket_core *core, struct rocket_job *job) +{ + struct rocket_task *task; + unsigned int extra_bit; + + /* Don't queue the job if a reset is in progress */ + if (atomic_read(&core->reset.pending)) + return; + + /* GO ! 
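+ * Point the PC unit at this task's register command buffer, set up the ping-pong pointers, unmask and clear the DPU interrupts, and enable operation; completion comes back through rocket_job_irq_handler().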
*/ + + task = &job->tasks[job->next_task_idx]; + job->next_task_idx++; + + rocket_pc_writel(core, BASE_ADDRESS, 0x1); + + /* From rknpu, in the TRM this bit is marked as reserved */ + extra_bit = 0x10000000 * core->index; + rocket_cna_writel(core, S_POINTER, CNA_S_POINTER_POINTER_PP_EN(1) | + CNA_S_POINTER_EXECUTER_PP_EN(1) | + CNA_S_POINTER_POINTER_PP_MODE(1) | + extra_bit); + + rocket_core_writel(core, S_POINTER, CORE_S_POINTER_POINTER_PP_EN(1) | + CORE_S_POINTER_EXECUTER_PP_EN(1) | + CORE_S_POINTER_POINTER_PP_MODE(1) | + extra_bit); + + rocket_pc_writel(core, BASE_ADDRESS, task->regcmd); + rocket_pc_writel(core, REGISTER_AMOUNTS, + PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT((task->regcmd_count + 1) / 2 - 1)); + + rocket_pc_writel(core, INTERRUPT_MASK, PC_INTERRUPT_MASK_DPU_0 | PC_INTERRUPT_MASK_DPU_1); + rocket_pc_writel(core, INTERRUPT_CLEAR, PC_INTERRUPT_CLEAR_DPU_0 | PC_INTERRUPT_CLEAR_DPU_1); + + rocket_pc_writel(core, TASK_CON, PC_TASK_CON_RESERVED_0(1) | + PC_TASK_CON_TASK_COUNT_CLEAR(1) | + PC_TASK_CON_TASK_NUMBER(1) | + PC_TASK_CON_TASK_PP_EN(1)); + + rocket_pc_writel(core, TASK_DMA_BASE_ADDR, PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(0x0)); + + rocket_pc_writel(core, OPERATION_ENABLE, PC_OPERATION_ENABLE_OP_EN(1)); + + dev_dbg(core->dev, "Submitted regcmd at 0x%llx to core %d", task->regcmd, core->index); +} + +static int rocket_acquire_object_fences(struct drm_gem_object **bos, + int bo_count, + struct drm_sched_job *job, + bool is_write) +{ + int i, ret; + + for (i = 0; i < bo_count; i++) { + ret = dma_resv_reserve_fences(bos[i]->resv, 1); + if (ret) + return ret; + + ret = drm_sched_job_add_implicit_dependencies(job, bos[i], + is_write); + if (ret) + return ret; + } + + return 0; +} + +static void rocket_attach_object_fences(struct drm_gem_object **bos, + int bo_count, + struct dma_fence *fence) +{ + int i; + + for (i = 0; i < bo_count; i++) + dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE); +} + +static int rocket_job_push(struct rocket_job *job) +{ + struct rocket_device *rdev = job->rdev; + struct drm_gem_object **bos; + struct ww_acquire_ctx acquire_ctx; + int ret = 0; + + bos = kvmalloc_array(job->in_bo_count + job->out_bo_count, sizeof(void *), + GFP_KERNEL); + if (!bos) + return -ENOMEM; + + memcpy(bos, job->in_bos, job->in_bo_count * sizeof(void *)); + memcpy(&bos[job->in_bo_count], job->out_bos, job->out_bo_count * sizeof(void *)); + + ret = drm_gem_lock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx); + if (ret) + goto err; + + scoped_guard(mutex, &rdev->sched_lock) { + drm_sched_job_arm(&job->base); + + job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished); + + ret = rocket_acquire_object_fences(job->in_bos, job->in_bo_count, &job->base, false); + if (ret) + goto err_unlock; + + ret = rocket_acquire_object_fences(job->out_bos, job->out_bo_count, &job->base, true); + if (ret) + goto err_unlock; + + kref_get(&job->refcount); /* put by scheduler job completion */ + + drm_sched_entity_push_job(&job->base); + } + + rocket_attach_object_fences(job->out_bos, job->out_bo_count, job->inference_done_fence); + +err_unlock: + drm_gem_unlock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx); +err: + kvfree(bos); + + return ret; +} + +static void rocket_job_cleanup(struct kref *ref) +{ + struct rocket_job *job = container_of(ref, struct rocket_job, + refcount); + unsigned int i; + + if (job->domain) + rocket_iommu_domain_put(job->domain); + + dma_fence_put(job->done_fence); + dma_fence_put(job->inference_done_fence); + + if (job->in_bos) { + for (i = 0; i <
job->in_bo_count; i++) + drm_gem_object_put(job->in_bos[i]); + + kvfree(job->in_bos); + } + + if (job->out_bos) { + for (i = 0; i < job->out_bo_count; i++) + drm_gem_object_put(job->out_bos[i]); + + kvfree(job->out_bos); + } + + kvfree(job->tasks); + + kfree(job); +} + +static void rocket_job_put(struct rocket_job *job) +{ + kref_put(&job->refcount, rocket_job_cleanup); +} + +static void rocket_job_free(struct drm_sched_job *sched_job) +{ + struct rocket_job *job = to_rocket_job(sched_job); + + drm_sched_job_cleanup(sched_job); + + rocket_job_put(job); +} + +static struct rocket_core *sched_to_core(struct rocket_device *rdev, + struct drm_gpu_scheduler *sched) +{ + unsigned int core; + + for (core = 0; core < rdev->num_cores; core++) { + if (&rdev->cores[core].sched == sched) + return &rdev->cores[core]; + } + + return NULL; +} + +static struct dma_fence *rocket_job_run(struct drm_sched_job *sched_job) +{ + struct rocket_job *job = to_rocket_job(sched_job); + struct rocket_device *rdev = job->rdev; + struct rocket_core *core = sched_to_core(rdev, sched_job->sched); + struct dma_fence *fence = NULL; + int ret; + + if (unlikely(job->base.s_fence->finished.error)) + return NULL; + + /* + * Nothing to execute: can happen if the job has finished while + * we were resetting the NPU. + */ + if (job->next_task_idx == job->task_count) + return NULL; + + fence = rocket_fence_create(core); + if (IS_ERR(fence)) + return fence; + + if (job->done_fence) + dma_fence_put(job->done_fence); + job->done_fence = dma_fence_get(fence); + + ret = pm_runtime_get_sync(core->dev); + if (ret < 0) + return fence; + + ret = iommu_attach_group(job->domain->domain, core->iommu_group); + if (ret < 0) + return fence; + + scoped_guard(mutex, &core->job_lock) { + core->in_flight_job = job; + rocket_job_hw_submit(core, job); + } + + return fence; +} + +static void rocket_job_handle_irq(struct rocket_core *core) +{ + pm_runtime_mark_last_busy(core->dev); + + rocket_pc_writel(core, OPERATION_ENABLE, 0x0); + rocket_pc_writel(core, INTERRUPT_CLEAR, 0x1ffff); + + scoped_guard(mutex, &core->job_lock) + if (core->in_flight_job) { + if (core->in_flight_job->next_task_idx < core->in_flight_job->task_count) { + rocket_job_hw_submit(core, core->in_flight_job); + return; + } + + iommu_detach_group(core->in_flight_job->domain->domain, core->iommu_group); + dma_fence_signal(core->in_flight_job->done_fence); + pm_runtime_put_autosuspend(core->dev); + core->in_flight_job = NULL; + } +} + +static void +rocket_reset(struct rocket_core *core, struct drm_sched_job *bad) +{ + if (!atomic_read(&core->reset.pending)) + return; + + drm_sched_stop(&core->sched, bad); + + /* + * Remaining interrupts have been handled, but we might still have + * stuck jobs. Let's make sure the PM counters stay balanced by + * manually calling pm_runtime_put_noidle(). + */ + scoped_guard(mutex, &core->job_lock) { + if (core->in_flight_job) { + pm_runtime_put_noidle(core->dev); + iommu_detach_group(core->in_flight_job->domain->domain, core->iommu_group); + } + + core->in_flight_job = NULL; + } + + /* Proceed with reset now. */ + rocket_core_reset(core); + + /* NPU has been reset, we can clear the reset pending bit.
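+ * Clear it before drm_sched_start() so that rocket_job_hw_submit() stops rejecting the jobs the restarted scheduler hands back to us.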
*/ + atomic_set(&core->reset.pending, 0); + + /* Restart the scheduler */ + drm_sched_start(&core->sched, 0); +} + +static enum drm_gpu_sched_stat rocket_job_timedout(struct drm_sched_job *sched_job) +{ + struct rocket_job *job = to_rocket_job(sched_job); + struct rocket_device *rdev = job->rdev; + struct rocket_core *core = sched_to_core(rdev, sched_job->sched); + + dev_err(core->dev, "NPU job timed out"); + + atomic_set(&core->reset.pending, 1); + rocket_reset(core, sched_job); + + return DRM_GPU_SCHED_STAT_RESET; +} + +static void rocket_reset_work(struct work_struct *work) +{ + struct rocket_core *core; + + core = container_of(work, struct rocket_core, reset.work); + rocket_reset(core, NULL); +} + +static const struct drm_sched_backend_ops rocket_sched_ops = { + .run_job = rocket_job_run, + .timedout_job = rocket_job_timedout, + .free_job = rocket_job_free +}; + +static irqreturn_t rocket_job_irq_handler_thread(int irq, void *data) +{ + struct rocket_core *core = data; + + rocket_job_handle_irq(core); + + return IRQ_HANDLED; +} + +static irqreturn_t rocket_job_irq_handler(int irq, void *data) +{ + struct rocket_core *core = data; + u32 raw_status = rocket_pc_readl(core, INTERRUPT_RAW_STATUS); + + WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR); + WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR); + + if (!(raw_status & PC_INTERRUPT_RAW_STATUS_DPU_0 || + raw_status & PC_INTERRUPT_RAW_STATUS_DPU_1)) + return IRQ_NONE; + + rocket_pc_writel(core, INTERRUPT_MASK, 0x0); + + return IRQ_WAKE_THREAD; +} + +int rocket_job_init(struct rocket_core *core) +{ + struct drm_sched_init_args args = { + .ops = &rocket_sched_ops, + .num_rqs = DRM_SCHED_PRIORITY_COUNT, + .credit_limit = 1, + .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), + .name = dev_name(core->dev), + .dev = core->dev, + }; + int ret; + + INIT_WORK(&core->reset.work, rocket_reset_work); + spin_lock_init(&core->fence_lock); + mutex_init(&core->job_lock); + + core->irq = platform_get_irq(to_platform_device(core->dev), 0); + if (core->irq < 0) + return core->irq; + + ret = devm_request_threaded_irq(core->dev, core->irq, + rocket_job_irq_handler, + rocket_job_irq_handler_thread, + IRQF_SHARED, dev_name(core->dev), + core); + if (ret) { + dev_err(core->dev, "failed to request job irq"); + return ret; + } + + core->reset.wq = alloc_ordered_workqueue("rocket-reset-%d", 0, core->index); + if (!core->reset.wq) + return -ENOMEM; + + core->fence_context = dma_fence_context_alloc(1); + + args.timeout_wq = core->reset.wq; + ret = drm_sched_init(&core->sched, &args); + if (ret) { + dev_err(core->dev, "Failed to create scheduler: %d.", ret); + goto err_sched; + } + + return 0; + +err_sched: + destroy_workqueue(core->reset.wq); + return ret; +} + +void rocket_job_fini(struct rocket_core *core) +{ + drm_sched_fini(&core->sched); + + cancel_work_sync(&core->reset.work); + destroy_workqueue(core->reset.wq); +} + +int rocket_job_open(struct rocket_file_priv *rocket_priv) +{ + struct rocket_device *rdev = rocket_priv->rdev; + struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores, sizeof(*scheds), + GFP_KERNEL); + unsigned int core; + int ret; + + if (!scheds) + return -ENOMEM; + + for (core = 0; core < rdev->num_cores; core++) + scheds[core] = &rdev->cores[core].sched; + + ret = drm_sched_entity_init(&rocket_priv->sched_entity, + DRM_SCHED_PRIORITY_NORMAL, + scheds, + rdev->num_cores, NULL); + if (WARN_ON(ret)) + return ret; + + return 0; +} + +void rocket_job_close(struct rocket_file_priv *rocket_priv) +{ + struct
drm_sched_entity *entity = &rocket_priv->sched_entity; + + drm_sched_entity_destroy(entity); + kfree(entity->sched_list); +} + +int rocket_job_is_idle(struct rocket_core *core) +{ + /* If there are any jobs in this HW queue, we're not idle */ + if (atomic_read(&core->sched.credit_count)) + return false; + + return true; +} + +static int rocket_ioctl_submit_job(struct drm_device *dev, struct drm_file *file, + struct drm_rocket_job *job) +{ + struct rocket_device *rdev = to_rocket_device(dev); + struct rocket_file_priv *file_priv = file->driver_priv; + struct rocket_job *rjob = NULL; + int ret = 0; + + if (job->task_count == 0) + return -EINVAL; + + rjob = kzalloc(sizeof(*rjob), GFP_KERNEL); + if (!rjob) + return -ENOMEM; + + kref_init(&rjob->refcount); + + rjob->rdev = rdev; + + ret = drm_sched_job_init(&rjob->base, + &file_priv->sched_entity, + 1, NULL, file->client_id); + if (ret) + goto out_put_job; + + ret = rocket_copy_tasks(dev, file, job, rjob); + if (ret) + goto out_cleanup_job; + + ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->in_bo_handles), + job->in_bo_handle_count, &rjob->in_bos); + if (ret) + goto out_cleanup_job; + + rjob->in_bo_count = job->in_bo_handle_count; + + ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->out_bo_handles), + job->out_bo_handle_count, &rjob->out_bos); + if (ret) + goto out_cleanup_job; + + rjob->out_bo_count = job->out_bo_handle_count; + + rjob->domain = rocket_iommu_domain_get(file_priv); + + ret = rocket_job_push(rjob); + if (ret) + goto out_cleanup_job; + +out_cleanup_job: + if (ret) + drm_sched_job_cleanup(&rjob->base); +out_put_job: + rocket_job_put(rjob); + + return ret; +} + +int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_rocket_submit *args = data; + struct drm_rocket_job *jobs; + int ret = 0; + unsigned int i = 0; + + if (args->job_count == 0) + return 0; + + if (args->job_struct_size < sizeof(struct drm_rocket_job)) { + drm_dbg(dev, "job_struct_size field in drm_rocket_submit struct is too small.\n"); + return -EINVAL; + } + + if (args->reserved != 0) { + drm_dbg(dev, "Reserved field in drm_rocket_submit struct should be 0.\n"); + return -EINVAL; + } + + jobs = kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL); + if (!jobs) { + drm_dbg(dev, "Failed to allocate incoming job array\n"); + return -ENOMEM; + } + + for (i = 0; i < args->job_count; i++) { + if (copy_from_user(&jobs[i], + u64_to_user_ptr(args->jobs) + i * args->job_struct_size, + sizeof(*jobs))) { + ret = -EFAULT; + drm_dbg(dev, "Failed to copy incoming job array\n"); + goto exit; + } + } + + for (i = 0; i < args->job_count; i++) { + ret = rocket_ioctl_submit_job(dev, file, &jobs[i]); + if (ret) + goto exit; + } + +exit: + kvfree(jobs); + + return ret; +} diff --git a/drivers/accel/rocket/rocket_job.h b/drivers/accel/rocket/rocket_job.h new file mode 100644 index 000000000000..4ae00feec3b9 --- /dev/null +++ b/drivers/accel/rocket/rocket_job.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */ + +#ifndef __ROCKET_JOB_H__ +#define __ROCKET_JOB_H__ + +#include <drm/drm_drv.h> +#include <drm/gpu_scheduler.h> + +#include "rocket_core.h" +#include "rocket_drv.h" + +struct rocket_task { + u64 regcmd; + u32 regcmd_count; +}; + +struct rocket_job { + struct drm_sched_job base; + + struct rocket_device *rdev; + + struct drm_gem_object **in_bos; + struct drm_gem_object **out_bos; + + u32 in_bo_count; + u32 out_bo_count; + + struct rocket_task *tasks; + u32 task_count; + u32
next_task_idx; + + /* Fence to be signaled by drm-sched once it's done with the job */ + struct dma_fence *inference_done_fence; + + /* Fence to be signaled by IRQ handler when the job is complete. */ + struct dma_fence *done_fence; + + struct rocket_iommu_domain *domain; + + struct kref refcount; +}; + +int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file); + +int rocket_job_init(struct rocket_core *core); +void rocket_job_fini(struct rocket_core *core); +int rocket_job_open(struct rocket_file_priv *rocket_priv); +void rocket_job_close(struct rocket_file_priv *rocket_priv); +int rocket_job_is_idle(struct rocket_core *core); + +#endif diff --git a/drivers/accel/rocket/rocket_registers.h b/drivers/accel/rocket/rocket_registers.h new file mode 100644 index 000000000000..9aef614c3470 --- /dev/null +++ b/drivers/accel/rocket/rocket_registers.h @@ -0,0 +1,4404 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ + +#ifndef __ROCKET_REGISTERS_XML__ +#define __ROCKET_REGISTERS_XML__ + +/* Autogenerated file, DO NOT EDIT manually! +
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository: +http://gitlab.freedesktop.org/mesa/mesa/ +git clone https://gitlab.freedesktop.org/mesa/mesa.git + +The rules-ng-ng source files this header was generated from are: + +- /home/tomeu/src/mesa/src/gallium/drivers/rocket/registers.xml ( 60076 bytes, from Wed Jun 12 10:02:25 2024) + +Copyright (C) 2024-2025 by the following authors: +- Tomeu Vizoso <tomeu@tomeuvizoso.net> +*/ + +#define REG_PC_VERSION 0x00000000 +#define PC_VERSION_VERSION__MASK 0xffffffff +#define PC_VERSION_VERSION__SHIFT 0 +static inline uint32_t PC_VERSION_VERSION(uint32_t val) +{ + return ((val) << PC_VERSION_VERSION__SHIFT) & PC_VERSION_VERSION__MASK; +} + +#define REG_PC_VERSION_NUM 0x00000004 +#define PC_VERSION_NUM_VERSION_NUM__MASK 0xffffffff +#define PC_VERSION_NUM_VERSION_NUM__SHIFT 0 +static inline uint32_t PC_VERSION_NUM_VERSION_NUM(uint32_t val) +{ + return ((val) << PC_VERSION_NUM_VERSION_NUM__SHIFT) & PC_VERSION_NUM_VERSION_NUM__MASK; +} + +#define REG_PC_OPERATION_ENABLE 0x00000008 +#define PC_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe +#define PC_OPERATION_ENABLE_RESERVED_0__SHIFT 1 +static inline uint32_t PC_OPERATION_ENABLE_RESERVED_0(uint32_t val) +{ + return ((val) << PC_OPERATION_ENABLE_RESERVED_0__SHIFT) & PC_OPERATION_ENABLE_RESERVED_0__MASK; +} +#define PC_OPERATION_ENABLE_OP_EN__MASK 0x00000001 +#define PC_OPERATION_ENABLE_OP_EN__SHIFT 0 +static inline uint32_t PC_OPERATION_ENABLE_OP_EN(uint32_t val) +{ + return ((val) << PC_OPERATION_ENABLE_OP_EN__SHIFT) & PC_OPERATION_ENABLE_OP_EN__MASK; +} + +#define REG_PC_BASE_ADDRESS 0x00000010 +#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK 0xfffffff0 +#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT 4 +static inline uint32_t PC_BASE_ADDRESS_PC_SOURCE_ADDR(uint32_t val) +{ + return ((val) << PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT) & PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK; +} +#define PC_BASE_ADDRESS_RESERVED_0__MASK 0x0000000e +#define PC_BASE_ADDRESS_RESERVED_0__SHIFT 1 +static inline uint32_t PC_BASE_ADDRESS_RESERVED_0(uint32_t val) +{ + return ((val) << PC_BASE_ADDRESS_RESERVED_0__SHIFT) & PC_BASE_ADDRESS_RESERVED_0__MASK; +} +#define PC_BASE_ADDRESS_PC_SEL__MASK 0x00000001 +#define PC_BASE_ADDRESS_PC_SEL__SHIFT 0 +static inline uint32_t PC_BASE_ADDRESS_PC_SEL(uint32_t val) +{ + return ((val) << PC_BASE_ADDRESS_PC_SEL__SHIFT) & PC_BASE_ADDRESS_PC_SEL__MASK; +} + +#define REG_PC_REGISTER_AMOUNTS 0x00000014 +#define
PC_REGISTER_AMOUNTS_RESERVED_0__MASK 0xffff0000 +#define PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT 16 +static inline uint32_t PC_REGISTER_AMOUNTS_RESERVED_0(uint32_t val) +{ + return ((val) << PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT) & PC_REGISTER_AMOUNTS_RESERVED_0__MASK; +} +#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK 0x0000ffff +#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT 0 +static inline uint32_t PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT(uint32_t val) +{ + return ((val) << PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT) & PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK; +} + +#define REG_PC_INTERRUPT_MASK 0x00000020 +#define PC_INTERRUPT_MASK_RESERVED_0__MASK 0xffffc000 +#define PC_INTERRUPT_MASK_RESERVED_0__SHIFT 14 +static inline uint32_t PC_INTERRUPT_MASK_RESERVED_0(uint32_t val) +{ + return ((val) << PC_INTERRUPT_MASK_RESERVED_0__SHIFT) & PC_INTERRUPT_MASK_RESERVED_0__MASK; +} +#define PC_INTERRUPT_MASK_DMA_WRITE_ERROR 0x00002000 +#define PC_INTERRUPT_MASK_DMA_READ_ERROR 0x00001000 +#define PC_INTERRUPT_MASK_PPU_1 0x00000800 +#define PC_INTERRUPT_MASK_PPU_0 0x00000400 +#define PC_INTERRUPT_MASK_DPU_1 0x00000200 +#define PC_INTERRUPT_MASK_DPU_0 0x00000100 +#define PC_INTERRUPT_MASK_CORE_1 0x00000080 +#define PC_INTERRUPT_MASK_CORE_0 0x00000040 +#define PC_INTERRUPT_MASK_CNA_CSC_1 0x00000020 +#define PC_INTERRUPT_MASK_CNA_CSC_0 0x00000010 +#define PC_INTERRUPT_MASK_CNA_WEIGHT_1 0x00000008 +#define PC_INTERRUPT_MASK_CNA_WEIGHT_0 0x00000004 +#define PC_INTERRUPT_MASK_CNA_FEATURE_1 0x00000002 +#define PC_INTERRUPT_MASK_CNA_FEATURE_0 0x00000001 + +#define REG_PC_INTERRUPT_CLEAR 0x00000024 +#define PC_INTERRUPT_CLEAR_RESERVED_0__MASK 0xffffc000 +#define PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT 14 +static inline uint32_t PC_INTERRUPT_CLEAR_RESERVED_0(uint32_t val) +{ + return ((val) << PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT) & PC_INTERRUPT_CLEAR_RESERVED_0__MASK; +} +#define PC_INTERRUPT_CLEAR_DMA_WRITE_ERROR 0x00002000 +#define PC_INTERRUPT_CLEAR_DMA_READ_ERROR 0x00001000 +#define PC_INTERRUPT_CLEAR_PPU_1 0x00000800 +#define PC_INTERRUPT_CLEAR_PPU_0 0x00000400 +#define PC_INTERRUPT_CLEAR_DPU_1 0x00000200 +#define PC_INTERRUPT_CLEAR_DPU_0 0x00000100 +#define PC_INTERRUPT_CLEAR_CORE_1 0x00000080 +#define PC_INTERRUPT_CLEAR_CORE_0 0x00000040 +#define PC_INTERRUPT_CLEAR_CNA_CSC_1 0x00000020 +#define PC_INTERRUPT_CLEAR_CNA_CSC_0 0x00000010 +#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_1 0x00000008 +#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_0 0x00000004 +#define PC_INTERRUPT_CLEAR_CNA_FEATURE_1 0x00000002 +#define PC_INTERRUPT_CLEAR_CNA_FEATURE_0 0x00000001 + +#define REG_PC_INTERRUPT_STATUS 0x00000028 +#define PC_INTERRUPT_STATUS_RESERVED_0__MASK 0xffffc000 +#define PC_INTERRUPT_STATUS_RESERVED_0__SHIFT 14 +static inline uint32_t PC_INTERRUPT_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << PC_INTERRUPT_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_STATUS_RESERVED_0__MASK; +} +#define PC_INTERRUPT_STATUS_DMA_WRITE_ERROR 0x00002000 +#define PC_INTERRUPT_STATUS_DMA_READ_ERROR 0x00001000 +#define PC_INTERRUPT_STATUS_PPU_1 0x00000800 +#define PC_INTERRUPT_STATUS_PPU_0 0x00000400 +#define PC_INTERRUPT_STATUS_DPU_1 0x00000200 +#define PC_INTERRUPT_STATUS_DPU_0 0x00000100 +#define PC_INTERRUPT_STATUS_CORE_1 0x00000080 +#define PC_INTERRUPT_STATUS_CORE_0 0x00000040 +#define PC_INTERRUPT_STATUS_CNA_CSC_1 0x00000020 +#define PC_INTERRUPT_STATUS_CNA_CSC_0 0x00000010 +#define PC_INTERRUPT_STATUS_CNA_WEIGHT_1 0x00000008 +#define PC_INTERRUPT_STATUS_CNA_WEIGHT_0 0x00000004 +#define PC_INTERRUPT_STATUS_CNA_FEATURE_1 0x00000002 +#define 
PC_INTERRUPT_STATUS_CNA_FEATURE_0 0x00000001 + +#define REG_PC_INTERRUPT_RAW_STATUS 0x0000002c +#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK 0xffffc000 +#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT 14 +static inline uint32_t PC_INTERRUPT_RAW_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK; +} +#define PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR 0x00002000 +#define PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR 0x00001000 +#define PC_INTERRUPT_RAW_STATUS_PPU_1 0x00000800 +#define PC_INTERRUPT_RAW_STATUS_PPU_0 0x00000400 +#define PC_INTERRUPT_RAW_STATUS_DPU_1 0x00000200 +#define PC_INTERRUPT_RAW_STATUS_DPU_0 0x00000100 +#define PC_INTERRUPT_RAW_STATUS_CORE_1 0x00000080 +#define PC_INTERRUPT_RAW_STATUS_CORE_0 0x00000040 +#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_1 0x00000020 +#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_0 0x00000010 +#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_1 0x00000008 +#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_0 0x00000004 +#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_1 0x00000002 +#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_0 0x00000001 + +#define REG_PC_TASK_CON 0x00000030 +#define PC_TASK_CON_RESERVED_0__MASK 0xffffc000 +#define PC_TASK_CON_RESERVED_0__SHIFT 14 +static inline uint32_t PC_TASK_CON_RESERVED_0(uint32_t val) +{ + return ((val) << PC_TASK_CON_RESERVED_0__SHIFT) & PC_TASK_CON_RESERVED_0__MASK; +} +#define PC_TASK_CON_TASK_COUNT_CLEAR__MASK 0x00002000 +#define PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT 13 +static inline uint32_t PC_TASK_CON_TASK_COUNT_CLEAR(uint32_t val) +{ + return ((val) << PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT) & PC_TASK_CON_TASK_COUNT_CLEAR__MASK; +} +#define PC_TASK_CON_TASK_PP_EN__MASK 0x00001000 +#define PC_TASK_CON_TASK_PP_EN__SHIFT 12 +static inline uint32_t PC_TASK_CON_TASK_PP_EN(uint32_t val) +{ + return ((val) << PC_TASK_CON_TASK_PP_EN__SHIFT) & PC_TASK_CON_TASK_PP_EN__MASK; +} +#define PC_TASK_CON_TASK_NUMBER__MASK 0x00000fff +#define PC_TASK_CON_TASK_NUMBER__SHIFT 0 +static inline uint32_t PC_TASK_CON_TASK_NUMBER(uint32_t val) +{ + return ((val) << PC_TASK_CON_TASK_NUMBER__SHIFT) & PC_TASK_CON_TASK_NUMBER__MASK; +} + +#define REG_PC_TASK_DMA_BASE_ADDR 0x00000034 +#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK 0xfffffff0 +#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT 4 +static inline uint32_t PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(uint32_t val) +{ + return ((val) << PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT) & PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK; +} +#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK 0x0000000f +#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT 0 +static inline uint32_t PC_TASK_DMA_BASE_ADDR_RESERVED_0(uint32_t val) +{ + return ((val) << PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT) & PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK; +} + +#define REG_PC_TASK_STATUS 0x0000003c +#define PC_TASK_STATUS_RESERVED_0__MASK 0xf0000000 +#define PC_TASK_STATUS_RESERVED_0__SHIFT 28 +static inline uint32_t PC_TASK_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << PC_TASK_STATUS_RESERVED_0__SHIFT) & PC_TASK_STATUS_RESERVED_0__MASK; +} +#define PC_TASK_STATUS_TASK_STATUS__MASK 0x0fffffff +#define PC_TASK_STATUS_TASK_STATUS__SHIFT 0 +static inline uint32_t PC_TASK_STATUS_TASK_STATUS(uint32_t val) +{ + return ((val) << PC_TASK_STATUS_TASK_STATUS__SHIFT) & PC_TASK_STATUS_TASK_STATUS__MASK; +} + +#define REG_CNA_S_STATUS 0x00001000 +#define CNA_S_STATUS_RESERVED_0__MASK 0xfffc0000 +#define CNA_S_STATUS_RESERVED_0__SHIFT 18 +static inline uint32_t 
CNA_S_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_S_STATUS_RESERVED_0__SHIFT) & CNA_S_STATUS_RESERVED_0__MASK; +} +#define CNA_S_STATUS_STATUS_1__MASK 0x00030000 +#define CNA_S_STATUS_STATUS_1__SHIFT 16 +static inline uint32_t CNA_S_STATUS_STATUS_1(uint32_t val) +{ + return ((val) << CNA_S_STATUS_STATUS_1__SHIFT) & CNA_S_STATUS_STATUS_1__MASK; +} +#define CNA_S_STATUS_RESERVED_1__MASK 0x0000fffc +#define CNA_S_STATUS_RESERVED_1__SHIFT 2 +static inline uint32_t CNA_S_STATUS_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_S_STATUS_RESERVED_1__SHIFT) & CNA_S_STATUS_RESERVED_1__MASK; +} +#define CNA_S_STATUS_STATUS_0__MASK 0x00000003 +#define CNA_S_STATUS_STATUS_0__SHIFT 0 +static inline uint32_t CNA_S_STATUS_STATUS_0(uint32_t val) +{ + return ((val) << CNA_S_STATUS_STATUS_0__SHIFT) & CNA_S_STATUS_STATUS_0__MASK; +} + +#define REG_CNA_S_POINTER 0x00001004 +#define CNA_S_POINTER_RESERVED_0__MASK 0xfffe0000 +#define CNA_S_POINTER_RESERVED_0__SHIFT 17 +static inline uint32_t CNA_S_POINTER_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_S_POINTER_RESERVED_0__SHIFT) & CNA_S_POINTER_RESERVED_0__MASK; +} +#define CNA_S_POINTER_EXECUTER__MASK 0x00010000 +#define CNA_S_POINTER_EXECUTER__SHIFT 16 +static inline uint32_t CNA_S_POINTER_EXECUTER(uint32_t val) +{ + return ((val) << CNA_S_POINTER_EXECUTER__SHIFT) & CNA_S_POINTER_EXECUTER__MASK; +} +#define CNA_S_POINTER_RESERVED_1__MASK 0x0000ffc0 +#define CNA_S_POINTER_RESERVED_1__SHIFT 6 +static inline uint32_t CNA_S_POINTER_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_S_POINTER_RESERVED_1__SHIFT) & CNA_S_POINTER_RESERVED_1__MASK; +} +#define CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020 +#define CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5 +static inline uint32_t CNA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val) +{ + return ((val) << CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK; +} +#define CNA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010 +#define CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4 +static inline uint32_t CNA_S_POINTER_POINTER_PP_CLEAR(uint32_t val) +{ + return ((val) << CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_POINTER_PP_CLEAR__MASK; +} +#define CNA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008 +#define CNA_S_POINTER_POINTER_PP_MODE__SHIFT 3 +static inline uint32_t CNA_S_POINTER_POINTER_PP_MODE(uint32_t val) +{ + return ((val) << CNA_S_POINTER_POINTER_PP_MODE__SHIFT) & CNA_S_POINTER_POINTER_PP_MODE__MASK; +} +#define CNA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004 +#define CNA_S_POINTER_EXECUTER_PP_EN__SHIFT 2 +static inline uint32_t CNA_S_POINTER_EXECUTER_PP_EN(uint32_t val) +{ + return ((val) << CNA_S_POINTER_EXECUTER_PP_EN__SHIFT) & CNA_S_POINTER_EXECUTER_PP_EN__MASK; +} +#define CNA_S_POINTER_POINTER_PP_EN__MASK 0x00000002 +#define CNA_S_POINTER_POINTER_PP_EN__SHIFT 1 +static inline uint32_t CNA_S_POINTER_POINTER_PP_EN(uint32_t val) +{ + return ((val) << CNA_S_POINTER_POINTER_PP_EN__SHIFT) & CNA_S_POINTER_POINTER_PP_EN__MASK; +} +#define CNA_S_POINTER_POINTER__MASK 0x00000001 +#define CNA_S_POINTER_POINTER__SHIFT 0 +static inline uint32_t CNA_S_POINTER_POINTER(uint32_t val) +{ + return ((val) << CNA_S_POINTER_POINTER__SHIFT) & CNA_S_POINTER_POINTER__MASK; +} + +#define REG_CNA_OPERATION_ENABLE 0x00001008 +#define CNA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe +#define CNA_OPERATION_ENABLE_RESERVED_0__SHIFT 1 +static inline uint32_t CNA_OPERATION_ENABLE_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_OPERATION_ENABLE_RESERVED_0__SHIFT) & CNA_OPERATION_ENABLE_RESERVED_0__MASK; +} 
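+/* Editorial note, not part of the generated XML: each unit in this file
+ * (PC, CNA, CORE, DPU) exposes an OPERATION_ENABLE register with this same
+ * layout, where bit 0 (OP_EN) enables the unit and bits 31:1 are reserved.
+ */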
+#define CNA_OPERATION_ENABLE_OP_EN__MASK 0x00000001 +#define CNA_OPERATION_ENABLE_OP_EN__SHIFT 0 +static inline uint32_t CNA_OPERATION_ENABLE_OP_EN(uint32_t val) +{ + return ((val) << CNA_OPERATION_ENABLE_OP_EN__SHIFT) & CNA_OPERATION_ENABLE_OP_EN__MASK; +} + +#define REG_CNA_CONV_CON1 0x0000100c +#define CNA_CONV_CON1_RESERVED_0__MASK 0x80000000 +#define CNA_CONV_CON1_RESERVED_0__SHIFT 31 +static inline uint32_t CNA_CONV_CON1_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_RESERVED_0__SHIFT) & CNA_CONV_CON1_RESERVED_0__MASK; +} +#define CNA_CONV_CON1_NONALIGN_DMA__MASK 0x40000000 +#define CNA_CONV_CON1_NONALIGN_DMA__SHIFT 30 +static inline uint32_t CNA_CONV_CON1_NONALIGN_DMA(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_NONALIGN_DMA__SHIFT) & CNA_CONV_CON1_NONALIGN_DMA__MASK; +} +#define CNA_CONV_CON1_GROUP_LINE_OFF__MASK 0x20000000 +#define CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT 29 +static inline uint32_t CNA_CONV_CON1_GROUP_LINE_OFF(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT) & CNA_CONV_CON1_GROUP_LINE_OFF__MASK; +} +#define CNA_CONV_CON1_RESERVED_1__MASK 0x1ffe0000 +#define CNA_CONV_CON1_RESERVED_1__SHIFT 17 +static inline uint32_t CNA_CONV_CON1_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_RESERVED_1__SHIFT) & CNA_CONV_CON1_RESERVED_1__MASK; +} +#define CNA_CONV_CON1_DECONV__MASK 0x00010000 +#define CNA_CONV_CON1_DECONV__SHIFT 16 +static inline uint32_t CNA_CONV_CON1_DECONV(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_DECONV__SHIFT) & CNA_CONV_CON1_DECONV__MASK; +} +#define CNA_CONV_CON1_ARGB_IN__MASK 0x0000f000 +#define CNA_CONV_CON1_ARGB_IN__SHIFT 12 +static inline uint32_t CNA_CONV_CON1_ARGB_IN(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_ARGB_IN__SHIFT) & CNA_CONV_CON1_ARGB_IN__MASK; +} +#define CNA_CONV_CON1_RESERVED_2__MASK 0x00000c00 +#define CNA_CONV_CON1_RESERVED_2__SHIFT 10 +static inline uint32_t CNA_CONV_CON1_RESERVED_2(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_RESERVED_2__SHIFT) & CNA_CONV_CON1_RESERVED_2__MASK; +} +#define CNA_CONV_CON1_PROC_PRECISION__MASK 0x00000380 +#define CNA_CONV_CON1_PROC_PRECISION__SHIFT 7 +static inline uint32_t CNA_CONV_CON1_PROC_PRECISION(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_PROC_PRECISION__SHIFT) & CNA_CONV_CON1_PROC_PRECISION__MASK; +} +#define CNA_CONV_CON1_IN_PRECISION__MASK 0x00000070 +#define CNA_CONV_CON1_IN_PRECISION__SHIFT 4 +static inline uint32_t CNA_CONV_CON1_IN_PRECISION(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_IN_PRECISION__SHIFT) & CNA_CONV_CON1_IN_PRECISION__MASK; +} +#define CNA_CONV_CON1_CONV_MODE__MASK 0x0000000f +#define CNA_CONV_CON1_CONV_MODE__SHIFT 0 +static inline uint32_t CNA_CONV_CON1_CONV_MODE(uint32_t val) +{ + return ((val) << CNA_CONV_CON1_CONV_MODE__SHIFT) & CNA_CONV_CON1_CONV_MODE__MASK; +} + +#define REG_CNA_CONV_CON2 0x00001010 +#define CNA_CONV_CON2_RESERVED_0__MASK 0xff000000 +#define CNA_CONV_CON2_RESERVED_0__SHIFT 24 +static inline uint32_t CNA_CONV_CON2_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_CONV_CON2_RESERVED_0__SHIFT) & CNA_CONV_CON2_RESERVED_0__MASK; +} +#define CNA_CONV_CON2_KERNEL_GROUP__MASK 0x00ff0000 +#define CNA_CONV_CON2_KERNEL_GROUP__SHIFT 16 +static inline uint32_t CNA_CONV_CON2_KERNEL_GROUP(uint32_t val) +{ + return ((val) << CNA_CONV_CON2_KERNEL_GROUP__SHIFT) & CNA_CONV_CON2_KERNEL_GROUP__MASK; +} +#define CNA_CONV_CON2_RESERVED_1__MASK 0x0000c000 +#define CNA_CONV_CON2_RESERVED_1__SHIFT 14 +static inline uint32_t CNA_CONV_CON2_RESERVED_1(uint32_t val) +{ + return ((val) << 
CNA_CONV_CON2_RESERVED_1__SHIFT) & CNA_CONV_CON2_RESERVED_1__MASK; +} +#define CNA_CONV_CON2_FEATURE_GRAINS__MASK 0x00003ff0 +#define CNA_CONV_CON2_FEATURE_GRAINS__SHIFT 4 +static inline uint32_t CNA_CONV_CON2_FEATURE_GRAINS(uint32_t val) +{ + return ((val) << CNA_CONV_CON2_FEATURE_GRAINS__SHIFT) & CNA_CONV_CON2_FEATURE_GRAINS__MASK; +} +#define CNA_CONV_CON2_RESERVED_2__MASK 0x00000008 +#define CNA_CONV_CON2_RESERVED_2__SHIFT 3 +static inline uint32_t CNA_CONV_CON2_RESERVED_2(uint32_t val) +{ + return ((val) << CNA_CONV_CON2_RESERVED_2__SHIFT) & CNA_CONV_CON2_RESERVED_2__MASK; +} +#define CNA_CONV_CON2_CSC_WO_EN__MASK 0x00000004 +#define CNA_CONV_CON2_CSC_WO_EN__SHIFT 2 +static inline uint32_t CNA_CONV_CON2_CSC_WO_EN(uint32_t val) +{ + return ((val) << CNA_CONV_CON2_CSC_WO_EN__SHIFT) & CNA_CONV_CON2_CSC_WO_EN__MASK; +} +#define CNA_CONV_CON2_CSC_DO_EN__MASK 0x00000002 +#define CNA_CONV_CON2_CSC_DO_EN__SHIFT 1 +static inline uint32_t CNA_CONV_CON2_CSC_DO_EN(uint32_t val) +{ + return ((val) << CNA_CONV_CON2_CSC_DO_EN__SHIFT) & CNA_CONV_CON2_CSC_DO_EN__MASK; +} +#define CNA_CONV_CON2_CMD_FIFO_SRST__MASK 0x00000001 +#define CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT 0 +static inline uint32_t CNA_CONV_CON2_CMD_FIFO_SRST(uint32_t val) +{ + return ((val) << CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT) & CNA_CONV_CON2_CMD_FIFO_SRST__MASK; +} + +#define REG_CNA_CONV_CON3 0x00001014 +#define CNA_CONV_CON3_RESERVED_0__MASK 0x80000000 +#define CNA_CONV_CON3_RESERVED_0__SHIFT 31 +static inline uint32_t CNA_CONV_CON3_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_RESERVED_0__SHIFT) & CNA_CONV_CON3_RESERVED_0__MASK; +} +#define CNA_CONV_CON3_NN_MODE__MASK 0x70000000 +#define CNA_CONV_CON3_NN_MODE__SHIFT 28 +static inline uint32_t CNA_CONV_CON3_NN_MODE(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_NN_MODE__SHIFT) & CNA_CONV_CON3_NN_MODE__MASK; +} +#define CNA_CONV_CON3_RESERVED_1__MASK 0x0c000000 +#define CNA_CONV_CON3_RESERVED_1__SHIFT 26 +static inline uint32_t CNA_CONV_CON3_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_RESERVED_1__SHIFT) & CNA_CONV_CON3_RESERVED_1__MASK; +} +#define CNA_CONV_CON3_ATROUS_Y_DILATION__MASK 0x03e00000 +#define CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT 21 +static inline uint32_t CNA_CONV_CON3_ATROUS_Y_DILATION(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_Y_DILATION__MASK; +} +#define CNA_CONV_CON3_ATROUS_X_DILATION__MASK 0x001f0000 +#define CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT 16 +static inline uint32_t CNA_CONV_CON3_ATROUS_X_DILATION(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_X_DILATION__MASK; +} +#define CNA_CONV_CON3_RESERVED_2__MASK 0x0000c000 +#define CNA_CONV_CON3_RESERVED_2__SHIFT 14 +static inline uint32_t CNA_CONV_CON3_RESERVED_2(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_RESERVED_2__SHIFT) & CNA_CONV_CON3_RESERVED_2__MASK; +} +#define CNA_CONV_CON3_DECONV_Y_STRIDE__MASK 0x00003800 +#define CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT 11 +static inline uint32_t CNA_CONV_CON3_DECONV_Y_STRIDE(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_Y_STRIDE__MASK; +} +#define CNA_CONV_CON3_DECONV_X_STRIDE__MASK 0x00000700 +#define CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT 8 +static inline uint32_t CNA_CONV_CON3_DECONV_X_STRIDE(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_X_STRIDE__MASK; +} +#define CNA_CONV_CON3_RESERVED_3__MASK 0x000000c0 +#define 
CNA_CONV_CON3_RESERVED_3__SHIFT 6 +static inline uint32_t CNA_CONV_CON3_RESERVED_3(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_RESERVED_3__SHIFT) & CNA_CONV_CON3_RESERVED_3__MASK; +} +#define CNA_CONV_CON3_CONV_Y_STRIDE__MASK 0x00000038 +#define CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT 3 +static inline uint32_t CNA_CONV_CON3_CONV_Y_STRIDE(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_Y_STRIDE__MASK; +} +#define CNA_CONV_CON3_CONV_X_STRIDE__MASK 0x00000007 +#define CNA_CONV_CON3_CONV_X_STRIDE__SHIFT 0 +static inline uint32_t CNA_CONV_CON3_CONV_X_STRIDE(uint32_t val) +{ + return ((val) << CNA_CONV_CON3_CONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_X_STRIDE__MASK; +} + +#define REG_CNA_DATA_SIZE0 0x00001020 +#define CNA_DATA_SIZE0_RESERVED_0__MASK 0xf8000000 +#define CNA_DATA_SIZE0_RESERVED_0__SHIFT 27 +static inline uint32_t CNA_DATA_SIZE0_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_DATA_SIZE0_RESERVED_0__MASK; +} +#define CNA_DATA_SIZE0_DATAIN_WIDTH__MASK 0x07ff0000 +#define CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT 16 +static inline uint32_t CNA_DATA_SIZE0_DATAIN_WIDTH(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT) & CNA_DATA_SIZE0_DATAIN_WIDTH__MASK; +} +#define CNA_DATA_SIZE0_RESERVED_1__MASK 0x0000f800 +#define CNA_DATA_SIZE0_RESERVED_1__SHIFT 11 +static inline uint32_t CNA_DATA_SIZE0_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_DATA_SIZE0_RESERVED_1__MASK; +} +#define CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK 0x000007ff +#define CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT 0 +static inline uint32_t CNA_DATA_SIZE0_DATAIN_HEIGHT(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT) & CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK; +} + +#define REG_CNA_DATA_SIZE1 0x00001024 +#define CNA_DATA_SIZE1_RESERVED_0__MASK 0xc0000000 +#define CNA_DATA_SIZE1_RESERVED_0__SHIFT 30 +static inline uint32_t CNA_DATA_SIZE1_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_DATA_SIZE1_RESERVED_0__MASK; +} +#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK 0x3fff0000 +#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT 16 +static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK; +} +#define CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK 0x0000ffff +#define CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT 0 +static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK; +} + +#define REG_CNA_DATA_SIZE2 0x00001028 +#define CNA_DATA_SIZE2_RESERVED_0__MASK 0xfffff800 +#define CNA_DATA_SIZE2_RESERVED_0__SHIFT 11 +static inline uint32_t CNA_DATA_SIZE2_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE2_RESERVED_0__SHIFT) & CNA_DATA_SIZE2_RESERVED_0__MASK; +} +#define CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK 0x000007ff +#define CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT 0 +static inline uint32_t CNA_DATA_SIZE2_DATAOUT_WIDTH(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT) & CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK; +} + +#define REG_CNA_DATA_SIZE3 0x0000102c +#define CNA_DATA_SIZE3_RESERVED_0__MASK 0xff000000 +#define CNA_DATA_SIZE3_RESERVED_0__SHIFT 24 +static inline uint32_t CNA_DATA_SIZE3_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE3_RESERVED_0__SHIFT) & CNA_DATA_SIZE3_RESERVED_0__MASK; +} +#define 
CNA_DATA_SIZE3_SURF_MODE__MASK 0x00c00000 +#define CNA_DATA_SIZE3_SURF_MODE__SHIFT 22 +static inline uint32_t CNA_DATA_SIZE3_SURF_MODE(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE3_SURF_MODE__SHIFT) & CNA_DATA_SIZE3_SURF_MODE__MASK; +} +#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK 0x003fffff +#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT 0 +static inline uint32_t CNA_DATA_SIZE3_DATAOUT_ATOMICS(uint32_t val) +{ + return ((val) << CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT) & CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK; +} + +#define REG_CNA_WEIGHT_SIZE0 0x00001030 +#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK 0xffffffff +#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT 0 +static inline uint32_t CNA_WEIGHT_SIZE0_WEIGHT_BYTES(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT) & CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK; +} + +#define REG_CNA_WEIGHT_SIZE1 0x00001034 +#define CNA_WEIGHT_SIZE1_RESERVED_0__MASK 0xfff80000 +#define CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT 19 +static inline uint32_t CNA_WEIGHT_SIZE1_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE1_RESERVED_0__MASK; +} +#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK 0x0007ffff +#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT 0 +static inline uint32_t CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT) & CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK; +} + +#define REG_CNA_WEIGHT_SIZE2 0x00001038 +#define CNA_WEIGHT_SIZE2_RESERVED_0__MASK 0xe0000000 +#define CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT 29 +static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_0__MASK; +} +#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK 0x1f000000 +#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT 24 +static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_WIDTH(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK; +} +#define CNA_WEIGHT_SIZE2_RESERVED_1__MASK 0x00e00000 +#define CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT 21 +static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_1__MASK; +} +#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK 0x001f0000 +#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT 16 +static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK; +} +#define CNA_WEIGHT_SIZE2_RESERVED_2__MASK 0x0000c000 +#define CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT 14 +static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_2(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_2__MASK; +} +#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK 0x00003fff +#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT 0 +static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_KERNELS(uint32_t val) +{ + return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK; +} + +#define REG_CNA_CBUF_CON0 0x00001040 +#define CNA_CBUF_CON0_RESERVED_0__MASK 0xffffc000 +#define CNA_CBUF_CON0_RESERVED_0__SHIFT 14 +static inline uint32_t CNA_CBUF_CON0_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_CBUF_CON0_RESERVED_0__SHIFT) & CNA_CBUF_CON0_RESERVED_0__MASK; +} +#define CNA_CBUF_CON0_WEIGHT_REUSE__MASK 0x00002000 +#define CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT 13 +static inline 
uint32_t CNA_CBUF_CON0_WEIGHT_REUSE(uint32_t val) +{ + return ((val) << CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT) & CNA_CBUF_CON0_WEIGHT_REUSE__MASK; +} +#define CNA_CBUF_CON0_DATA_REUSE__MASK 0x00001000 +#define CNA_CBUF_CON0_DATA_REUSE__SHIFT 12 +static inline uint32_t CNA_CBUF_CON0_DATA_REUSE(uint32_t val) +{ + return ((val) << CNA_CBUF_CON0_DATA_REUSE__SHIFT) & CNA_CBUF_CON0_DATA_REUSE__MASK; +} +#define CNA_CBUF_CON0_RESERVED_1__MASK 0x00000800 +#define CNA_CBUF_CON0_RESERVED_1__SHIFT 11 +static inline uint32_t CNA_CBUF_CON0_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_CBUF_CON0_RESERVED_1__SHIFT) & CNA_CBUF_CON0_RESERVED_1__MASK; +} +#define CNA_CBUF_CON0_FC_DATA_BANK__MASK 0x00000700 +#define CNA_CBUF_CON0_FC_DATA_BANK__SHIFT 8 +static inline uint32_t CNA_CBUF_CON0_FC_DATA_BANK(uint32_t val) +{ + return ((val) << CNA_CBUF_CON0_FC_DATA_BANK__SHIFT) & CNA_CBUF_CON0_FC_DATA_BANK__MASK; +} +#define CNA_CBUF_CON0_WEIGHT_BANK__MASK 0x000000f0 +#define CNA_CBUF_CON0_WEIGHT_BANK__SHIFT 4 +static inline uint32_t CNA_CBUF_CON0_WEIGHT_BANK(uint32_t val) +{ + return ((val) << CNA_CBUF_CON0_WEIGHT_BANK__SHIFT) & CNA_CBUF_CON0_WEIGHT_BANK__MASK; +} +#define CNA_CBUF_CON0_DATA_BANK__MASK 0x0000000f +#define CNA_CBUF_CON0_DATA_BANK__SHIFT 0 +static inline uint32_t CNA_CBUF_CON0_DATA_BANK(uint32_t val) +{ + return ((val) << CNA_CBUF_CON0_DATA_BANK__SHIFT) & CNA_CBUF_CON0_DATA_BANK__MASK; +} + +#define REG_CNA_CBUF_CON1 0x00001044 +#define CNA_CBUF_CON1_RESERVED_0__MASK 0xffffc000 +#define CNA_CBUF_CON1_RESERVED_0__SHIFT 14 +static inline uint32_t CNA_CBUF_CON1_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_CBUF_CON1_RESERVED_0__SHIFT) & CNA_CBUF_CON1_RESERVED_0__MASK; +} +#define CNA_CBUF_CON1_DATA_ENTRIES__MASK 0x00003fff +#define CNA_CBUF_CON1_DATA_ENTRIES__SHIFT 0 +static inline uint32_t CNA_CBUF_CON1_DATA_ENTRIES(uint32_t val) +{ + return ((val) << CNA_CBUF_CON1_DATA_ENTRIES__SHIFT) & CNA_CBUF_CON1_DATA_ENTRIES__MASK; +} + +#define REG_CNA_CVT_CON0 0x0000104c +#define CNA_CVT_CON0_RESERVED_0__MASK 0xf0000000 +#define CNA_CVT_CON0_RESERVED_0__SHIFT 28 +static inline uint32_t CNA_CVT_CON0_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_RESERVED_0__SHIFT) & CNA_CVT_CON0_RESERVED_0__MASK; +} +#define CNA_CVT_CON0_CVT_TRUNCATE_3__MASK 0x0fc00000 +#define CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT 22 +static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_3(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_3__MASK; +} +#define CNA_CVT_CON0_CVT_TRUNCATE_2__MASK 0x003f0000 +#define CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT 16 +static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_2(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_2__MASK; +} +#define CNA_CVT_CON0_CVT_TRUNCATE_1__MASK 0x0000fc00 +#define CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT 10 +static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_1(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_1__MASK; +} +#define CNA_CVT_CON0_CVT_TRUNCATE_0__MASK 0x000003f0 +#define CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT 4 +static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_0(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_0__MASK; +} +#define CNA_CVT_CON0_DATA_SIGN__MASK 0x00000008 +#define CNA_CVT_CON0_DATA_SIGN__SHIFT 3 +static inline uint32_t CNA_CVT_CON0_DATA_SIGN(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_DATA_SIGN__SHIFT) & CNA_CVT_CON0_DATA_SIGN__MASK; +} +#define 
CNA_CVT_CON0_ROUND_TYPE__MASK 0x00000004 +#define CNA_CVT_CON0_ROUND_TYPE__SHIFT 2 +static inline uint32_t CNA_CVT_CON0_ROUND_TYPE(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_ROUND_TYPE__SHIFT) & CNA_CVT_CON0_ROUND_TYPE__MASK; +} +#define CNA_CVT_CON0_CVT_TYPE__MASK 0x00000002 +#define CNA_CVT_CON0_CVT_TYPE__SHIFT 1 +static inline uint32_t CNA_CVT_CON0_CVT_TYPE(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_CVT_TYPE__SHIFT) & CNA_CVT_CON0_CVT_TYPE__MASK; +} +#define CNA_CVT_CON0_CVT_BYPASS__MASK 0x00000001 +#define CNA_CVT_CON0_CVT_BYPASS__SHIFT 0 +static inline uint32_t CNA_CVT_CON0_CVT_BYPASS(uint32_t val) +{ + return ((val) << CNA_CVT_CON0_CVT_BYPASS__SHIFT) & CNA_CVT_CON0_CVT_BYPASS__MASK; +} + +#define REG_CNA_CVT_CON1 0x00001050 +#define CNA_CVT_CON1_CVT_SCALE0__MASK 0xffff0000 +#define CNA_CVT_CON1_CVT_SCALE0__SHIFT 16 +static inline uint32_t CNA_CVT_CON1_CVT_SCALE0(uint32_t val) +{ + return ((val) << CNA_CVT_CON1_CVT_SCALE0__SHIFT) & CNA_CVT_CON1_CVT_SCALE0__MASK; +} +#define CNA_CVT_CON1_CVT_OFFSET0__MASK 0x0000ffff +#define CNA_CVT_CON1_CVT_OFFSET0__SHIFT 0 +static inline uint32_t CNA_CVT_CON1_CVT_OFFSET0(uint32_t val) +{ + return ((val) << CNA_CVT_CON1_CVT_OFFSET0__SHIFT) & CNA_CVT_CON1_CVT_OFFSET0__MASK; +} + +#define REG_CNA_CVT_CON2 0x00001054 +#define CNA_CVT_CON2_CVT_SCALE1__MASK 0xffff0000 +#define CNA_CVT_CON2_CVT_SCALE1__SHIFT 16 +static inline uint32_t CNA_CVT_CON2_CVT_SCALE1(uint32_t val) +{ + return ((val) << CNA_CVT_CON2_CVT_SCALE1__SHIFT) & CNA_CVT_CON2_CVT_SCALE1__MASK; +} +#define CNA_CVT_CON2_CVT_OFFSET1__MASK 0x0000ffff +#define CNA_CVT_CON2_CVT_OFFSET1__SHIFT 0 +static inline uint32_t CNA_CVT_CON2_CVT_OFFSET1(uint32_t val) +{ + return ((val) << CNA_CVT_CON2_CVT_OFFSET1__SHIFT) & CNA_CVT_CON2_CVT_OFFSET1__MASK; +} + +#define REG_CNA_CVT_CON3 0x00001058 +#define CNA_CVT_CON3_CVT_SCALE2__MASK 0xffff0000 +#define CNA_CVT_CON3_CVT_SCALE2__SHIFT 16 +static inline uint32_t CNA_CVT_CON3_CVT_SCALE2(uint32_t val) +{ + return ((val) << CNA_CVT_CON3_CVT_SCALE2__SHIFT) & CNA_CVT_CON3_CVT_SCALE2__MASK; +} +#define CNA_CVT_CON3_CVT_OFFSET2__MASK 0x0000ffff +#define CNA_CVT_CON3_CVT_OFFSET2__SHIFT 0 +static inline uint32_t CNA_CVT_CON3_CVT_OFFSET2(uint32_t val) +{ + return ((val) << CNA_CVT_CON3_CVT_OFFSET2__SHIFT) & CNA_CVT_CON3_CVT_OFFSET2__MASK; +} + +#define REG_CNA_CVT_CON4 0x0000105c +#define CNA_CVT_CON4_CVT_SCALE3__MASK 0xffff0000 +#define CNA_CVT_CON4_CVT_SCALE3__SHIFT 16 +static inline uint32_t CNA_CVT_CON4_CVT_SCALE3(uint32_t val) +{ + return ((val) << CNA_CVT_CON4_CVT_SCALE3__SHIFT) & CNA_CVT_CON4_CVT_SCALE3__MASK; +} +#define CNA_CVT_CON4_CVT_OFFSET3__MASK 0x0000ffff +#define CNA_CVT_CON4_CVT_OFFSET3__SHIFT 0 +static inline uint32_t CNA_CVT_CON4_CVT_OFFSET3(uint32_t val) +{ + return ((val) << CNA_CVT_CON4_CVT_OFFSET3__SHIFT) & CNA_CVT_CON4_CVT_OFFSET3__MASK; +} + +#define REG_CNA_FC_CON0 0x00001060 +#define CNA_FC_CON0_FC_SKIP_DATA__MASK 0xffff0000 +#define CNA_FC_CON0_FC_SKIP_DATA__SHIFT 16 +static inline uint32_t CNA_FC_CON0_FC_SKIP_DATA(uint32_t val) +{ + return ((val) << CNA_FC_CON0_FC_SKIP_DATA__SHIFT) & CNA_FC_CON0_FC_SKIP_DATA__MASK; +} +#define CNA_FC_CON0_RESERVED_0__MASK 0x0000fffe +#define CNA_FC_CON0_RESERVED_0__SHIFT 1 +static inline uint32_t CNA_FC_CON0_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_FC_CON0_RESERVED_0__SHIFT) & CNA_FC_CON0_RESERVED_0__MASK; +} +#define CNA_FC_CON0_FC_SKIP_EN__MASK 0x00000001 +#define CNA_FC_CON0_FC_SKIP_EN__SHIFT 0 +static inline uint32_t CNA_FC_CON0_FC_SKIP_EN(uint32_t val) +{ + return ((val) << 
CNA_FC_CON0_FC_SKIP_EN__SHIFT) & CNA_FC_CON0_FC_SKIP_EN__MASK; +} + +#define REG_CNA_FC_CON1 0x00001064 +#define CNA_FC_CON1_RESERVED_0__MASK 0xfffe0000 +#define CNA_FC_CON1_RESERVED_0__SHIFT 17 +static inline uint32_t CNA_FC_CON1_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_FC_CON1_RESERVED_0__SHIFT) & CNA_FC_CON1_RESERVED_0__MASK; +} +#define CNA_FC_CON1_DATA_OFFSET__MASK 0x0001ffff +#define CNA_FC_CON1_DATA_OFFSET__SHIFT 0 +static inline uint32_t CNA_FC_CON1_DATA_OFFSET(uint32_t val) +{ + return ((val) << CNA_FC_CON1_DATA_OFFSET__SHIFT) & CNA_FC_CON1_DATA_OFFSET__MASK; +} + +#define REG_CNA_PAD_CON0 0x00001068 +#define CNA_PAD_CON0_RESERVED_0__MASK 0xffffff00 +#define CNA_PAD_CON0_RESERVED_0__SHIFT 8 +static inline uint32_t CNA_PAD_CON0_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_PAD_CON0_RESERVED_0__SHIFT) & CNA_PAD_CON0_RESERVED_0__MASK; +} +#define CNA_PAD_CON0_PAD_LEFT__MASK 0x000000f0 +#define CNA_PAD_CON0_PAD_LEFT__SHIFT 4 +static inline uint32_t CNA_PAD_CON0_PAD_LEFT(uint32_t val) +{ + return ((val) << CNA_PAD_CON0_PAD_LEFT__SHIFT) & CNA_PAD_CON0_PAD_LEFT__MASK; +} +#define CNA_PAD_CON0_PAD_TOP__MASK 0x0000000f +#define CNA_PAD_CON0_PAD_TOP__SHIFT 0 +static inline uint32_t CNA_PAD_CON0_PAD_TOP(uint32_t val) +{ + return ((val) << CNA_PAD_CON0_PAD_TOP__SHIFT) & CNA_PAD_CON0_PAD_TOP__MASK; +} + +#define REG_CNA_FEATURE_DATA_ADDR 0x00001070 +#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK 0xffffffff +#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT 0 +static inline uint32_t CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR(uint32_t val) +{ + return ((val) << CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT) & CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK; +} + +#define REG_CNA_FC_CON2 0x00001074 +#define CNA_FC_CON2_RESERVED_0__MASK 0xfffe0000 +#define CNA_FC_CON2_RESERVED_0__SHIFT 17 +static inline uint32_t CNA_FC_CON2_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_FC_CON2_RESERVED_0__SHIFT) & CNA_FC_CON2_RESERVED_0__MASK; +} +#define CNA_FC_CON2_WEIGHT_OFFSET__MASK 0x0001ffff +#define CNA_FC_CON2_WEIGHT_OFFSET__SHIFT 0 +static inline uint32_t CNA_FC_CON2_WEIGHT_OFFSET(uint32_t val) +{ + return ((val) << CNA_FC_CON2_WEIGHT_OFFSET__SHIFT) & CNA_FC_CON2_WEIGHT_OFFSET__MASK; +} + +#define REG_CNA_DMA_CON0 0x00001078 +#define CNA_DMA_CON0_OV4K_BYPASS__MASK 0x80000000 +#define CNA_DMA_CON0_OV4K_BYPASS__SHIFT 31 +static inline uint32_t CNA_DMA_CON0_OV4K_BYPASS(uint32_t val) +{ + return ((val) << CNA_DMA_CON0_OV4K_BYPASS__SHIFT) & CNA_DMA_CON0_OV4K_BYPASS__MASK; +} +#define CNA_DMA_CON0_RESERVED_0__MASK 0x7ff00000 +#define CNA_DMA_CON0_RESERVED_0__SHIFT 20 +static inline uint32_t CNA_DMA_CON0_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_DMA_CON0_RESERVED_0__SHIFT) & CNA_DMA_CON0_RESERVED_0__MASK; +} +#define CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK 0x000f0000 +#define CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT 16 +static inline uint32_t CNA_DMA_CON0_WEIGHT_BURST_LEN(uint32_t val) +{ + return ((val) << CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT) & CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK; +} +#define CNA_DMA_CON0_RESERVED_1__MASK 0x0000fff0 +#define CNA_DMA_CON0_RESERVED_1__SHIFT 4 +static inline uint32_t CNA_DMA_CON0_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_DMA_CON0_RESERVED_1__SHIFT) & CNA_DMA_CON0_RESERVED_1__MASK; +} +#define CNA_DMA_CON0_DATA_BURST_LEN__MASK 0x0000000f +#define CNA_DMA_CON0_DATA_BURST_LEN__SHIFT 0 +static inline uint32_t CNA_DMA_CON0_DATA_BURST_LEN(uint32_t val) +{ + return ((val) << CNA_DMA_CON0_DATA_BURST_LEN__SHIFT) & CNA_DMA_CON0_DATA_BURST_LEN__MASK; +} 
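+
+/* Usage sketch (editorial, not from the generated XML): each FIELD(val)
+ * helper shifts val into its field position and masks it to the field
+ * width, so a full register word is composed by OR-ing helpers together,
+ * e.g.:
+ *
+ *   uint32_t con0 = CNA_DMA_CON0_WEIGHT_BURST_LEN(0xf) |
+ *                   CNA_DMA_CON0_DATA_BURST_LEN(0xf);
+ */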
+ +#define REG_CNA_DMA_CON1 0x0000107c +#define CNA_DMA_CON1_RESERVED_0__MASK 0xf0000000 +#define CNA_DMA_CON1_RESERVED_0__SHIFT 28 +static inline uint32_t CNA_DMA_CON1_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_DMA_CON1_RESERVED_0__SHIFT) & CNA_DMA_CON1_RESERVED_0__MASK; +} +#define CNA_DMA_CON1_LINE_STRIDE__MASK 0x0fffffff +#define CNA_DMA_CON1_LINE_STRIDE__SHIFT 0 +static inline uint32_t CNA_DMA_CON1_LINE_STRIDE(uint32_t val) +{ + return ((val) << CNA_DMA_CON1_LINE_STRIDE__SHIFT) & CNA_DMA_CON1_LINE_STRIDE__MASK; +} + +#define REG_CNA_DMA_CON2 0x00001080 +#define CNA_DMA_CON2_RESERVED_0__MASK 0xf0000000 +#define CNA_DMA_CON2_RESERVED_0__SHIFT 28 +static inline uint32_t CNA_DMA_CON2_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_DMA_CON2_RESERVED_0__SHIFT) & CNA_DMA_CON2_RESERVED_0__MASK; +} +#define CNA_DMA_CON2_SURF_STRIDE__MASK 0x0fffffff +#define CNA_DMA_CON2_SURF_STRIDE__SHIFT 0 +static inline uint32_t CNA_DMA_CON2_SURF_STRIDE(uint32_t val) +{ + return ((val) << CNA_DMA_CON2_SURF_STRIDE__SHIFT) & CNA_DMA_CON2_SURF_STRIDE__MASK; +} + +#define REG_CNA_FC_DATA_SIZE0 0x00001084 +#define CNA_FC_DATA_SIZE0_RESERVED_0__MASK 0xc0000000 +#define CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT 30 +static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_0__MASK; +} +#define CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK 0x3fff0000 +#define CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT 16 +static inline uint32_t CNA_FC_DATA_SIZE0_DMA_WIDTH(uint32_t val) +{ + return ((val) << CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT) & CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK; +} +#define CNA_FC_DATA_SIZE0_RESERVED_1__MASK 0x0000f800 +#define CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT 11 +static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_1(uint32_t val) +{ + return ((val) << CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_1__MASK; +} +#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK 0x000007ff +#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT 0 +static inline uint32_t CNA_FC_DATA_SIZE0_DMA_HEIGHT(uint32_t val) +{ + return ((val) << CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT) & CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK; +} + +#define REG_CNA_FC_DATA_SIZE1 0x00001088 +#define CNA_FC_DATA_SIZE1_RESERVED_0__MASK 0xffff0000 +#define CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT 16 +static inline uint32_t CNA_FC_DATA_SIZE1_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE1_RESERVED_0__MASK; +} +#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK 0x0000ffff +#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT 0 +static inline uint32_t CNA_FC_DATA_SIZE1_DMA_CHANNEL(uint32_t val) +{ + return ((val) << CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT) & CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK; +} + +#define REG_CNA_CLK_GATE 0x00001090 +#define CNA_CLK_GATE_RESERVED_0__MASK 0xffffffe0 +#define CNA_CLK_GATE_RESERVED_0__SHIFT 5 +static inline uint32_t CNA_CLK_GATE_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_CLK_GATE_RESERVED_0__SHIFT) & CNA_CLK_GATE_RESERVED_0__MASK; +} +#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK 0x00000010 +#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT 4 +static inline uint32_t CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE(uint32_t val) +{ + return ((val) << CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK; +} +#define CNA_CLK_GATE_RESERVED_1__MASK 0x00000008 +#define CNA_CLK_GATE_RESERVED_1__SHIFT 3 +static inline uint32_t CNA_CLK_GATE_RESERVED_1(uint32_t val) +{ + return ((val) << 
CNA_CLK_GATE_RESERVED_1__SHIFT) & CNA_CLK_GATE_RESERVED_1__MASK; +} +#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK 0x00000004 +#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT 2 +static inline uint32_t CNA_CLK_GATE_CSC_DISABLE_CLKGATE(uint32_t val) +{ + return ((val) << CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK; +} +#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK 0x00000002 +#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT 1 +static inline uint32_t CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE(uint32_t val) +{ + return ((val) << CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK; +} +#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK 0x00000001 +#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT 0 +static inline uint32_t CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE(uint32_t val) +{ + return ((val) << CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK; +} + +#define REG_CNA_DCOMP_CTRL 0x00001100 +#define CNA_DCOMP_CTRL_RESERVED_0__MASK 0xfffffff0 +#define CNA_DCOMP_CTRL_RESERVED_0__SHIFT 4 +static inline uint32_t CNA_DCOMP_CTRL_RESERVED_0(uint32_t val) +{ + return ((val) << CNA_DCOMP_CTRL_RESERVED_0__SHIFT) & CNA_DCOMP_CTRL_RESERVED_0__MASK; +} +#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK 0x00000008 +#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT 3 +static inline uint32_t CNA_DCOMP_CTRL_WT_DEC_BYPASS(uint32_t val) +{ + return ((val) << CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT) & CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK; +} +#define CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK 0x00000007 +#define CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT 0 +static inline uint32_t CNA_DCOMP_CTRL_DECOMP_CONTROL(uint32_t val) +{ + return ((val) << CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT) & CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK; +} + +#define REG_CNA_DCOMP_REGNUM 0x00001104 +#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK 0xffffffff +#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT 0 +static inline uint32_t CNA_DCOMP_REGNUM_DCOMP_REGNUM(uint32_t val) +{ + return ((val) << CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT) & CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK; +} + +#define REG_CNA_DCOMP_ADDR0 0x00001110 +#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK 0xffffffff +#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT 0 +static inline uint32_t CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0(uint32_t val) +{ + return ((val) << CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT) & CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT0 0x00001140 +#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT) & CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT1 0x00001144 +#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT) & CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT2 0x00001148 +#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT) & CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT3 0x0000114c +#define 
CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT) & CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT4 0x00001150 +#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT) & CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT5 0x00001154 +#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT) & CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT6 0x00001158 +#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT) & CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT7 0x0000115c +#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT) & CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT8 0x00001160 +#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT) & CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT9 0x00001164 +#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT) & CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT10 0x00001168 +#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT) & CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT11 0x0000116c +#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT) & CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT12 0x00001170 +#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT) & CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT13 0x00001174 +#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT) & 
CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT14 0x00001178 +#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT) & CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK; +} + +#define REG_CNA_DCOMP_AMOUNT15 0x0000117c +#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK 0xffffffff +#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT 0 +static inline uint32_t CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15(uint32_t val) +{ + return ((val) << CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT) & CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK; +} + +#define REG_CNA_CVT_CON5 0x00001180 +#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK 0xffffffff +#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT 0 +static inline uint32_t CNA_CVT_CON5_PER_CHANNEL_CVT_EN(uint32_t val) +{ + return ((val) << CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT) & CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK; +} + +#define REG_CNA_PAD_CON1 0x00001184 +#define CNA_PAD_CON1_PAD_VALUE__MASK 0xffffffff +#define CNA_PAD_CON1_PAD_VALUE__SHIFT 0 +static inline uint32_t CNA_PAD_CON1_PAD_VALUE(uint32_t val) +{ + return ((val) << CNA_PAD_CON1_PAD_VALUE__SHIFT) & CNA_PAD_CON1_PAD_VALUE__MASK; +} + +#define REG_CORE_S_STATUS 0x00003000 +#define CORE_S_STATUS_RESERVED_0__MASK 0xfffc0000 +#define CORE_S_STATUS_RESERVED_0__SHIFT 18 +static inline uint32_t CORE_S_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << CORE_S_STATUS_RESERVED_0__SHIFT) & CORE_S_STATUS_RESERVED_0__MASK; +} +#define CORE_S_STATUS_STATUS_1__MASK 0x00030000 +#define CORE_S_STATUS_STATUS_1__SHIFT 16 +static inline uint32_t CORE_S_STATUS_STATUS_1(uint32_t val) +{ + return ((val) << CORE_S_STATUS_STATUS_1__SHIFT) & CORE_S_STATUS_STATUS_1__MASK; +} +#define CORE_S_STATUS_RESERVED_1__MASK 0x0000fffc +#define CORE_S_STATUS_RESERVED_1__SHIFT 2 +static inline uint32_t CORE_S_STATUS_RESERVED_1(uint32_t val) +{ + return ((val) << CORE_S_STATUS_RESERVED_1__SHIFT) & CORE_S_STATUS_RESERVED_1__MASK; +} +#define CORE_S_STATUS_STATUS_0__MASK 0x00000003 +#define CORE_S_STATUS_STATUS_0__SHIFT 0 +static inline uint32_t CORE_S_STATUS_STATUS_0(uint32_t val) +{ + return ((val) << CORE_S_STATUS_STATUS_0__SHIFT) & CORE_S_STATUS_STATUS_0__MASK; +} + +#define REG_CORE_S_POINTER 0x00003004 +#define CORE_S_POINTER_RESERVED_0__MASK 0xfffe0000 +#define CORE_S_POINTER_RESERVED_0__SHIFT 17 +static inline uint32_t CORE_S_POINTER_RESERVED_0(uint32_t val) +{ + return ((val) << CORE_S_POINTER_RESERVED_0__SHIFT) & CORE_S_POINTER_RESERVED_0__MASK; +} +#define CORE_S_POINTER_EXECUTER__MASK 0x00010000 +#define CORE_S_POINTER_EXECUTER__SHIFT 16 +static inline uint32_t CORE_S_POINTER_EXECUTER(uint32_t val) +{ + return ((val) << CORE_S_POINTER_EXECUTER__SHIFT) & CORE_S_POINTER_EXECUTER__MASK; +} +#define CORE_S_POINTER_RESERVED_1__MASK 0x0000ffc0 +#define CORE_S_POINTER_RESERVED_1__SHIFT 6 +static inline uint32_t CORE_S_POINTER_RESERVED_1(uint32_t val) +{ + return ((val) << CORE_S_POINTER_RESERVED_1__SHIFT) & CORE_S_POINTER_RESERVED_1__MASK; +} +#define CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020 +#define CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5 +static inline uint32_t CORE_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val) +{ + return ((val) << CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK; +} +#define CORE_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010 +#define CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT 4 +static inline 
uint32_t CORE_S_POINTER_POINTER_PP_CLEAR(uint32_t val) +{ + return ((val) << CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_POINTER_PP_CLEAR__MASK; +} +#define CORE_S_POINTER_POINTER_PP_MODE__MASK 0x00000008 +#define CORE_S_POINTER_POINTER_PP_MODE__SHIFT 3 +static inline uint32_t CORE_S_POINTER_POINTER_PP_MODE(uint32_t val) +{ + return ((val) << CORE_S_POINTER_POINTER_PP_MODE__SHIFT) & CORE_S_POINTER_POINTER_PP_MODE__MASK; +} +#define CORE_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004 +#define CORE_S_POINTER_EXECUTER_PP_EN__SHIFT 2 +static inline uint32_t CORE_S_POINTER_EXECUTER_PP_EN(uint32_t val) +{ + return ((val) << CORE_S_POINTER_EXECUTER_PP_EN__SHIFT) & CORE_S_POINTER_EXECUTER_PP_EN__MASK; +} +#define CORE_S_POINTER_POINTER_PP_EN__MASK 0x00000002 +#define CORE_S_POINTER_POINTER_PP_EN__SHIFT 1 +static inline uint32_t CORE_S_POINTER_POINTER_PP_EN(uint32_t val) +{ + return ((val) << CORE_S_POINTER_POINTER_PP_EN__SHIFT) & CORE_S_POINTER_POINTER_PP_EN__MASK; +} +#define CORE_S_POINTER_POINTER__MASK 0x00000001 +#define CORE_S_POINTER_POINTER__SHIFT 0 +static inline uint32_t CORE_S_POINTER_POINTER(uint32_t val) +{ + return ((val) << CORE_S_POINTER_POINTER__SHIFT) & CORE_S_POINTER_POINTER__MASK; +} + +#define REG_CORE_OPERATION_ENABLE 0x00003008 +#define CORE_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe +#define CORE_OPERATION_ENABLE_RESERVED_0__SHIFT 1 +static inline uint32_t CORE_OPERATION_ENABLE_RESERVED_0(uint32_t val) +{ + return ((val) << CORE_OPERATION_ENABLE_RESERVED_0__SHIFT) & CORE_OPERATION_ENABLE_RESERVED_0__MASK; +} +#define CORE_OPERATION_ENABLE_OP_EN__MASK 0x00000001 +#define CORE_OPERATION_ENABLE_OP_EN__SHIFT 0 +static inline uint32_t CORE_OPERATION_ENABLE_OP_EN(uint32_t val) +{ + return ((val) << CORE_OPERATION_ENABLE_OP_EN__SHIFT) & CORE_OPERATION_ENABLE_OP_EN__MASK; +} + +#define REG_CORE_MAC_GATING 0x0000300c +#define CORE_MAC_GATING_RESERVED_0__MASK 0xf8000000 +#define CORE_MAC_GATING_RESERVED_0__SHIFT 27 +static inline uint32_t CORE_MAC_GATING_RESERVED_0(uint32_t val) +{ + return ((val) << CORE_MAC_GATING_RESERVED_0__SHIFT) & CORE_MAC_GATING_RESERVED_0__MASK; +} +#define CORE_MAC_GATING_SLCG_OP_EN__MASK 0x07ffffff +#define CORE_MAC_GATING_SLCG_OP_EN__SHIFT 0 +static inline uint32_t CORE_MAC_GATING_SLCG_OP_EN(uint32_t val) +{ + return ((val) << CORE_MAC_GATING_SLCG_OP_EN__SHIFT) & CORE_MAC_GATING_SLCG_OP_EN__MASK; +} + +#define REG_CORE_MISC_CFG 0x00003010 +#define CORE_MISC_CFG_RESERVED_0__MASK 0xfff00000 +#define CORE_MISC_CFG_RESERVED_0__SHIFT 20 +static inline uint32_t CORE_MISC_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << CORE_MISC_CFG_RESERVED_0__SHIFT) & CORE_MISC_CFG_RESERVED_0__MASK; +} +#define CORE_MISC_CFG_SOFT_GATING__MASK 0x000fc000 +#define CORE_MISC_CFG_SOFT_GATING__SHIFT 14 +static inline uint32_t CORE_MISC_CFG_SOFT_GATING(uint32_t val) +{ + return ((val) << CORE_MISC_CFG_SOFT_GATING__SHIFT) & CORE_MISC_CFG_SOFT_GATING__MASK; +} +#define CORE_MISC_CFG_RESERVED_1__MASK 0x00003800 +#define CORE_MISC_CFG_RESERVED_1__SHIFT 11 +static inline uint32_t CORE_MISC_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << CORE_MISC_CFG_RESERVED_1__SHIFT) & CORE_MISC_CFG_RESERVED_1__MASK; +} +#define CORE_MISC_CFG_PROC_PRECISION__MASK 0x00000700 +#define CORE_MISC_CFG_PROC_PRECISION__SHIFT 8 +static inline uint32_t CORE_MISC_CFG_PROC_PRECISION(uint32_t val) +{ + return ((val) << CORE_MISC_CFG_PROC_PRECISION__SHIFT) & CORE_MISC_CFG_PROC_PRECISION__MASK; +} +#define CORE_MISC_CFG_RESERVED_2__MASK 0x000000fc +#define CORE_MISC_CFG_RESERVED_2__SHIFT 2 +static 
inline uint32_t CORE_MISC_CFG_RESERVED_2(uint32_t val) +{ + return ((val) << CORE_MISC_CFG_RESERVED_2__SHIFT) & CORE_MISC_CFG_RESERVED_2__MASK; +} +#define CORE_MISC_CFG_DW_EN__MASK 0x00000002 +#define CORE_MISC_CFG_DW_EN__SHIFT 1 +static inline uint32_t CORE_MISC_CFG_DW_EN(uint32_t val) +{ + return ((val) << CORE_MISC_CFG_DW_EN__SHIFT) & CORE_MISC_CFG_DW_EN__MASK; +} +#define CORE_MISC_CFG_QD_EN__MASK 0x00000001 +#define CORE_MISC_CFG_QD_EN__SHIFT 0 +static inline uint32_t CORE_MISC_CFG_QD_EN(uint32_t val) +{ + return ((val) << CORE_MISC_CFG_QD_EN__SHIFT) & CORE_MISC_CFG_QD_EN__MASK; +} + +#define REG_CORE_DATAOUT_SIZE_0 0x00003014 +#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK 0xffff0000 +#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT 16 +static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT(uint32_t val) +{ + return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK; +} +#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK 0x0000ffff +#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT 0 +static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH(uint32_t val) +{ + return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK; +} + +#define REG_CORE_DATAOUT_SIZE_1 0x00003018 +#define CORE_DATAOUT_SIZE_1_RESERVED_0__MASK 0xffff0000 +#define CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT 16 +static inline uint32_t CORE_DATAOUT_SIZE_1_RESERVED_0(uint32_t val) +{ + return ((val) << CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT) & CORE_DATAOUT_SIZE_1_RESERVED_0__MASK; +} +#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK 0x0000ffff +#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT 0 +static inline uint32_t CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL(uint32_t val) +{ + return ((val) << CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT) & CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK; +} + +#define REG_CORE_CLIP_TRUNCATE 0x0000301c +#define CORE_CLIP_TRUNCATE_RESERVED_0__MASK 0xffffff80 +#define CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT 7 +static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_0(uint32_t val) +{ + return ((val) << CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_0__MASK; +} +#define CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK 0x00000040 +#define CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT 6 +static inline uint32_t CORE_CLIP_TRUNCATE_ROUND_TYPE(uint32_t val) +{ + return ((val) << CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT) & CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK; +} +#define CORE_CLIP_TRUNCATE_RESERVED_1__MASK 0x00000020 +#define CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT 5 +static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_1(uint32_t val) +{ + return ((val) << CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_1__MASK; +} +#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK 0x0000001f +#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT 0 +static inline uint32_t CORE_CLIP_TRUNCATE_CLIP_TRUNCATE(uint32_t val) +{ + return ((val) << CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT) & CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK; +} + +#define REG_DPU_S_STATUS 0x00004000 +#define DPU_S_STATUS_RESERVED_0__MASK 0xfffc0000 +#define DPU_S_STATUS_RESERVED_0__SHIFT 18 +static inline uint32_t DPU_S_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_S_STATUS_RESERVED_0__SHIFT) & DPU_S_STATUS_RESERVED_0__MASK; +} +#define DPU_S_STATUS_STATUS_1__MASK 0x00030000 +#define DPU_S_STATUS_STATUS_1__SHIFT 16 +static inline uint32_t DPU_S_STATUS_STATUS_1(uint32_t val) +{ + return ((val) << DPU_S_STATUS_STATUS_1__SHIFT) & DPU_S_STATUS_STATUS_1__MASK; +} +#define 
DPU_S_STATUS_RESERVED_1__MASK 0x0000fffc +#define DPU_S_STATUS_RESERVED_1__SHIFT 2 +static inline uint32_t DPU_S_STATUS_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_S_STATUS_RESERVED_1__SHIFT) & DPU_S_STATUS_RESERVED_1__MASK; +} +#define DPU_S_STATUS_STATUS_0__MASK 0x00000003 +#define DPU_S_STATUS_STATUS_0__SHIFT 0 +static inline uint32_t DPU_S_STATUS_STATUS_0(uint32_t val) +{ + return ((val) << DPU_S_STATUS_STATUS_0__SHIFT) & DPU_S_STATUS_STATUS_0__MASK; +} + +#define REG_DPU_S_POINTER 0x00004004 +#define DPU_S_POINTER_RESERVED_0__MASK 0xfffe0000 +#define DPU_S_POINTER_RESERVED_0__SHIFT 17 +static inline uint32_t DPU_S_POINTER_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_S_POINTER_RESERVED_0__SHIFT) & DPU_S_POINTER_RESERVED_0__MASK; +} +#define DPU_S_POINTER_EXECUTER__MASK 0x00010000 +#define DPU_S_POINTER_EXECUTER__SHIFT 16 +static inline uint32_t DPU_S_POINTER_EXECUTER(uint32_t val) +{ + return ((val) << DPU_S_POINTER_EXECUTER__SHIFT) & DPU_S_POINTER_EXECUTER__MASK; +} +#define DPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0 +#define DPU_S_POINTER_RESERVED_1__SHIFT 6 +static inline uint32_t DPU_S_POINTER_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_S_POINTER_RESERVED_1__SHIFT) & DPU_S_POINTER_RESERVED_1__MASK; +} +#define DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020 +#define DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5 +static inline uint32_t DPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val) +{ + return ((val) << DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK; +} +#define DPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010 +#define DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4 +static inline uint32_t DPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val) +{ + return ((val) << DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_POINTER_PP_CLEAR__MASK; +} +#define DPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008 +#define DPU_S_POINTER_POINTER_PP_MODE__SHIFT 3 +static inline uint32_t DPU_S_POINTER_POINTER_PP_MODE(uint32_t val) +{ + return ((val) << DPU_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_S_POINTER_POINTER_PP_MODE__MASK; +} +#define DPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004 +#define DPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2 +static inline uint32_t DPU_S_POINTER_EXECUTER_PP_EN(uint32_t val) +{ + return ((val) << DPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_S_POINTER_EXECUTER_PP_EN__MASK; +} +#define DPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002 +#define DPU_S_POINTER_POINTER_PP_EN__SHIFT 1 +static inline uint32_t DPU_S_POINTER_POINTER_PP_EN(uint32_t val) +{ + return ((val) << DPU_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_S_POINTER_POINTER_PP_EN__MASK; +} +#define DPU_S_POINTER_POINTER__MASK 0x00000001 +#define DPU_S_POINTER_POINTER__SHIFT 0 +static inline uint32_t DPU_S_POINTER_POINTER(uint32_t val) +{ + return ((val) << DPU_S_POINTER_POINTER__SHIFT) & DPU_S_POINTER_POINTER__MASK; +} + +#define REG_DPU_OPERATION_ENABLE 0x00004008 +#define DPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe +#define DPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1 +static inline uint32_t DPU_OPERATION_ENABLE_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_OPERATION_ENABLE_RESERVED_0__MASK; +} +#define DPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001 +#define DPU_OPERATION_ENABLE_OP_EN__SHIFT 0 +static inline uint32_t DPU_OPERATION_ENABLE_OP_EN(uint32_t val) +{ + return ((val) << DPU_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_OPERATION_ENABLE_OP_EN__MASK; +} + +#define REG_DPU_FEATURE_MODE_CFG 0x0000400c +#define DPU_FEATURE_MODE_CFG_COMB_USE__MASK 0x80000000 +#define 
DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT 31 +static inline uint32_t DPU_FEATURE_MODE_CFG_COMB_USE(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_FEATURE_MODE_CFG_COMB_USE__MASK; +} +#define DPU_FEATURE_MODE_CFG_TP_EN__MASK 0x40000000 +#define DPU_FEATURE_MODE_CFG_TP_EN__SHIFT 30 +static inline uint32_t DPU_FEATURE_MODE_CFG_TP_EN(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_TP_EN__SHIFT) & DPU_FEATURE_MODE_CFG_TP_EN__MASK; +} +#define DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK 0x3c000000 +#define DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT 26 +static inline uint32_t DPU_FEATURE_MODE_CFG_RGP_TYPE(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT) & DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK; +} +#define DPU_FEATURE_MODE_CFG_NONALIGN__MASK 0x02000000 +#define DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT 25 +static inline uint32_t DPU_FEATURE_MODE_CFG_NONALIGN(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT) & DPU_FEATURE_MODE_CFG_NONALIGN__MASK; +} +#define DPU_FEATURE_MODE_CFG_SURF_LEN__MASK 0x01fffe00 +#define DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT 9 +static inline uint32_t DPU_FEATURE_MODE_CFG_SURF_LEN(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_SURF_LEN__MASK; +} +#define DPU_FEATURE_MODE_CFG_BURST_LEN__MASK 0x000001e0 +#define DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT 5 +static inline uint32_t DPU_FEATURE_MODE_CFG_BURST_LEN(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_BURST_LEN__MASK; +} +#define DPU_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000018 +#define DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT 3 +static inline uint32_t DPU_FEATURE_MODE_CFG_CONV_MODE(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_CONV_MODE__MASK; +} +#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK 0x00000006 +#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT 1 +static inline uint32_t DPU_FEATURE_MODE_CFG_OUTPUT_MODE(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK; +} +#define DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001 +#define DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0 +static inline uint32_t DPU_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val) +{ + return ((val) << DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK; +} + +#define REG_DPU_DATA_FORMAT 0x00004010 +#define DPU_DATA_FORMAT_OUT_PRECISION__MASK 0xe0000000 +#define DPU_DATA_FORMAT_OUT_PRECISION__SHIFT 29 +static inline uint32_t DPU_DATA_FORMAT_OUT_PRECISION(uint32_t val) +{ + return ((val) << DPU_DATA_FORMAT_OUT_PRECISION__SHIFT) & DPU_DATA_FORMAT_OUT_PRECISION__MASK; +} +#define DPU_DATA_FORMAT_IN_PRECISION__MASK 0x1c000000 +#define DPU_DATA_FORMAT_IN_PRECISION__SHIFT 26 +static inline uint32_t DPU_DATA_FORMAT_IN_PRECISION(uint32_t val) +{ + return ((val) << DPU_DATA_FORMAT_IN_PRECISION__SHIFT) & DPU_DATA_FORMAT_IN_PRECISION__MASK; +} +#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK 0x03ff0000 +#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT 16 +static inline uint32_t DPU_DATA_FORMAT_EW_TRUNCATE_NEG(uint32_t val) +{ + return ((val) << DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT) & DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK; +} +#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK 0x0000fc00 +#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT 10 +static inline uint32_t DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG(uint32_t val) +{ + return ((val) << 
DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK; +} +#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK 0x000003f0 +#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT 4 +static inline uint32_t DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG(uint32_t val) +{ + return ((val) << DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK; +} +#define DPU_DATA_FORMAT_MC_SURF_OUT__MASK 0x00000008 +#define DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT 3 +static inline uint32_t DPU_DATA_FORMAT_MC_SURF_OUT(uint32_t val) +{ + return ((val) << DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT) & DPU_DATA_FORMAT_MC_SURF_OUT__MASK; +} +#define DPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007 +#define DPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0 +static inline uint32_t DPU_DATA_FORMAT_PROC_PRECISION(uint32_t val) +{ + return ((val) << DPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & DPU_DATA_FORMAT_PROC_PRECISION__MASK; +} + +#define REG_DPU_OFFSET_PEND 0x00004014 +#define DPU_OFFSET_PEND_RESERVED_0__MASK 0xffff0000 +#define DPU_OFFSET_PEND_RESERVED_0__SHIFT 16 +static inline uint32_t DPU_OFFSET_PEND_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_OFFSET_PEND_RESERVED_0__SHIFT) & DPU_OFFSET_PEND_RESERVED_0__MASK; +} +#define DPU_OFFSET_PEND_OFFSET_PEND__MASK 0x0000ffff +#define DPU_OFFSET_PEND_OFFSET_PEND__SHIFT 0 +static inline uint32_t DPU_OFFSET_PEND_OFFSET_PEND(uint32_t val) +{ + return ((val) << DPU_OFFSET_PEND_OFFSET_PEND__SHIFT) & DPU_OFFSET_PEND_OFFSET_PEND__MASK; +} + +#define REG_DPU_DST_BASE_ADDR 0x00004020 +#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xffffffff +#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 0 +static inline uint32_t DPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val) +{ + return ((val) << DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK; +} + +#define REG_DPU_DST_SURF_STRIDE 0x00004024 +#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0 +#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4 +static inline uint32_t DPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val) +{ + return ((val) << DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK; +} +#define DPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f +#define DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0 +static inline uint32_t DPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_DST_SURF_STRIDE_RESERVED_0__MASK; +} + +#define REG_DPU_DATA_CUBE_WIDTH 0x00004030 +#define DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000 +#define DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13 +static inline uint32_t DPU_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK; +} +#define DPU_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff +#define DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT 0 +static inline uint32_t DPU_DATA_CUBE_WIDTH_WIDTH(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_DATA_CUBE_WIDTH_WIDTH__MASK; +} + +#define REG_DPU_DATA_CUBE_HEIGHT 0x00004034 +#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xfe000000 +#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 25 +static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK; +} +#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK 0x01c00000 +#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT 22 +static inline uint32_t 
DPU_DATA_CUBE_HEIGHT_MINMAX_CTL(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT) & DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK; +} +#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x003fe000 +#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13 +static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK; +} +#define DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff +#define DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0 +static inline uint32_t DPU_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK; +} + +#define REG_DPU_DATA_CUBE_NOTCH_ADDR 0x00004038 +#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK 0xe0000000 +#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT 29 +static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK; +} +#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK 0x1fff0000 +#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT 16 +static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK; +} +#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK 0x0000e000 +#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT 13 +static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK; +} +#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK 0x00001fff +#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT 0 +static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK; +} + +#define REG_DPU_DATA_CUBE_CHANNEL 0x0000403c +#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xe0000000 +#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 29 +static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK; +} +#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK 0x1fff0000 +#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT 16 +static inline uint32_t DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK; +} +#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK 0x0000e000 +#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT 13 +static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK; +} +#define DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff +#define DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0 +static inline uint32_t DPU_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val) +{ + return ((val) << DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK; +} + +#define REG_DPU_BS_CFG 0x00004040 +#define DPU_BS_CFG_RESERVED_0__MASK 0xfff00000 +#define DPU_BS_CFG_RESERVED_0__SHIFT 20 +static inline uint32_t DPU_BS_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_BS_CFG_RESERVED_0__SHIFT) & DPU_BS_CFG_RESERVED_0__MASK; +} +#define DPU_BS_CFG_BS_ALU_ALGO__MASK 0x000f0000 +#define DPU_BS_CFG_BS_ALU_ALGO__SHIFT 
16 +static inline uint32_t DPU_BS_CFG_BS_ALU_ALGO(uint32_t val) +{ + return ((val) << DPU_BS_CFG_BS_ALU_ALGO__SHIFT) & DPU_BS_CFG_BS_ALU_ALGO__MASK; +} +#define DPU_BS_CFG_RESERVED_1__MASK 0x0000fe00 +#define DPU_BS_CFG_RESERVED_1__SHIFT 9 +static inline uint32_t DPU_BS_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_BS_CFG_RESERVED_1__SHIFT) & DPU_BS_CFG_RESERVED_1__MASK; +} +#define DPU_BS_CFG_BS_ALU_SRC__MASK 0x00000100 +#define DPU_BS_CFG_BS_ALU_SRC__SHIFT 8 +static inline uint32_t DPU_BS_CFG_BS_ALU_SRC(uint32_t val) +{ + return ((val) << DPU_BS_CFG_BS_ALU_SRC__SHIFT) & DPU_BS_CFG_BS_ALU_SRC__MASK; +} +#define DPU_BS_CFG_BS_RELUX_EN__MASK 0x00000080 +#define DPU_BS_CFG_BS_RELUX_EN__SHIFT 7 +static inline uint32_t DPU_BS_CFG_BS_RELUX_EN(uint32_t val) +{ + return ((val) << DPU_BS_CFG_BS_RELUX_EN__SHIFT) & DPU_BS_CFG_BS_RELUX_EN__MASK; +} +#define DPU_BS_CFG_BS_RELU_BYPASS__MASK 0x00000040 +#define DPU_BS_CFG_BS_RELU_BYPASS__SHIFT 6 +static inline uint32_t DPU_BS_CFG_BS_RELU_BYPASS(uint32_t val) +{ + return ((val) << DPU_BS_CFG_BS_RELU_BYPASS__SHIFT) & DPU_BS_CFG_BS_RELU_BYPASS__MASK; +} +#define DPU_BS_CFG_BS_MUL_PRELU__MASK 0x00000020 +#define DPU_BS_CFG_BS_MUL_PRELU__SHIFT 5 +static inline uint32_t DPU_BS_CFG_BS_MUL_PRELU(uint32_t val) +{ + return ((val) << DPU_BS_CFG_BS_MUL_PRELU__SHIFT) & DPU_BS_CFG_BS_MUL_PRELU__MASK; +} +#define DPU_BS_CFG_BS_MUL_BYPASS__MASK 0x00000010 +#define DPU_BS_CFG_BS_MUL_BYPASS__SHIFT 4 +static inline uint32_t DPU_BS_CFG_BS_MUL_BYPASS(uint32_t val) +{ + return ((val) << DPU_BS_CFG_BS_MUL_BYPASS__SHIFT) & DPU_BS_CFG_BS_MUL_BYPASS__MASK; +} +#define DPU_BS_CFG_RESERVED_2__MASK 0x0000000c +#define DPU_BS_CFG_RESERVED_2__SHIFT 2 +static inline uint32_t DPU_BS_CFG_RESERVED_2(uint32_t val) +{ + return ((val) << DPU_BS_CFG_RESERVED_2__SHIFT) & DPU_BS_CFG_RESERVED_2__MASK; +} +#define DPU_BS_CFG_BS_ALU_BYPASS__MASK 0x00000002 +#define DPU_BS_CFG_BS_ALU_BYPASS__SHIFT 1 +static inline uint32_t DPU_BS_CFG_BS_ALU_BYPASS(uint32_t val) +{ + return ((val) << DPU_BS_CFG_BS_ALU_BYPASS__SHIFT) & DPU_BS_CFG_BS_ALU_BYPASS__MASK; +} +#define DPU_BS_CFG_BS_BYPASS__MASK 0x00000001 +#define DPU_BS_CFG_BS_BYPASS__SHIFT 0 +static inline uint32_t DPU_BS_CFG_BS_BYPASS(uint32_t val) +{ + return ((val) << DPU_BS_CFG_BS_BYPASS__SHIFT) & DPU_BS_CFG_BS_BYPASS__MASK; +} + +#define REG_DPU_BS_ALU_CFG 0x00004044 +#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK 0xffffffff +#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT 0 +static inline uint32_t DPU_BS_ALU_CFG_BS_ALU_OPERAND(uint32_t val) +{ + return ((val) << DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT) & DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK; +} + +#define REG_DPU_BS_MUL_CFG 0x00004048 +#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK 0xffff0000 +#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT 16 +static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_OPERAND(uint32_t val) +{ + return ((val) << DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK; +} +#define DPU_BS_MUL_CFG_RESERVED_0__MASK 0x0000c000 +#define DPU_BS_MUL_CFG_RESERVED_0__SHIFT 14 +static inline uint32_t DPU_BS_MUL_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_BS_MUL_CFG_RESERVED_0__SHIFT) & DPU_BS_MUL_CFG_RESERVED_0__MASK; +} +#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK 0x00003f00 +#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT 8 +static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE(uint32_t val) +{ + return ((val) << DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK; +} +#define DPU_BS_MUL_CFG_RESERVED_1__MASK 0x000000fc 
+#define DPU_BS_MUL_CFG_RESERVED_1__SHIFT 2 +static inline uint32_t DPU_BS_MUL_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_BS_MUL_CFG_RESERVED_1__SHIFT) & DPU_BS_MUL_CFG_RESERVED_1__MASK; +} +#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK 0x00000002 +#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT 1 +static inline uint32_t DPU_BS_MUL_CFG_BS_TRUNCATE_SRC(uint32_t val) +{ + return ((val) << DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK; +} +#define DPU_BS_MUL_CFG_BS_MUL_SRC__MASK 0x00000001 +#define DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT 0 +static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SRC(uint32_t val) +{ + return ((val) << DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SRC__MASK; +} + +#define REG_DPU_BS_RELUX_CMP_VALUE 0x0000404c +#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK 0xffffffff +#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT 0 +static inline uint32_t DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT(uint32_t val) +{ + return ((val) << DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT) & DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK; +} + +#define REG_DPU_BS_OW_CFG 0x00004050 +#define DPU_BS_OW_CFG_RGP_CNTER__MASK 0xf0000000 +#define DPU_BS_OW_CFG_RGP_CNTER__SHIFT 28 +static inline uint32_t DPU_BS_OW_CFG_RGP_CNTER(uint32_t val) +{ + return ((val) << DPU_BS_OW_CFG_RGP_CNTER__SHIFT) & DPU_BS_OW_CFG_RGP_CNTER__MASK; +} +#define DPU_BS_OW_CFG_TP_ORG_EN__MASK 0x08000000 +#define DPU_BS_OW_CFG_TP_ORG_EN__SHIFT 27 +static inline uint32_t DPU_BS_OW_CFG_TP_ORG_EN(uint32_t val) +{ + return ((val) << DPU_BS_OW_CFG_TP_ORG_EN__SHIFT) & DPU_BS_OW_CFG_TP_ORG_EN__MASK; +} +#define DPU_BS_OW_CFG_RESERVED_0__MASK 0x07fff800 +#define DPU_BS_OW_CFG_RESERVED_0__SHIFT 11 +static inline uint32_t DPU_BS_OW_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_BS_OW_CFG_RESERVED_0__SHIFT) & DPU_BS_OW_CFG_RESERVED_0__MASK; +} +#define DPU_BS_OW_CFG_SIZE_E_2__MASK 0x00000700 +#define DPU_BS_OW_CFG_SIZE_E_2__SHIFT 8 +static inline uint32_t DPU_BS_OW_CFG_SIZE_E_2(uint32_t val) +{ + return ((val) << DPU_BS_OW_CFG_SIZE_E_2__SHIFT) & DPU_BS_OW_CFG_SIZE_E_2__MASK; +} +#define DPU_BS_OW_CFG_SIZE_E_1__MASK 0x000000e0 +#define DPU_BS_OW_CFG_SIZE_E_1__SHIFT 5 +static inline uint32_t DPU_BS_OW_CFG_SIZE_E_1(uint32_t val) +{ + return ((val) << DPU_BS_OW_CFG_SIZE_E_1__SHIFT) & DPU_BS_OW_CFG_SIZE_E_1__MASK; +} +#define DPU_BS_OW_CFG_SIZE_E_0__MASK 0x0000001c +#define DPU_BS_OW_CFG_SIZE_E_0__SHIFT 2 +static inline uint32_t DPU_BS_OW_CFG_SIZE_E_0(uint32_t val) +{ + return ((val) << DPU_BS_OW_CFG_SIZE_E_0__SHIFT) & DPU_BS_OW_CFG_SIZE_E_0__MASK; +} +#define DPU_BS_OW_CFG_OD_BYPASS__MASK 0x00000002 +#define DPU_BS_OW_CFG_OD_BYPASS__SHIFT 1 +static inline uint32_t DPU_BS_OW_CFG_OD_BYPASS(uint32_t val) +{ + return ((val) << DPU_BS_OW_CFG_OD_BYPASS__SHIFT) & DPU_BS_OW_CFG_OD_BYPASS__MASK; +} +#define DPU_BS_OW_CFG_OW_SRC__MASK 0x00000001 +#define DPU_BS_OW_CFG_OW_SRC__SHIFT 0 +static inline uint32_t DPU_BS_OW_CFG_OW_SRC(uint32_t val) +{ + return ((val) << DPU_BS_OW_CFG_OW_SRC__SHIFT) & DPU_BS_OW_CFG_OW_SRC__MASK; +} + +#define REG_DPU_BS_OW_OP 0x00004054 +#define DPU_BS_OW_OP_RESERVED_0__MASK 0xffff0000 +#define DPU_BS_OW_OP_RESERVED_0__SHIFT 16 +static inline uint32_t DPU_BS_OW_OP_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_BS_OW_OP_RESERVED_0__SHIFT) & DPU_BS_OW_OP_RESERVED_0__MASK; +} +#define DPU_BS_OW_OP_OW_OP__MASK 0x0000ffff +#define DPU_BS_OW_OP_OW_OP__SHIFT 0 +static inline uint32_t DPU_BS_OW_OP_OW_OP(uint32_t val) +{ + return ((val) << DPU_BS_OW_OP_OW_OP__SHIFT) & 
DPU_BS_OW_OP_OW_OP__MASK; +} + +#define REG_DPU_WDMA_SIZE_0 0x00004058 +#define DPU_WDMA_SIZE_0_RESERVED_0__MASK 0xf0000000 +#define DPU_WDMA_SIZE_0_RESERVED_0__SHIFT 28 +static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_0_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_0__MASK; +} +#define DPU_WDMA_SIZE_0_TP_PRECISION__MASK 0x08000000 +#define DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT 27 +static inline uint32_t DPU_WDMA_SIZE_0_TP_PRECISION(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT) & DPU_WDMA_SIZE_0_TP_PRECISION__MASK; +} +#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK 0x07ff0000 +#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT 16 +static inline uint32_t DPU_WDMA_SIZE_0_SIZE_C_WDMA(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT) & DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK; +} +#define DPU_WDMA_SIZE_0_RESERVED_1__MASK 0x0000e000 +#define DPU_WDMA_SIZE_0_RESERVED_1__SHIFT 13 +static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_0_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_1__MASK; +} +#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK 0x00001fff +#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT 0 +static inline uint32_t DPU_WDMA_SIZE_0_CHANNEL_WDMA(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT) & DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK; +} + +#define REG_DPU_WDMA_SIZE_1 0x0000405c +#define DPU_WDMA_SIZE_1_RESERVED_0__MASK 0xe0000000 +#define DPU_WDMA_SIZE_1_RESERVED_0__SHIFT 29 +static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_1_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_0__MASK; +} +#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK 0x1fff0000 +#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT 16 +static inline uint32_t DPU_WDMA_SIZE_1_HEIGHT_WDMA(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT) & DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK; +} +#define DPU_WDMA_SIZE_1_RESERVED_1__MASK 0x0000e000 +#define DPU_WDMA_SIZE_1_RESERVED_1__SHIFT 13 +static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_1_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_1__MASK; +} +#define DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK 0x00001fff +#define DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT 0 +static inline uint32_t DPU_WDMA_SIZE_1_WIDTH_WDMA(uint32_t val) +{ + return ((val) << DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT) & DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK; +} + +#define REG_DPU_BN_CFG 0x00004060 +#define DPU_BN_CFG_RESERVED_0__MASK 0xfff00000 +#define DPU_BN_CFG_RESERVED_0__SHIFT 20 +static inline uint32_t DPU_BN_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_BN_CFG_RESERVED_0__SHIFT) & DPU_BN_CFG_RESERVED_0__MASK; +} +#define DPU_BN_CFG_BN_ALU_ALGO__MASK 0x000f0000 +#define DPU_BN_CFG_BN_ALU_ALGO__SHIFT 16 +static inline uint32_t DPU_BN_CFG_BN_ALU_ALGO(uint32_t val) +{ + return ((val) << DPU_BN_CFG_BN_ALU_ALGO__SHIFT) & DPU_BN_CFG_BN_ALU_ALGO__MASK; +} +#define DPU_BN_CFG_RESERVED_1__MASK 0x0000fe00 +#define DPU_BN_CFG_RESERVED_1__SHIFT 9 +static inline uint32_t DPU_BN_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_BN_CFG_RESERVED_1__SHIFT) & DPU_BN_CFG_RESERVED_1__MASK; +} +#define DPU_BN_CFG_BN_ALU_SRC__MASK 0x00000100 +#define DPU_BN_CFG_BN_ALU_SRC__SHIFT 8 +static inline uint32_t DPU_BN_CFG_BN_ALU_SRC(uint32_t val) +{ + return ((val) << DPU_BN_CFG_BN_ALU_SRC__SHIFT) & DPU_BN_CFG_BN_ALU_SRC__MASK; +} +#define DPU_BN_CFG_BN_RELUX_EN__MASK 0x00000080 +#define DPU_BN_CFG_BN_RELUX_EN__SHIFT 7 
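/*
 * Illustrative sketch (editor addition, not part of the generated header):
 * each inline helper above shifts a field value into position and masks it
 * into its bit range, so a complete register word is just the OR of its
 * packed fields. The function below is hypothetical; it assumes an
 * ioremapped register base and writel() from <linux/io.h>, and the field
 * values are arbitrary since their hardware semantics are not documented
 * here.
 */
static inline void dpu_bn_cfg_sketch(void __iomem *base)
{
	/* DPU_BN_CFG_BN_ALU_ALGO(1) packs to 0x00010000, BN_ALU_SRC(1) to 0x00000100 */
	uint32_t val = DPU_BN_CFG_BN_ALU_ALGO(1) | DPU_BN_CFG_BN_ALU_SRC(1);

	writel(val, base + REG_DPU_BN_CFG);
}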
+static inline uint32_t DPU_BN_CFG_BN_RELUX_EN(uint32_t val) +{ + return ((val) << DPU_BN_CFG_BN_RELUX_EN__SHIFT) & DPU_BN_CFG_BN_RELUX_EN__MASK; +} +#define DPU_BN_CFG_BN_RELU_BYPASS__MASK 0x00000040 +#define DPU_BN_CFG_BN_RELU_BYPASS__SHIFT 6 +static inline uint32_t DPU_BN_CFG_BN_RELU_BYPASS(uint32_t val) +{ + return ((val) << DPU_BN_CFG_BN_RELU_BYPASS__SHIFT) & DPU_BN_CFG_BN_RELU_BYPASS__MASK; +} +#define DPU_BN_CFG_BN_MUL_PRELU__MASK 0x00000020 +#define DPU_BN_CFG_BN_MUL_PRELU__SHIFT 5 +static inline uint32_t DPU_BN_CFG_BN_MUL_PRELU(uint32_t val) +{ + return ((val) << DPU_BN_CFG_BN_MUL_PRELU__SHIFT) & DPU_BN_CFG_BN_MUL_PRELU__MASK; +} +#define DPU_BN_CFG_BN_MUL_BYPASS__MASK 0x00000010 +#define DPU_BN_CFG_BN_MUL_BYPASS__SHIFT 4 +static inline uint32_t DPU_BN_CFG_BN_MUL_BYPASS(uint32_t val) +{ + return ((val) << DPU_BN_CFG_BN_MUL_BYPASS__SHIFT) & DPU_BN_CFG_BN_MUL_BYPASS__MASK; +} +#define DPU_BN_CFG_RESERVED_2__MASK 0x0000000c +#define DPU_BN_CFG_RESERVED_2__SHIFT 2 +static inline uint32_t DPU_BN_CFG_RESERVED_2(uint32_t val) +{ + return ((val) << DPU_BN_CFG_RESERVED_2__SHIFT) & DPU_BN_CFG_RESERVED_2__MASK; +} +#define DPU_BN_CFG_BN_ALU_BYPASS__MASK 0x00000002 +#define DPU_BN_CFG_BN_ALU_BYPASS__SHIFT 1 +static inline uint32_t DPU_BN_CFG_BN_ALU_BYPASS(uint32_t val) +{ + return ((val) << DPU_BN_CFG_BN_ALU_BYPASS__SHIFT) & DPU_BN_CFG_BN_ALU_BYPASS__MASK; +} +#define DPU_BN_CFG_BN_BYPASS__MASK 0x00000001 +#define DPU_BN_CFG_BN_BYPASS__SHIFT 0 +static inline uint32_t DPU_BN_CFG_BN_BYPASS(uint32_t val) +{ + return ((val) << DPU_BN_CFG_BN_BYPASS__SHIFT) & DPU_BN_CFG_BN_BYPASS__MASK; +} + +#define REG_DPU_BN_ALU_CFG 0x00004064 +#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK 0xffffffff +#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT 0 +static inline uint32_t DPU_BN_ALU_CFG_BN_ALU_OPERAND(uint32_t val) +{ + return ((val) << DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT) & DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK; +} + +#define REG_DPU_BN_MUL_CFG 0x00004068 +#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK 0xffff0000 +#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT 16 +static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_OPERAND(uint32_t val) +{ + return ((val) << DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK; +} +#define DPU_BN_MUL_CFG_RESERVED_0__MASK 0x0000c000 +#define DPU_BN_MUL_CFG_RESERVED_0__SHIFT 14 +static inline uint32_t DPU_BN_MUL_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_BN_MUL_CFG_RESERVED_0__SHIFT) & DPU_BN_MUL_CFG_RESERVED_0__MASK; +} +#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK 0x00003f00 +#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT 8 +static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE(uint32_t val) +{ + return ((val) << DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK; +} +#define DPU_BN_MUL_CFG_RESERVED_1__MASK 0x000000fc +#define DPU_BN_MUL_CFG_RESERVED_1__SHIFT 2 +static inline uint32_t DPU_BN_MUL_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_BN_MUL_CFG_RESERVED_1__SHIFT) & DPU_BN_MUL_CFG_RESERVED_1__MASK; +} +#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK 0x00000002 +#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT 1 +static inline uint32_t DPU_BN_MUL_CFG_BN_TRUNCATE_SRC(uint32_t val) +{ + return ((val) << DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK; +} +#define DPU_BN_MUL_CFG_BN_MUL_SRC__MASK 0x00000001 +#define DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT 0 +static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SRC(uint32_t val) +{ + return ((val) << DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT) & 
DPU_BN_MUL_CFG_BN_MUL_SRC__MASK; +} + +#define REG_DPU_BN_RELUX_CMP_VALUE 0x0000406c +#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK 0xffffffff +#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT 0 +static inline uint32_t DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT(uint32_t val) +{ + return ((val) << DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT) & DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK; +} + +#define REG_DPU_EW_CFG 0x00004070 +#define DPU_EW_CFG_EW_CVT_TYPE__MASK 0x80000000 +#define DPU_EW_CFG_EW_CVT_TYPE__SHIFT 31 +static inline uint32_t DPU_EW_CFG_EW_CVT_TYPE(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_CVT_TYPE__SHIFT) & DPU_EW_CFG_EW_CVT_TYPE__MASK; +} +#define DPU_EW_CFG_EW_CVT_ROUND__MASK 0x40000000 +#define DPU_EW_CFG_EW_CVT_ROUND__SHIFT 30 +static inline uint32_t DPU_EW_CFG_EW_CVT_ROUND(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_CVT_ROUND__SHIFT) & DPU_EW_CFG_EW_CVT_ROUND__MASK; +} +#define DPU_EW_CFG_EW_DATA_MODE__MASK 0x30000000 +#define DPU_EW_CFG_EW_DATA_MODE__SHIFT 28 +static inline uint32_t DPU_EW_CFG_EW_DATA_MODE(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_DATA_MODE__SHIFT) & DPU_EW_CFG_EW_DATA_MODE__MASK; +} +#define DPU_EW_CFG_RESERVED_0__MASK 0x0f000000 +#define DPU_EW_CFG_RESERVED_0__SHIFT 24 +static inline uint32_t DPU_EW_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_EW_CFG_RESERVED_0__SHIFT) & DPU_EW_CFG_RESERVED_0__MASK; +} +#define DPU_EW_CFG_EDATA_SIZE__MASK 0x00c00000 +#define DPU_EW_CFG_EDATA_SIZE__SHIFT 22 +static inline uint32_t DPU_EW_CFG_EDATA_SIZE(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EDATA_SIZE__SHIFT) & DPU_EW_CFG_EDATA_SIZE__MASK; +} +#define DPU_EW_CFG_EW_EQUAL_EN__MASK 0x00200000 +#define DPU_EW_CFG_EW_EQUAL_EN__SHIFT 21 +static inline uint32_t DPU_EW_CFG_EW_EQUAL_EN(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_EQUAL_EN__SHIFT) & DPU_EW_CFG_EW_EQUAL_EN__MASK; +} +#define DPU_EW_CFG_EW_BINARY_EN__MASK 0x00100000 +#define DPU_EW_CFG_EW_BINARY_EN__SHIFT 20 +static inline uint32_t DPU_EW_CFG_EW_BINARY_EN(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_BINARY_EN__SHIFT) & DPU_EW_CFG_EW_BINARY_EN__MASK; +} +#define DPU_EW_CFG_EW_ALU_ALGO__MASK 0x000f0000 +#define DPU_EW_CFG_EW_ALU_ALGO__SHIFT 16 +static inline uint32_t DPU_EW_CFG_EW_ALU_ALGO(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_ALU_ALGO__SHIFT) & DPU_EW_CFG_EW_ALU_ALGO__MASK; +} +#define DPU_EW_CFG_RESERVED_1__MASK 0x0000f800 +#define DPU_EW_CFG_RESERVED_1__SHIFT 11 +static inline uint32_t DPU_EW_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_EW_CFG_RESERVED_1__SHIFT) & DPU_EW_CFG_RESERVED_1__MASK; +} +#define DPU_EW_CFG_EW_RELUX_EN__MASK 0x00000400 +#define DPU_EW_CFG_EW_RELUX_EN__SHIFT 10 +static inline uint32_t DPU_EW_CFG_EW_RELUX_EN(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_RELUX_EN__SHIFT) & DPU_EW_CFG_EW_RELUX_EN__MASK; +} +#define DPU_EW_CFG_EW_RELU_BYPASS__MASK 0x00000200 +#define DPU_EW_CFG_EW_RELU_BYPASS__SHIFT 9 +static inline uint32_t DPU_EW_CFG_EW_RELU_BYPASS(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_RELU_BYPASS__SHIFT) & DPU_EW_CFG_EW_RELU_BYPASS__MASK; +} +#define DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK 0x00000100 +#define DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT 8 +static inline uint32_t DPU_EW_CFG_EW_OP_CVT_BYPASS(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK; +} +#define DPU_EW_CFG_EW_LUT_BYPASS__MASK 0x00000080 +#define DPU_EW_CFG_EW_LUT_BYPASS__SHIFT 7 +static inline uint32_t DPU_EW_CFG_EW_LUT_BYPASS(uint32_t val) +{ + return ((val) << 
DPU_EW_CFG_EW_LUT_BYPASS__SHIFT) & DPU_EW_CFG_EW_LUT_BYPASS__MASK; +} +#define DPU_EW_CFG_EW_OP_SRC__MASK 0x00000040 +#define DPU_EW_CFG_EW_OP_SRC__SHIFT 6 +static inline uint32_t DPU_EW_CFG_EW_OP_SRC(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_OP_SRC__SHIFT) & DPU_EW_CFG_EW_OP_SRC__MASK; +} +#define DPU_EW_CFG_EW_MUL_PRELU__MASK 0x00000020 +#define DPU_EW_CFG_EW_MUL_PRELU__SHIFT 5 +static inline uint32_t DPU_EW_CFG_EW_MUL_PRELU(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_MUL_PRELU__SHIFT) & DPU_EW_CFG_EW_MUL_PRELU__MASK; +} +#define DPU_EW_CFG_RESERVED_2__MASK 0x00000018 +#define DPU_EW_CFG_RESERVED_2__SHIFT 3 +static inline uint32_t DPU_EW_CFG_RESERVED_2(uint32_t val) +{ + return ((val) << DPU_EW_CFG_RESERVED_2__SHIFT) & DPU_EW_CFG_RESERVED_2__MASK; +} +#define DPU_EW_CFG_EW_OP_TYPE__MASK 0x00000004 +#define DPU_EW_CFG_EW_OP_TYPE__SHIFT 2 +static inline uint32_t DPU_EW_CFG_EW_OP_TYPE(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_OP_TYPE__SHIFT) & DPU_EW_CFG_EW_OP_TYPE__MASK; +} +#define DPU_EW_CFG_EW_OP_BYPASS__MASK 0x00000002 +#define DPU_EW_CFG_EW_OP_BYPASS__SHIFT 1 +static inline uint32_t DPU_EW_CFG_EW_OP_BYPASS(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_OP_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_BYPASS__MASK; +} +#define DPU_EW_CFG_EW_BYPASS__MASK 0x00000001 +#define DPU_EW_CFG_EW_BYPASS__SHIFT 0 +static inline uint32_t DPU_EW_CFG_EW_BYPASS(uint32_t val) +{ + return ((val) << DPU_EW_CFG_EW_BYPASS__SHIFT) & DPU_EW_CFG_EW_BYPASS__MASK; +} + +#define REG_DPU_EW_CVT_OFFSET_VALUE 0x00004074 +#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK 0xffffffff +#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT 0 +static inline uint32_t DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET(uint32_t val) +{ + return ((val) << DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT) & DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK; +} + +#define REG_DPU_EW_CVT_SCALE_VALUE 0x00004078 +#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK 0xffc00000 +#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT 22 +static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE(uint32_t val) +{ + return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK; +} +#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK 0x003f0000 +#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT 16 +static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT(uint32_t val) +{ + return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK; +} +#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK 0x0000ffff +#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT 0 +static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE(uint32_t val) +{ + return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK; +} + +#define REG_DPU_EW_RELUX_CMP_VALUE 0x0000407c +#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK 0xffffffff +#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT 0 +static inline uint32_t DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT(uint32_t val) +{ + return ((val) << DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT) & DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK; +} + +#define REG_DPU_OUT_CVT_OFFSET 0x00004080 +#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK 0xffffffff +#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT 0 +static inline uint32_t DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT) & 
DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK; +} + +#define REG_DPU_OUT_CVT_SCALE 0x00004084 +#define DPU_OUT_CVT_SCALE_RESERVED_0__MASK 0xfffe0000 +#define DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT 17 +static inline uint32_t DPU_OUT_CVT_SCALE_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT) & DPU_OUT_CVT_SCALE_RESERVED_0__MASK; +} +#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK 0x00010000 +#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT 16 +static inline uint32_t DPU_OUT_CVT_SCALE_FP32TOFP16_EN(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT) & DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK; +} +#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK 0x0000ffff +#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT 0 +static inline uint32_t DPU_OUT_CVT_SCALE_OUT_CVT_SCALE(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT) & DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK; +} + +#define REG_DPU_OUT_CVT_SHIFT 0x00004088 +#define DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK 0x80000000 +#define DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT 31 +static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_TYPE(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK; +} +#define DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK 0x40000000 +#define DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT 30 +static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_ROUND(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK; +} +#define DPU_OUT_CVT_SHIFT_RESERVED_0__MASK 0x3ff00000 +#define DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT 20 +static inline uint32_t DPU_OUT_CVT_SHIFT_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT) & DPU_OUT_CVT_SHIFT_RESERVED_0__MASK; +} +#define DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK 0x000ff000 +#define DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT 12 +static inline uint32_t DPU_OUT_CVT_SHIFT_MINUS_EXP(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT) & DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK; +} +#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK 0x00000fff +#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT 0 +static inline uint32_t DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT(uint32_t val) +{ + return ((val) << DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT) & DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK; +} + +#define REG_DPU_EW_OP_VALUE_0 0x00004090 +#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK 0xffffffff +#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT 0 +static inline uint32_t DPU_EW_OP_VALUE_0_EW_OPERAND_0(uint32_t val) +{ + return ((val) << DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT) & DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK; +} + +#define REG_DPU_EW_OP_VALUE_1 0x00004094 +#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK 0xffffffff +#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT 0 +static inline uint32_t DPU_EW_OP_VALUE_1_EW_OPERAND_1(uint32_t val) +{ + return ((val) << DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT) & DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK; +} + +#define REG_DPU_EW_OP_VALUE_2 0x00004098 +#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK 0xffffffff +#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT 0 +static inline uint32_t DPU_EW_OP_VALUE_2_EW_OPERAND_2(uint32_t val) +{ + return ((val) << DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT) & DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK; +} + +#define REG_DPU_EW_OP_VALUE_3 0x0000409c +#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK 0xffffffff +#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT 0 +static inline uint32_t DPU_EW_OP_VALUE_3_EW_OPERAND_3(uint32_t val) +{ + return ((val) << 
DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT) & DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK; +} + +#define REG_DPU_EW_OP_VALUE_4 0x000040a0 +#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK 0xffffffff +#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT 0 +static inline uint32_t DPU_EW_OP_VALUE_4_EW_OPERAND_4(uint32_t val) +{ + return ((val) << DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT) & DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK; +} + +#define REG_DPU_EW_OP_VALUE_5 0x000040a4 +#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK 0xffffffff +#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT 0 +static inline uint32_t DPU_EW_OP_VALUE_5_EW_OPERAND_5(uint32_t val) +{ + return ((val) << DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT) & DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK; +} + +#define REG_DPU_EW_OP_VALUE_6 0x000040a8 +#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK 0xffffffff +#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT 0 +static inline uint32_t DPU_EW_OP_VALUE_6_EW_OPERAND_6(uint32_t val) +{ + return ((val) << DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT) & DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK; +} + +#define REG_DPU_EW_OP_VALUE_7 0x000040ac +#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK 0xffffffff +#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT 0 +static inline uint32_t DPU_EW_OP_VALUE_7_EW_OPERAND_7(uint32_t val) +{ + return ((val) << DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT) & DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK; +} + +#define REG_DPU_SURFACE_ADD 0x000040c0 +#define DPU_SURFACE_ADD_SURF_ADD__MASK 0xfffffff0 +#define DPU_SURFACE_ADD_SURF_ADD__SHIFT 4 +static inline uint32_t DPU_SURFACE_ADD_SURF_ADD(uint32_t val) +{ + return ((val) << DPU_SURFACE_ADD_SURF_ADD__SHIFT) & DPU_SURFACE_ADD_SURF_ADD__MASK; +} +#define DPU_SURFACE_ADD_RESERVED_0__MASK 0x0000000f +#define DPU_SURFACE_ADD_RESERVED_0__SHIFT 0 +static inline uint32_t DPU_SURFACE_ADD_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_SURFACE_ADD_RESERVED_0__SHIFT) & DPU_SURFACE_ADD_RESERVED_0__MASK; +} + +#define REG_DPU_LUT_ACCESS_CFG 0x00004100 +#define DPU_LUT_ACCESS_CFG_RESERVED_0__MASK 0xfffc0000 +#define DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT 18 +static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_0__MASK; +} +#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK 0x00020000 +#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT 17 +static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(uint32_t val) +{ + return ((val) << DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK; +} +#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK 0x00010000 +#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT 16 +static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(uint32_t val) +{ + return ((val) << DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK; +} +#define DPU_LUT_ACCESS_CFG_RESERVED_1__MASK 0x0000fc00 +#define DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT 10 +static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_1__MASK; +} +#define DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK 0x000003ff +#define DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT 0 +static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ADDR(uint32_t val) +{ + return ((val) << DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK; +} + +#define REG_DPU_LUT_ACCESS_DATA 0x00004104 +#define DPU_LUT_ACCESS_DATA_RESERVED_0__MASK 0xffff0000 +#define DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT 16 +static inline uint32_t 
DPU_LUT_ACCESS_DATA_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_DATA_RESERVED_0__MASK; +} +#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK 0x0000ffff +#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT 0 +static inline uint32_t DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(uint32_t val) +{ + return ((val) << DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT) & DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK; +} + +#define REG_DPU_LUT_CFG 0x00004108 +#define DPU_LUT_CFG_RESERVED_0__MASK 0xffffff00 +#define DPU_LUT_CFG_RESERVED_0__SHIFT 8 +static inline uint32_t DPU_LUT_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_LUT_CFG_RESERVED_0__SHIFT) & DPU_LUT_CFG_RESERVED_0__MASK; +} +#define DPU_LUT_CFG_LUT_CAL_SEL__MASK 0x00000080 +#define DPU_LUT_CFG_LUT_CAL_SEL__SHIFT 7 +static inline uint32_t DPU_LUT_CFG_LUT_CAL_SEL(uint32_t val) +{ + return ((val) << DPU_LUT_CFG_LUT_CAL_SEL__SHIFT) & DPU_LUT_CFG_LUT_CAL_SEL__MASK; +} +#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK 0x00000040 +#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT 6 +static inline uint32_t DPU_LUT_CFG_LUT_HYBRID_PRIORITY(uint32_t val) +{ + return ((val) << DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK; +} +#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK 0x00000020 +#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT 5 +static inline uint32_t DPU_LUT_CFG_LUT_OFLOW_PRIORITY(uint32_t val) +{ + return ((val) << DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK; +} +#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK 0x00000010 +#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT 4 +static inline uint32_t DPU_LUT_CFG_LUT_UFLOW_PRIORITY(uint32_t val) +{ + return ((val) << DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK; +} +#define DPU_LUT_CFG_LUT_LO_LE_MUX__MASK 0x0000000c +#define DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT 2 +static inline uint32_t DPU_LUT_CFG_LUT_LO_LE_MUX(uint32_t val) +{ + return ((val) << DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT) & DPU_LUT_CFG_LUT_LO_LE_MUX__MASK; +} +#define DPU_LUT_CFG_LUT_EXPAND_EN__MASK 0x00000002 +#define DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT 1 +static inline uint32_t DPU_LUT_CFG_LUT_EXPAND_EN(uint32_t val) +{ + return ((val) << DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT) & DPU_LUT_CFG_LUT_EXPAND_EN__MASK; +} +#define DPU_LUT_CFG_LUT_ROAD_SEL__MASK 0x00000001 +#define DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT 0 +static inline uint32_t DPU_LUT_CFG_LUT_ROAD_SEL(uint32_t val) +{ + return ((val) << DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT) & DPU_LUT_CFG_LUT_ROAD_SEL__MASK; +} + +#define REG_DPU_LUT_INFO 0x0000410c +#define DPU_LUT_INFO_RESERVED_0__MASK 0xff000000 +#define DPU_LUT_INFO_RESERVED_0__SHIFT 24 +static inline uint32_t DPU_LUT_INFO_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_LUT_INFO_RESERVED_0__SHIFT) & DPU_LUT_INFO_RESERVED_0__MASK; +} +#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK 0x00ff0000 +#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT 16 +static inline uint32_t DPU_LUT_INFO_LUT_LO_INDEX_SELECT(uint32_t val) +{ + return ((val) << DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK; +} +#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK 0x0000ff00 +#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT 8 +static inline uint32_t DPU_LUT_INFO_LUT_LE_INDEX_SELECT(uint32_t val) +{ + return ((val) << DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK; +} +#define DPU_LUT_INFO_RESERVED_1__MASK 0x000000ff +#define DPU_LUT_INFO_RESERVED_1__SHIFT 0 +static inline uint32_t 
DPU_LUT_INFO_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_LUT_INFO_RESERVED_1__SHIFT) & DPU_LUT_INFO_RESERVED_1__MASK; +} + +#define REG_DPU_LUT_LE_START 0x00004110 +#define DPU_LUT_LE_START_LUT_LE_START__MASK 0xffffffff +#define DPU_LUT_LE_START_LUT_LE_START__SHIFT 0 +static inline uint32_t DPU_LUT_LE_START_LUT_LE_START(uint32_t val) +{ + return ((val) << DPU_LUT_LE_START_LUT_LE_START__SHIFT) & DPU_LUT_LE_START_LUT_LE_START__MASK; +} + +#define REG_DPU_LUT_LE_END 0x00004114 +#define DPU_LUT_LE_END_LUT_LE_END__MASK 0xffffffff +#define DPU_LUT_LE_END_LUT_LE_END__SHIFT 0 +static inline uint32_t DPU_LUT_LE_END_LUT_LE_END(uint32_t val) +{ + return ((val) << DPU_LUT_LE_END_LUT_LE_END__SHIFT) & DPU_LUT_LE_END_LUT_LE_END__MASK; +} + +#define REG_DPU_LUT_LO_START 0x00004118 +#define DPU_LUT_LO_START_LUT_LO_START__MASK 0xffffffff +#define DPU_LUT_LO_START_LUT_LO_START__SHIFT 0 +static inline uint32_t DPU_LUT_LO_START_LUT_LO_START(uint32_t val) +{ + return ((val) << DPU_LUT_LO_START_LUT_LO_START__SHIFT) & DPU_LUT_LO_START_LUT_LO_START__MASK; +} + +#define REG_DPU_LUT_LO_END 0x0000411c +#define DPU_LUT_LO_END_LUT_LO_END__MASK 0xffffffff +#define DPU_LUT_LO_END_LUT_LO_END__SHIFT 0 +static inline uint32_t DPU_LUT_LO_END_LUT_LO_END(uint32_t val) +{ + return ((val) << DPU_LUT_LO_END_LUT_LO_END__SHIFT) & DPU_LUT_LO_END_LUT_LO_END__MASK; +} + +#define REG_DPU_LUT_LE_SLOPE_SCALE 0x00004120 +#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK 0xffff0000 +#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT 16 +static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE(uint32_t val) +{ + return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK; +} +#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK 0x0000ffff +#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT 0 +static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE(uint32_t val) +{ + return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK; +} + +#define REG_DPU_LUT_LE_SLOPE_SHIFT 0x00004124 +#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00 +#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT 10 +static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK; +} +#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK 0x000003e0 +#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT 5 +static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT(uint32_t val) +{ + return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK; +} +#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK 0x0000001f +#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT 0 +static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT(uint32_t val) +{ + return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK; +} + +#define REG_DPU_LUT_LO_SLOPE_SCALE 0x00004128 +#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK 0xffff0000 +#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT 16 +static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE(uint32_t val) +{ + return ((val) << 
DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK; +} +#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK 0x0000ffff +#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT 0 +static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE(uint32_t val) +{ + return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK; +} + +#define REG_DPU_LUT_LO_SLOPE_SHIFT 0x0000412c +#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00 +#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT 10 +static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK; +} +#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK 0x000003e0 +#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT 5 +static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT(uint32_t val) +{ + return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK; +} +#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK 0x0000001f +#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT 0 +static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT(uint32_t val) +{ + return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK; +} + +#define REG_DPU_RDMA_RDMA_S_STATUS 0x00005000 +#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000 +#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18 +static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000 +#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16 +static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK; +} +#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc +#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2 +static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK; +} +#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003 +#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK; +} + +#define REG_DPU_RDMA_RDMA_S_POINTER 0x00005004 +#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000 +#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17 +static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000 +#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16 +static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK; +} +#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0 +#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6 +static inline uint32_t 
DPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK; +} +#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020 +#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5 +static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK; +} +#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010 +#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4 +static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK; +} +#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008 +#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3 +static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK; +} +#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004 +#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2 +static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK; +} +#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002 +#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1 +static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK; +} +#define DPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001 +#define DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER__MASK; +} + +#define REG_DPU_RDMA_RDMA_OPERATION_ENABLE 0x00005008 +#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe +#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1 +static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001 +#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK; +} + +#define REG_DPU_RDMA_RDMA_DATA_CUBE_WIDTH 0x0000500c +#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000 +#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13 +static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff +#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK; +} + +#define REG_DPU_RDMA_RDMA_DATA_CUBE_HEIGHT 0x00005010 +#define 
DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xe0000000 +#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 29 +static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK 0x1fff0000 +#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT 16 +static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK; +} +#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x0000e000 +#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13 +static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK; +} +#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff +#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK; +} + +#define REG_DPU_RDMA_RDMA_DATA_CUBE_CHANNEL 0x00005014 +#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xffffe000 +#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 13 +static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff +#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK; +} + +#define REG_DPU_RDMA_RDMA_SRC_BASE_ADDR 0x00005018 +#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff +#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK; +} + +#define REG_DPU_RDMA_RDMA_BRDMA_CFG 0x0000501c +#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK 0xffffffe0 +#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT 5 +static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK 0x0000001e +#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT 1 +static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK; +} +#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK 0x00000001 +#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK; +} + +#define REG_DPU_RDMA_RDMA_BS_BASE_ADDR 0x00005020 +#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK 0xffffffff +#define 
DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK; +} + +#define REG_DPU_RDMA_RDMA_NRDMA_CFG 0x00005028 +#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK 0xffffffe0 +#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT 5 +static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK 0x0000001e +#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT 1 +static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK; +} +#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK 0x00000001 +#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK; +} + +#define REG_DPU_RDMA_RDMA_BN_BASE_ADDR 0x0000502c +#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK 0xffffffff +#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK; +} + +#define REG_DPU_RDMA_RDMA_ERDMA_CFG 0x00005034 +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK 0xc0000000 +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT 30 +static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK; +} +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK 0x20000000 +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT 29 +static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK; +} +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK 0x10000000 +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT 28 +static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK; +} +#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK 0x0ffffff0 +#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT 4 +static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK 0x0000000c +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT 2 +static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK; +} +#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK 0x00000002 +#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT 1 +static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK; +} +#define 
DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK 0x00000001 +#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK; +} + +#define REG_DPU_RDMA_RDMA_EW_BASE_ADDR 0x00005038 +#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK 0xffffffff +#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK; +} + +#define REG_DPU_RDMA_RDMA_EW_SURF_STRIDE 0x00005040 +#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK 0xfffffff0 +#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT 4 +static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK; +} +#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK 0x0000000f +#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK; +} + +#define REG_DPU_RDMA_RDMA_FEATURE_MODE_CFG 0x00005044 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK 0xfffc0000 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT 18 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK 0x00038000 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT 15 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK; +} +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK 0x00007800 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT 11 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK; +} +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK 0x00000700 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT 8 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK; +} +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK 0x000000e0 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT 5 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK; +} +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK 0x00000010 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT 4 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK; +} +#define 
DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK 0x00000008 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT 3 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK; +} +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000006 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT 1 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK; +} +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001 +#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK; +} + +#define REG_DPU_RDMA_RDMA_SRC_DMA_CFG 0x00005048 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK 0xfff80000 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT 19 +static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK; +} +#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK 0x0007c000 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT 14 +static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK 0x00002000 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT 13 +static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK; +} +#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK 0x00001000 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT 12 +static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK; +} +#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00000e00 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 9 +static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK; +} +#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000001c0 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT 6 +static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK; +} +#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK 0x00000038 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT 3 +static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK; +} +#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK 0x00000007 +#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT 0 +static 
inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK; +} + +#define REG_DPU_RDMA_RDMA_SURF_NOTCH 0x0000504c +#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK 0xfffffff0 +#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT 4 +static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK; +} +#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK 0x0000000f +#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK; +} + +#define REG_DPU_RDMA_RDMA_PAD_CFG 0x00005064 +#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK 0xffff0000 +#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT 16 +static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK; +} +#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK 0x0000ff80 +#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT 7 +static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK; +} +#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK 0x00000070 +#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT 4 +static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_TOP(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK; +} +#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK 0x00000008 +#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT 3 +static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK; +} +#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK 0x00000007 +#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK; +} + +#define REG_DPU_RDMA_RDMA_WEIGHT 0x00005068 +#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK 0xff000000 +#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT 24 +static inline uint32_t DPU_RDMA_RDMA_WEIGHT_E_WEIGHT(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK; +} +#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK 0x00ff0000 +#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT 16 +static inline uint32_t DPU_RDMA_RDMA_WEIGHT_N_WEIGHT(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK; +} +#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK 0x0000ff00 +#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT 8 +static inline uint32_t DPU_RDMA_RDMA_WEIGHT_B_WEIGHT(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK; +} +#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK 0x000000ff +#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_WEIGHT_M_WEIGHT(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK; +} + +#define REG_DPU_RDMA_RDMA_EW_SURF_NOTCH 0x0000506c +#define 
DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK 0xfffffff0 +#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT 4 +static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK; +} +#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK 0x0000000f +#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT 0 +static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0(uint32_t val) +{ + return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK; +} + +#define REG_PPU_S_STATUS 0x00006000 +#define PPU_S_STATUS_RESERVED_0__MASK 0xfffc0000 +#define PPU_S_STATUS_RESERVED_0__SHIFT 18 +static inline uint32_t PPU_S_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_S_STATUS_RESERVED_0__SHIFT) & PPU_S_STATUS_RESERVED_0__MASK; +} +#define PPU_S_STATUS_STATUS_1__MASK 0x00030000 +#define PPU_S_STATUS_STATUS_1__SHIFT 16 +static inline uint32_t PPU_S_STATUS_STATUS_1(uint32_t val) +{ + return ((val) << PPU_S_STATUS_STATUS_1__SHIFT) & PPU_S_STATUS_STATUS_1__MASK; +} +#define PPU_S_STATUS_RESERVED_1__MASK 0x0000fffc +#define PPU_S_STATUS_RESERVED_1__SHIFT 2 +static inline uint32_t PPU_S_STATUS_RESERVED_1(uint32_t val) +{ + return ((val) << PPU_S_STATUS_RESERVED_1__SHIFT) & PPU_S_STATUS_RESERVED_1__MASK; +} +#define PPU_S_STATUS_STATUS_0__MASK 0x00000003 +#define PPU_S_STATUS_STATUS_0__SHIFT 0 +static inline uint32_t PPU_S_STATUS_STATUS_0(uint32_t val) +{ + return ((val) << PPU_S_STATUS_STATUS_0__SHIFT) & PPU_S_STATUS_STATUS_0__MASK; +} + +#define REG_PPU_S_POINTER 0x00006004 +#define PPU_S_POINTER_RESERVED_0__MASK 0xfffe0000 +#define PPU_S_POINTER_RESERVED_0__SHIFT 17 +static inline uint32_t PPU_S_POINTER_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_S_POINTER_RESERVED_0__SHIFT) & PPU_S_POINTER_RESERVED_0__MASK; +} +#define PPU_S_POINTER_EXECUTER__MASK 0x00010000 +#define PPU_S_POINTER_EXECUTER__SHIFT 16 +static inline uint32_t PPU_S_POINTER_EXECUTER(uint32_t val) +{ + return ((val) << PPU_S_POINTER_EXECUTER__SHIFT) & PPU_S_POINTER_EXECUTER__MASK; +} +#define PPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0 +#define PPU_S_POINTER_RESERVED_1__SHIFT 6 +static inline uint32_t PPU_S_POINTER_RESERVED_1(uint32_t val) +{ + return ((val) << PPU_S_POINTER_RESERVED_1__SHIFT) & PPU_S_POINTER_RESERVED_1__MASK; +} +#define PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020 +#define PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5 +static inline uint32_t PPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val) +{ + return ((val) << PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK; +} +#define PPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010 +#define PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4 +static inline uint32_t PPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val) +{ + return ((val) << PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_POINTER_PP_CLEAR__MASK; +} +#define PPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008 +#define PPU_S_POINTER_POINTER_PP_MODE__SHIFT 3 +static inline uint32_t PPU_S_POINTER_POINTER_PP_MODE(uint32_t val) +{ + return ((val) << PPU_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_S_POINTER_POINTER_PP_MODE__MASK; +} +#define PPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004 +#define PPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2 +static inline uint32_t PPU_S_POINTER_EXECUTER_PP_EN(uint32_t val) +{ + return ((val) << PPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_S_POINTER_EXECUTER_PP_EN__MASK; +} +#define 
PPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002 +#define PPU_S_POINTER_POINTER_PP_EN__SHIFT 1 +static inline uint32_t PPU_S_POINTER_POINTER_PP_EN(uint32_t val) +{ + return ((val) << PPU_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_S_POINTER_POINTER_PP_EN__MASK; +} +#define PPU_S_POINTER_POINTER__MASK 0x00000001 +#define PPU_S_POINTER_POINTER__SHIFT 0 +static inline uint32_t PPU_S_POINTER_POINTER(uint32_t val) +{ + return ((val) << PPU_S_POINTER_POINTER__SHIFT) & PPU_S_POINTER_POINTER__MASK; +} + +#define REG_PPU_OPERATION_ENABLE 0x00006008 +#define PPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe +#define PPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1 +static inline uint32_t PPU_OPERATION_ENABLE_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_OPERATION_ENABLE_RESERVED_0__MASK; +} +#define PPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001 +#define PPU_OPERATION_ENABLE_OP_EN__SHIFT 0 +static inline uint32_t PPU_OPERATION_ENABLE_OP_EN(uint32_t val) +{ + return ((val) << PPU_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_OPERATION_ENABLE_OP_EN__MASK; +} + +#define REG_PPU_DATA_CUBE_IN_WIDTH 0x0000600c +#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000 +#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK; +} +#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff +#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0 +static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK; +} + +#define REG_PPU_DATA_CUBE_IN_HEIGHT 0x00006010 +#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000 +#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK; +} +#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff +#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0 +static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK; +} + +#define REG_PPU_DATA_CUBE_IN_CHANNEL 0x00006014 +#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000 +#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK; +} +#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff +#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0 +static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK; +} + +#define REG_PPU_DATA_CUBE_OUT_WIDTH 0x00006018 +#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK 0xffffe000 +#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK; +} +#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK 0x00001fff +#define 
PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT 0 +static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK; +} + +#define REG_PPU_DATA_CUBE_OUT_HEIGHT 0x0000601c +#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK 0xffffe000 +#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK; +} +#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK 0x00001fff +#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT 0 +static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK; +} + +#define REG_PPU_DATA_CUBE_OUT_CHANNEL 0x00006020 +#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK 0xffffe000 +#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK; +} +#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK 0x00001fff +#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT 0 +static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL(uint32_t val) +{ + return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK; +} + +#define REG_PPU_OPERATION_MODE_CFG 0x00006024 +#define PPU_OPERATION_MODE_CFG_RESERVED_0__MASK 0x80000000 +#define PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT 31 +static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_0__MASK; +} +#define PPU_OPERATION_MODE_CFG_INDEX_EN__MASK 0x40000000 +#define PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT 30 +static inline uint32_t PPU_OPERATION_MODE_CFG_INDEX_EN(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT) & PPU_OPERATION_MODE_CFG_INDEX_EN__MASK; +} +#define PPU_OPERATION_MODE_CFG_RESERVED_1__MASK 0x20000000 +#define PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT 29 +static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_1__MASK; +} +#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK 0x1fff0000 +#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT 16 +static inline uint32_t PPU_OPERATION_MODE_CFG_NOTCH_ADDR(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT) & PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK; +} +#define PPU_OPERATION_MODE_CFG_RESERVED_2__MASK 0x0000ff00 +#define PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT 8 +static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_2(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_2__MASK; +} +#define PPU_OPERATION_MODE_CFG_USE_CNT__MASK 0x000000e0 +#define PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT 5 +static inline uint32_t PPU_OPERATION_MODE_CFG_USE_CNT(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT) & PPU_OPERATION_MODE_CFG_USE_CNT__MASK; +} +#define PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK 0x00000010 +#define PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT 4 +static inline 
uint32_t PPU_OPERATION_MODE_CFG_FLYING_MODE(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT) & PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK; +} +#define PPU_OPERATION_MODE_CFG_RESERVED_3__MASK 0x0000000c +#define PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT 2 +static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_3(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_3__MASK; +} +#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK 0x00000003 +#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT 0 +static inline uint32_t PPU_OPERATION_MODE_CFG_POOLING_METHOD(uint32_t val) +{ + return ((val) << PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT) & PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK; +} + +#define REG_PPU_POOLING_KERNEL_CFG 0x00006034 +#define PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK 0xff000000 +#define PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT 24 +static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK; +} +#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00f00000 +#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 20 +static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val) +{ + return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK; +} +#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000f0000 +#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT 16 +static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(uint32_t val) +{ + return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK; +} +#define PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK 0x0000f000 +#define PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT 12 +static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK; +} +#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK 0x00000f00 +#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT 8 +static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(uint32_t val) +{ + return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK; +} +#define PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK 0x000000f0 +#define PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT 4 +static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_2(uint32_t val) +{ + return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK; +} +#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK 0x0000000f +#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT 0 +static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(uint32_t val) +{ + return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK; +} + +#define REG_PPU_RECIP_KERNEL_WIDTH 0x00006038 +#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK 0xfffe0000 +#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT 17 +static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK; +} +#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK 0x0001ffff +#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT 0 +static inline uint32_t 
PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH(uint32_t val) +{ + return ((val) << PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK; +} + +#define REG_PPU_RECIP_KERNEL_HEIGHT 0x0000603c +#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK 0xfffe0000 +#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT 17 +static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK; +} +#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK 0x0001ffff +#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT 0 +static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT(uint32_t val) +{ + return ((val) << PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK; +} + +#define REG_PPU_POOLING_PADDING_CFG 0x00006040 +#define PPU_POOLING_PADDING_CFG_RESERVED_0__MASK 0xffff8000 +#define PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT 15 +static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_0__MASK; +} +#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK 0x00007000 +#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT 12 +static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_BOTTOM(uint32_t val) +{ + return ((val) << PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK; +} +#define PPU_POOLING_PADDING_CFG_RESERVED_1__MASK 0x00000800 +#define PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT 11 +static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_1(uint32_t val) +{ + return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_1__MASK; +} +#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK 0x00000700 +#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT 8 +static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_RIGHT(uint32_t val) +{ + return ((val) << PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK; +} +#define PPU_POOLING_PADDING_CFG_RESERVED_2__MASK 0x00000080 +#define PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT 7 +static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_2(uint32_t val) +{ + return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_2__MASK; +} +#define PPU_POOLING_PADDING_CFG_PAD_TOP__MASK 0x00000070 +#define PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT 4 +static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_TOP(uint32_t val) +{ + return ((val) << PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_TOP__MASK; +} +#define PPU_POOLING_PADDING_CFG_RESERVED_3__MASK 0x00000008 +#define PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT 3 +static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_3(uint32_t val) +{ + return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_3__MASK; +} +#define PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK 0x00000007 +#define PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT 0 +static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_LEFT(uint32_t val) +{ + return ((val) << PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK; +} + +#define REG_PPU_PADDING_VALUE_1_CFG 0x00006044 +#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK 0xffffffff +#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT 0 +static inline uint32_t PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0(uint32_t val) +{ + return ((val) << 
PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT) & PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK; +} + +#define REG_PPU_PADDING_VALUE_2_CFG 0x00006048 +#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK 0xfffffff8 +#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT 3 +static inline uint32_t PPU_PADDING_VALUE_2_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT) & PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK; +} +#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK 0x00000007 +#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT 0 +static inline uint32_t PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1(uint32_t val) +{ + return ((val) << PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT) & PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK; +} + +#define REG_PPU_DST_BASE_ADDR 0x00006070 +#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xfffffff0 +#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 4 +static inline uint32_t PPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val) +{ + return ((val) << PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK; +} +#define PPU_DST_BASE_ADDR_RESERVED_0__MASK 0x0000000f +#define PPU_DST_BASE_ADDR_RESERVED_0__SHIFT 0 +static inline uint32_t PPU_DST_BASE_ADDR_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_DST_BASE_ADDR_RESERVED_0__SHIFT) & PPU_DST_BASE_ADDR_RESERVED_0__MASK; +} + +#define REG_PPU_DST_SURF_STRIDE 0x0000607c +#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0 +#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4 +static inline uint32_t PPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val) +{ + return ((val) << PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK; +} +#define PPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f +#define PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0 +static inline uint32_t PPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_DST_SURF_STRIDE_RESERVED_0__MASK; +} + +#define REG_PPU_DATA_FORMAT 0x00006084 +#define PPU_DATA_FORMAT_INDEX_ADD__MASK 0xfffffff0 +#define PPU_DATA_FORMAT_INDEX_ADD__SHIFT 4 +static inline uint32_t PPU_DATA_FORMAT_INDEX_ADD(uint32_t val) +{ + return ((val) << PPU_DATA_FORMAT_INDEX_ADD__SHIFT) & PPU_DATA_FORMAT_INDEX_ADD__MASK; +} +#define PPU_DATA_FORMAT_DPU_FLYIN__MASK 0x00000008 +#define PPU_DATA_FORMAT_DPU_FLYIN__SHIFT 3 +static inline uint32_t PPU_DATA_FORMAT_DPU_FLYIN(uint32_t val) +{ + return ((val) << PPU_DATA_FORMAT_DPU_FLYIN__SHIFT) & PPU_DATA_FORMAT_DPU_FLYIN__MASK; +} +#define PPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007 +#define PPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0 +static inline uint32_t PPU_DATA_FORMAT_PROC_PRECISION(uint32_t val) +{ + return ((val) << PPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & PPU_DATA_FORMAT_PROC_PRECISION__MASK; +} + +#define REG_PPU_MISC_CTRL 0x000060dc +#define PPU_MISC_CTRL_SURF_LEN__MASK 0xffff0000 +#define PPU_MISC_CTRL_SURF_LEN__SHIFT 16 +static inline uint32_t PPU_MISC_CTRL_SURF_LEN(uint32_t val) +{ + return ((val) << PPU_MISC_CTRL_SURF_LEN__SHIFT) & PPU_MISC_CTRL_SURF_LEN__MASK; +} +#define PPU_MISC_CTRL_RESERVED_0__MASK 0x0000fe00 +#define PPU_MISC_CTRL_RESERVED_0__SHIFT 9 +static inline uint32_t PPU_MISC_CTRL_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_MISC_CTRL_RESERVED_0__SHIFT) & PPU_MISC_CTRL_RESERVED_0__MASK; +} +#define PPU_MISC_CTRL_MC_SURF_OUT__MASK 0x00000100 +#define PPU_MISC_CTRL_MC_SURF_OUT__SHIFT 8 +static inline uint32_t PPU_MISC_CTRL_MC_SURF_OUT(uint32_t val) +{ + return ((val) << PPU_MISC_CTRL_MC_SURF_OUT__SHIFT) & 
PPU_MISC_CTRL_MC_SURF_OUT__MASK; +} +#define PPU_MISC_CTRL_NONALIGN__MASK 0x00000080 +#define PPU_MISC_CTRL_NONALIGN__SHIFT 7 +static inline uint32_t PPU_MISC_CTRL_NONALIGN(uint32_t val) +{ + return ((val) << PPU_MISC_CTRL_NONALIGN__SHIFT) & PPU_MISC_CTRL_NONALIGN__MASK; +} +#define PPU_MISC_CTRL_RESERVED_1__MASK 0x00000070 +#define PPU_MISC_CTRL_RESERVED_1__SHIFT 4 +static inline uint32_t PPU_MISC_CTRL_RESERVED_1(uint32_t val) +{ + return ((val) << PPU_MISC_CTRL_RESERVED_1__SHIFT) & PPU_MISC_CTRL_RESERVED_1__MASK; +} +#define PPU_MISC_CTRL_BURST_LEN__MASK 0x0000000f +#define PPU_MISC_CTRL_BURST_LEN__SHIFT 0 +static inline uint32_t PPU_MISC_CTRL_BURST_LEN(uint32_t val) +{ + return ((val) << PPU_MISC_CTRL_BURST_LEN__SHIFT) & PPU_MISC_CTRL_BURST_LEN__MASK; +} + +#define REG_PPU_RDMA_RDMA_S_STATUS 0x00007000 +#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000 +#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18 +static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK; +} +#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000 +#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16 +static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK; +} +#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc +#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2 +static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK; +} +#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003 +#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK; +} + +#define REG_PPU_RDMA_RDMA_S_POINTER 0x00007004 +#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000 +#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK; +} +#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000 +#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK; +} +#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0 +#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK; +} +#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020 +#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK; +} +#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010 +#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & 
PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK; +} +#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008 +#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK; +} +#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004 +#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK; +} +#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002 +#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK; +} +#define PPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001 +#define PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER__MASK; +} + +#define REG_PPU_RDMA_RDMA_OPERATION_ENABLE 0x00007008 +#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe +#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1 +static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK; +} +#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001 +#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK; +} + +#define REG_PPU_RDMA_RDMA_CUBE_IN_WIDTH 0x0000700c +#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000 +#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK; +} +#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff +#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK; +} + +#define REG_PPU_RDMA_RDMA_CUBE_IN_HEIGHT 0x00007010 +#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000 +#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK; +} +#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff +#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK; +} + +#define REG_PPU_RDMA_RDMA_CUBE_IN_CHANNEL 0x00007014 +#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000 
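As a usage note for this generated header: every field is emitted as a __MASK/__SHIFT pair plus an inline pack helper that shifts and then masks, so whole register values are built by OR-ing helper results together. A minimal sketch of the idiom for the cube-dimension registers just above, assuming a hypothetical npu_write() MMIO wrapper (the driver's real register I/O path is not shown here) and leaving aside whether the hardware expects dimensions as N or N-1:

#include <stdint.h>

/* Hypothetical MMIO wrapper; stands in for the driver's register writer. */
void npu_write(uint32_t offset, uint32_t value);

static void ppu_rdma_set_cube_in(uint32_t width, uint32_t height)
{
	/*
	 * The helpers mask after shifting, so an oversized value is
	 * truncated to the 13-bit field instead of spilling into the
	 * adjacent RESERVED bits.
	 */
	npu_write(REG_PPU_RDMA_RDMA_CUBE_IN_WIDTH,
		  PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(width));
	npu_write(REG_PPU_RDMA_RDMA_CUBE_IN_HEIGHT,
		  PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(height));
}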
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13 +static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK; +} +#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff +#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK; +} + +#define REG_PPU_RDMA_RDMA_SRC_BASE_ADDR 0x0000701c +#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff +#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK; +} + +#define REG_PPU_RDMA_RDMA_SRC_LINE_STRIDE 0x00007024 +#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK 0xfffffff0 +#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT 4 +static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK; +} +#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK 0x0000000f +#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK; +} + +#define REG_PPU_RDMA_RDMA_SRC_SURF_STRIDE 0x00007028 +#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK 0xfffffff0 +#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT 4 +static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK; +} +#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK 0x0000000f +#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK; +} + +#define REG_PPU_RDMA_RDMA_DATA_FORMAT 0x00007030 +#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK 0xfffffffc +#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT 2 +static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK; +} +#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK 0x00000003 +#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT 0 +static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION(uint32_t val) +{ + return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK; +} + +#define REG_DDMA_CFG_OUTSTANDING 0x00008000 +#define DDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000 +#define DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16 +static inline uint32_t DDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & DDMA_CFG_OUTSTANDING_RESERVED_0__MASK; +} +#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00 
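The header only generates pack helpers; decoding a read-back value is the usual (reg & __MASK) >> __SHIFT, which the kernel's FIELD_GET() from <linux/bitfield.h> also provides for compile-time-constant masks. A small sketch using a token-pasting macro (FIELD_GET32 is illustrative, not part of this header); note that the 0xfffffff0 mask with shift 4 on the stride fields suggests, but does not state, a 16-byte stride granularity:

#include <stdint.h>

/* Illustrative decode counterpart to the generated pack helpers. */
#define FIELD_GET32(reg, field) \
	(((reg) & field##__MASK) >> field##__SHIFT)

static uint32_t ppu_rdma_src_surf_stride(uint32_t reg)
{
	/* Raw field value from bits [31:4] of RDMA_SRC_SURF_STRIDE. */
	return FIELD_GET32(reg, PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE);
}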
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8 +static inline uint32_t DDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val) +{ + return ((val) << DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK; +} +#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff +#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0 +static inline uint32_t DDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val) +{ + return ((val) << DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK; +} + +#define REG_DDMA_RD_WEIGHT_0 0x00008004 +#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000 +#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24 +static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val) +{ + return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK; +} +#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000 +#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16 +static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val) +{ + return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK; +} +#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00 +#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8 +static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val) +{ + return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK; +} +#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff +#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0 +static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val) +{ + return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK; +} + +#define REG_DDMA_WR_WEIGHT_0 0x00008008 +#define DDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000 +#define DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16 +static inline uint32_t DDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & DDMA_WR_WEIGHT_0_RESERVED_0__MASK; +} +#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00 +#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8 +static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val) +{ + return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK; +} +#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff +#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0 +static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val) +{ + return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK; +} + +#define REG_DDMA_CFG_ID_ERROR 0x0000800c +#define DDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00 +#define DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10 +static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_0__MASK; +} +#define DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0 +#define DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6 +static inline uint32_t DDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val) +{ + return ((val) << DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK; +} +#define DDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020 +#define DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5 +static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val) +{ + return ((val) << DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_1__MASK; +} +#define DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f +#define DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0 +static inline uint32_t 
DDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val) +{ + return ((val) << DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK; +} + +#define REG_DDMA_RD_WEIGHT_1 0x00008010 +#define DDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00 +#define DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8 +static inline uint32_t DDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & DDMA_RD_WEIGHT_1_RESERVED_0__MASK; +} +#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff +#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0 +static inline uint32_t DDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val) +{ + return ((val) << DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK; +} + +#define REG_DDMA_CFG_DMA_FIFO_CLR 0x00008014 +#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe +#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1 +static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK; +} +#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001 +#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0 +static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK; +} + +#define REG_DDMA_CFG_DMA_ARB 0x00008018 +#define DDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00 +#define DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10 +static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_0__MASK; +} +#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200 +#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9 +static inline uint32_t DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK; +} +#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100 +#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8 +static inline uint32_t DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK; +} +#define DDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080 +#define DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7 +static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_1__MASK; +} +#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070 +#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4 +static inline uint32_t DDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK; +} +#define DDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008 +#define DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3 +static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_2__MASK; +} +#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007 +#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0 +static inline uint32_t DDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK; +} + +#define REG_DDMA_CFG_DMA_RD_QOS 0x00008020 +#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00 +#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10 +static inline uint32_t DDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val) +{ + return ((val) << 
DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK; +} +#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300 +#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8 +static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK; +} +#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0 +#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6 +static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK; +} +#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030 +#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4 +static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK; +} +#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c +#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2 +static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK; +} +#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003 +#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0 +static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK; +} + +#define REG_DDMA_CFG_DMA_RD_CFG 0x00008024 +#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000 +#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13 +static inline uint32_t DDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK; +} +#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000 +#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12 +static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK; +} +#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00 +#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8 +static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK; +} +#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0 +#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5 +static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK; +} +#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018 +#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3 +static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK; +} +#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007 +#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0 +static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK; +} + +#define REG_DDMA_CFG_DMA_WR_CFG 0x00008028 +#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000 +#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13 +static inline uint32_t DDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK; +} +#define 
DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000 +#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12 +static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK; +} +#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00 +#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8 +static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK; +} +#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0 +#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5 +static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK; +} +#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018 +#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3 +static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK; +} +#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007 +#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0 +static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK; +} + +#define REG_DDMA_CFG_DMA_WSTRB 0x0000802c +#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff +#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0 +static inline uint32_t DDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val) +{ + return ((val) << DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK; +} + +#define REG_DDMA_CFG_STATUS 0x00008030 +#define DDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00 +#define DDMA_CFG_STATUS_RESERVED_0__SHIFT 9 +static inline uint32_t DDMA_CFG_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << DDMA_CFG_STATUS_RESERVED_0__SHIFT) & DDMA_CFG_STATUS_RESERVED_0__MASK; +} +#define DDMA_CFG_STATUS_IDEL__MASK 0x00000100 +#define DDMA_CFG_STATUS_IDEL__SHIFT 8 +static inline uint32_t DDMA_CFG_STATUS_IDEL(uint32_t val) +{ + return ((val) << DDMA_CFG_STATUS_IDEL__SHIFT) & DDMA_CFG_STATUS_IDEL__MASK; +} +#define DDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff +#define DDMA_CFG_STATUS_RESERVED_1__SHIFT 0 +static inline uint32_t DDMA_CFG_STATUS_RESERVED_1(uint32_t val) +{ + return ((val) << DDMA_CFG_STATUS_RESERVED_1__SHIFT) & DDMA_CFG_STATUS_RESERVED_1__MASK; +} + +#define REG_SDMA_CFG_OUTSTANDING 0x00009000 +#define SDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000 +#define SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16 +static inline uint32_t SDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & SDMA_CFG_OUTSTANDING_RESERVED_0__MASK; +} +#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00 +#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8 +static inline uint32_t SDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val) +{ + return ((val) << SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK; +} +#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff +#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0 +static inline uint32_t SDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val) +{ + return ((val) << SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK; +} + +#define REG_SDMA_RD_WEIGHT_0 0x00009004 +#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000 +#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24 +static inline uint32_t 
SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val) +{ + return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK; +} +#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000 +#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16 +static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val) +{ + return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK; +} +#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00 +#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8 +static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val) +{ + return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK; +} +#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff +#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0 +static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val) +{ + return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK; +} + +#define REG_SDMA_WR_WEIGHT_0 0x00009008 +#define SDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000 +#define SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16 +static inline uint32_t SDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & SDMA_WR_WEIGHT_0_RESERVED_0__MASK; +} +#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00 +#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8 +static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val) +{ + return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK; +} +#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff +#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0 +static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val) +{ + return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK; +} + +#define REG_SDMA_CFG_ID_ERROR 0x0000900c +#define SDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00 +#define SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10 +static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_0__MASK; +} +#define SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0 +#define SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6 +static inline uint32_t SDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val) +{ + return ((val) << SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK; +} +#define SDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020 +#define SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5 +static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val) +{ + return ((val) << SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_1__MASK; +} +#define SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f +#define SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0 +static inline uint32_t SDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val) +{ + return ((val) << SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK; +} + +#define REG_SDMA_RD_WEIGHT_1 0x00009010 +#define SDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00 +#define SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8 +static inline uint32_t SDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & SDMA_RD_WEIGHT_1_RESERVED_0__MASK; +} +#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff +#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0 +static inline uint32_t SDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val) +{ + return ((val) << SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & 
SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK; +} + +#define REG_SDMA_CFG_DMA_FIFO_CLR 0x00009014 +#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe +#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1 +static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK; +} +#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001 +#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0 +static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK; +} + +#define REG_SDMA_CFG_DMA_ARB 0x00009018 +#define SDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00 +#define SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10 +static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_0__MASK; +} +#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200 +#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9 +static inline uint32_t SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK; +} +#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100 +#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8 +static inline uint32_t SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK; +} +#define SDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080 +#define SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7 +static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_1__MASK; +} +#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070 +#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4 +static inline uint32_t SDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK; +} +#define SDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008 +#define SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3 +static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_2__MASK; +} +#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007 +#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0 +static inline uint32_t SDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK; +} + +#define REG_SDMA_CFG_DMA_RD_QOS 0x00009020 +#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00 +#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10 +static inline uint32_t SDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK; +} +#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300 +#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8 +static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK; +} +#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0 +#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6 +static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK; +} +#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030 +#define 
SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4 +static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK; +} +#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c +#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2 +static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK; +} +#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003 +#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0 +static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK; +} + +#define REG_SDMA_CFG_DMA_RD_CFG 0x00009024 +#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000 +#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13 +static inline uint32_t SDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK; +} +#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000 +#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12 +static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK; +} +#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00 +#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8 +static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK; +} +#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0 +#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5 +static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK; +} +#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018 +#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3 +static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK; +} +#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007 +#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0 +static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK; +} + +#define REG_SDMA_CFG_DMA_WR_CFG 0x00009028 +#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000 +#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13 +static inline uint32_t SDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK; +} +#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000 +#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12 +static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK; +} +#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00 +#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8 +static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK; +} +#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0 +#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5 +static inline uint32_t 
SDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK; +} +#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018 +#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3 +static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK; +} +#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007 +#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0 +static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK; +} + +#define REG_SDMA_CFG_DMA_WSTRB 0x0000902c +#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff +#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0 +static inline uint32_t SDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val) +{ + return ((val) << SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK; +} + +#define REG_SDMA_CFG_STATUS 0x00009030 +#define SDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00 +#define SDMA_CFG_STATUS_RESERVED_0__SHIFT 9 +static inline uint32_t SDMA_CFG_STATUS_RESERVED_0(uint32_t val) +{ + return ((val) << SDMA_CFG_STATUS_RESERVED_0__SHIFT) & SDMA_CFG_STATUS_RESERVED_0__MASK; +} +#define SDMA_CFG_STATUS_IDEL__MASK 0x00000100 +#define SDMA_CFG_STATUS_IDEL__SHIFT 8 +static inline uint32_t SDMA_CFG_STATUS_IDEL(uint32_t val) +{ + return ((val) << SDMA_CFG_STATUS_IDEL__SHIFT) & SDMA_CFG_STATUS_IDEL__MASK; +} +#define SDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff +#define SDMA_CFG_STATUS_RESERVED_1__SHIFT 0 +static inline uint32_t SDMA_CFG_STATUS_RESERVED_1(uint32_t val) +{ + return ((val) << SDMA_CFG_STATUS_RESERVED_1__SHIFT) & SDMA_CFG_STATUS_RESERVED_1__MASK; +} + +#define REG_GLOBAL_OPERATION_ENABLE 0x0000f008 +#define GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK 0xffffff80 +#define GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT 7 +static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_0(uint32_t val) +{ + return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK; +} +#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK 0x00000040 +#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT 6 +static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN(uint32_t val) +{ + return ((val) << GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK; +} +#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK 0x00000020 +#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT 5 +static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_OP_EN(uint32_t val) +{ + return ((val) << GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK; +} +#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK 0x00000010 +#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT 4 +static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN(uint32_t val) +{ + return ((val) << GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK; +} +#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK 0x00000008 +#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT 3 +static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_OP_EN(uint32_t val) +{ + return ((val) << GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK; +} +#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK 0x00000004 +#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT 2 +static inline uint32_t GLOBAL_OPERATION_ENABLE_CORE_OP_EN(uint32_t 
val) +{ + return ((val) << GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK; +} +#define GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK 0x00000002 +#define GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT 1 +static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_1(uint32_t val) +{ + return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK; +} +#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK 0x00000001 +#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT 0 +static inline uint32_t GLOBAL_OPERATION_ENABLE_CNA_OP_EN(uint32_t val) +{ + return ((val) << GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK; +} + +#endif /* __ROCKET_REGISTERS_XML__ */ diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 75c7db8b156a..7855bbf752b1 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2033,7 +2033,7 @@ void __init acpi_ec_ecdt_probe(void) goto out; } - if (!strstarts(ecdt_ptr->id, "\\")) { + if (!strlen(ecdt_ptr->id)) { /* * The ECDT table on some MSI notebooks contains invalid data, together * with an empty ID string (""). @@ -2042,9 +2042,13 @@ void __init acpi_ec_ecdt_probe(void) * a "fully qualified reference to the (...) embedded controller device", * so this string always has to start with a backslash. * - * By verifying this we can avoid such faulty ECDT tables in a safe way. + * However some ThinkBook machines have an ECDT table with a valid EC + * description but an invalid ID string ("_SB.PC00.LPCB.EC0"). + * + * Because of this we only check if the ID string is empty in order to + * avoid the obvious cases. */ - pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id); + pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n"); goto out; } diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 755003bf3a45..8972446b7162 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -180,7 +180,7 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) struct acpi_processor *pr = per_cpu(processors, cpu); int ret; - if (!pr || !pr->performance) + if (!pr) continue; /* @@ -197,6 +197,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, ret); + if (!pr->performance) + continue; + ret = acpi_processor_get_platform_limit(pr); if (ret) pr_err("Failed to update freq constraint for CPU%d (%d)\n", diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 2946ae6d4b2c..2586e77ebf45 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2075,7 +2075,7 @@ out: * Check if a link is established. This is a relaxed version of * ata_phys_link_online() which accounts for the fact that this is potentially * called after changing the link power management policy, which may not be - * reflected immediately in the SSTAUS register (e.g., we may still be seeing + * reflected immediately in the SStatus register (e.g., we may still be seeing * the PHY in partial, slumber or devsleep power management state). * So check that: * - A device is still present, that is, DET is 1h (Device presence detected @@ -2089,8 +2089,13 @@ static bool ata_eh_link_established(struct ata_link *link) u32 sstatus; u8 det, ipm; + /* + * For old IDE/PATA adapters that do not have a valid scr_read method, + * or if reading the SStatus register fails, assume that the device is + * present. Device probe will determine if that is really the case.
+ */ if (sata_scr_read(link, SCR_STATUS, &sstatus)) - return false; + return true; det = sstatus & 0x0f; ipm = (sstatus >> 8) & 0x0f; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 57f674f51b0c..2ded5e476d6e 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3904,21 +3904,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc, /* Check cdl_ctrl */ switch (buf[0] & 0x03) { case 0: - /* Disable CDL if it is enabled */ - if (!(dev->flags & ATA_DFLAG_CDL_ENABLED)) - return 0; + /* Disable CDL */ ata_dev_dbg(dev, "Disabling CDL\n"); cdl_action = 0; dev->flags &= ~ATA_DFLAG_CDL_ENABLED; break; case 0x02: /* - * Enable CDL if not already enabled. Since this is mutually - * exclusive with NCQ priority, allow this only if NCQ priority - * is disabled. + * Enable CDL. Since CDL is mutually exclusive with NCQ + * priority, allow this only if NCQ priority is disabled. */ - if (dev->flags & ATA_DFLAG_CDL_ENABLED) - return 0; if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { ata_dev_err(dev, "NCQ priority must be disabled to enable CDL\n"); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index e21492981f7d..f6d6276974ee 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -380,6 +380,9 @@ enum { /* this is/was a write request */ __EE_WRITE, + /* hand back using mempool_free(e, drbd_buffer_page_pool) */ + __EE_RELEASE_TO_MEMPOOL, + /* this is/was a write same request */ __EE_WRITE_SAME, @@ -402,6 +405,7 @@ enum { #define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE) #define EE_SUBMITTED (1<<__EE_SUBMITTED) #define EE_WRITE (1<<__EE_WRITE) +#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL) #define EE_WRITE_SAME (1<<__EE_WRITE_SAME) #define EE_APPLICATION (1<<__EE_APPLICATION) #define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ) @@ -858,7 +862,6 @@ struct drbd_device { struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */ struct list_head done_ee; /* need to send P_WRITE_ACK */ struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */ - struct list_head net_ee; /* zero-copy network send in progress */ struct list_head resync_reads; atomic_t pp_in_use; /* allocated from page pool */ @@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ extern mempool_t drbd_request_mempool; extern mempool_t drbd_ee_mempool; -/* drbd's page pool, used to buffer data received from the peer, - * or data requested by the peer. - * - * This does not have an emergency reserve. - * - * When allocating from this pool, it first takes pages from the pool. - * Only if the pool is depleted will try to allocate from the system. - * - * The assumption is that pages taken from this pool will be processed, - * and given back, "quickly", and then can be recycled, so we can avoid - * frequent calls to alloc_page(), and still will be able to make progress even - * under memory pressure. - */ -extern struct page *drbd_pp_pool; -extern spinlock_t drbd_pp_lock; -extern int drbd_pp_vacant; -extern wait_queue_head_t drbd_pp_wait; - /* We also need a standard (emergency-reserve backed) page pool * for meta data IO (activity log, bitmap). * We can keep it global, as long as it is used as "N pages at a time". 
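/*
 * For context, a minimal sketch of the mempool pattern that the hunks below
 * move DRBD's receive buffers to (the pool name follows the patch; the init
 * size, GFP flags and helper names here are illustrative, not the exact
 * driver code):
 */
#include <linux/mempool.h>

static mempool_t example_page_pool;	/* stands in for drbd_buffer_page_pool */

static int example_pool_init(unsigned int nr_pages)
{
	/* Pre-reserve nr_pages order-0 pages. mempool_alloc() tries the page
	 * allocator first and only dips into this reserve when that fails,
	 * so receive buffers stay available under memory pressure. */
	return mempool_init_page_pool(&example_page_pool, nr_pages, 0);
}

static struct page *example_get_page(void)
{
	/* __GFP_NOWARN mirrors the driver's GFP_TRY: allocation failure is
	 * quiet and the caller simply retries. */
	return mempool_alloc(&example_page_pool, __GFP_HIGHMEM | __GFP_NOWARN);
}

static void example_put_page(struct page *page)
{
	mempool_free(page, &example_page_pool);
}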
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait; */ #define DRBD_MIN_POOL_PAGES 128 extern mempool_t drbd_md_io_page_pool; +extern mempool_t drbd_buffer_page_pool; /* We also need to make sure we get a bio * when we need it for housekeeping purposes */ @@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, sector_t, unsigned int, unsigned int, gfp_t) __must_hold(local); -extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, - int); -#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0) -#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1) +extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req); extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool); extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); extern int drbd_connected(struct drbd_peer_device *); @@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page) for (; page && ({ n = page_chain_next(page); 1; }); page = n) -static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req) -{ - struct page *page = peer_req->pages; - page_chain_for_each(page) { - if (page_count(page) > 1) - return 1; - } - return 0; -} - static inline union drbd_state drbd_read_state(struct drbd_device *device) { struct drbd_resource *resource = device->resource; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 52724b79be30..c73376886e7a 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ mempool_t drbd_request_mempool; mempool_t drbd_ee_mempool; mempool_t drbd_md_io_page_pool; +mempool_t drbd_buffer_page_pool; struct bio_set drbd_md_io_bio_set; struct bio_set drbd_io_bio_set; -/* I do not use a standard mempool, because: - 1) I want to hand out the pre-allocated objects first. - 2) I want to be able to interrupt sleeping allocation with a signal. - Note: This is a single linked list, the next pointer is the private - member of struct page. - */ -struct page *drbd_pp_pool; -DEFINE_SPINLOCK(drbd_pp_lock); -int drbd_pp_vacant; -wait_queue_head_t drbd_pp_wait; - DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); static const struct block_device_operations drbd_ops = { @@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device, struct drbd_peer_request *peer_req) { + bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL); struct page *page = peer_req->pages; unsigned len = peer_req->i.size; int err; @@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device, page_chain_for_each(page) { unsigned l = min_t(unsigned, len, PAGE_SIZE); - err = _drbd_send_page(peer_device, page, 0, l, - page_chain_next(page) ? MSG_MORE : 0); + if (likely(use_sendpage)) + err = _drbd_send_page(peer_device, page, 0, l, + page_chain_next(page) ? MSG_MORE : 0); + else + err = _drbd_no_send_page(peer_device, page, 0, l, + page_chain_next(page) ? 
MSG_MORE : 0); + if (err) return err; len -= l; @@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device) INIT_LIST_HEAD(&device->sync_ee); INIT_LIST_HEAD(&device->done_ee); INIT_LIST_HEAD(&device->read_ee); - INIT_LIST_HEAD(&device->net_ee); INIT_LIST_HEAD(&device->resync_reads); INIT_LIST_HEAD(&device->resync_work.list); INIT_LIST_HEAD(&device->unplug_work.list); @@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device) D_ASSERT(device, list_empty(&device->sync_ee)); D_ASSERT(device, list_empty(&device->done_ee)); D_ASSERT(device, list_empty(&device->read_ee)); - D_ASSERT(device, list_empty(&device->net_ee)); D_ASSERT(device, list_empty(&device->resync_reads)); D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q)); D_ASSERT(device, list_empty(&device->resync_work.list)); @@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device) static void drbd_destroy_mempools(void) { - struct page *page; - - while (drbd_pp_pool) { - page = drbd_pp_pool; - drbd_pp_pool = (struct page *)page_private(page); - __free_page(page); - drbd_pp_vacant--; - } - /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */ bioset_exit(&drbd_io_bio_set); bioset_exit(&drbd_md_io_bio_set); + mempool_exit(&drbd_buffer_page_pool); mempool_exit(&drbd_md_io_page_pool); mempool_exit(&drbd_ee_mempool); mempool_exit(&drbd_request_mempool); @@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void) static int drbd_create_mempools(void) { - struct page *page; const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count; - int i, ret; + int ret; /* caches */ drbd_request_cache = kmem_cache_create( @@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void) if (ret) goto Enomem; + ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0); + if (ret) + goto Enomem; + ret = mempool_init_slab_pool(&drbd_request_mempool, number, drbd_request_cache); if (ret) @@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void) if (ret) goto Enomem; - for (i = 0; i < number; i++) { - page = alloc_page(GFP_HIGHUSER); - if (!page) - goto Enomem; - set_page_private(page, (unsigned long)drbd_pp_pool); - drbd_pp_pool = page; - } - drbd_pp_vacant = number; - return 0; Enomem: @@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device) rr = drbd_free_peer_reqs(device, &device->done_ee); if (rr) drbd_err(device, "%d EEs in done list found!\n", rr); - - rr = drbd_free_peer_reqs(device, &device->net_ee); - if (rr) - drbd_err(device, "%d EEs in net list found!\n", rr); } /* caution. no locking. */ @@ -2863,11 +2839,6 @@ static int __init drbd_init(void) return err; } - /* - * allocate all necessary structs - */ - init_waitqueue_head(&drbd_pp_wait); - drbd_proc = NULL; /* play safe for drbd_cleanup */ idr_init(&drbd_devices); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 975024cf03c5..caaf2781136d 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -33,6 +33,7 @@ #include <linux/string.h> #include <linux/scatterlist.h> #include <linux/part_stat.h> +#include <linux/mempool.h> #include "drbd_int.h" #include "drbd_protocol.h" #include "drbd_req.h" @@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int); #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) -/* - * some helper functions to deal with single linked page lists, - * page->private being our "next" pointer. - */ - -/* If at least n pages are linked at head, get n pages off. 
- * Otherwise, don't modify head, and return NULL. - * Locking is the responsibility of the caller. - */ -static struct page *page_chain_del(struct page **head, int n) -{ - struct page *page; - struct page *tmp; - - BUG_ON(!n); - BUG_ON(!head); - - page = *head; - - if (!page) - return NULL; - - while (page) { - tmp = page_chain_next(page); - if (--n == 0) - break; /* found sufficient pages */ - if (tmp == NULL) - /* insufficient pages, don't use any of them. */ - return NULL; - page = tmp; - } - - /* add end of list marker for the returned list */ - set_page_private(page, 0); - /* actual return value, and adjustment of head */ - page = *head; - *head = tmp; - return page; -} - -/* may be used outside of locks to find the tail of a (usually short) - * "private" page chain, before adding it back to a global chain head - * with page_chain_add() under a spinlock. */ -static struct page *page_chain_tail(struct page *page, int *len) -{ - struct page *tmp; - int i = 1; - while ((tmp = page_chain_next(page))) { - ++i; - page = tmp; - } - if (len) - *len = i; - return page; -} - -static int page_chain_free(struct page *page) -{ - struct page *tmp; - int i = 0; - page_chain_for_each_safe(page, tmp) { - put_page(page); - ++i; - } - return i; -} - -static void page_chain_add(struct page **head, - struct page *chain_first, struct page *chain_last) -{ -#if 1 - struct page *tmp; - tmp = page_chain_tail(chain_first, NULL); - BUG_ON(tmp != chain_last); -#endif - - /* add chain to head */ - set_page_private(chain_last, (unsigned long)*head); - *head = chain_first; -} - -static struct page *__drbd_alloc_pages(struct drbd_device *device, - unsigned int number) +static struct page *__drbd_alloc_pages(unsigned int number) { struct page *page = NULL; struct page *tmp = NULL; unsigned int i = 0; - /* Yes, testing drbd_pp_vacant outside the lock is racy. - * So what. It saves a spin_lock. */ - if (drbd_pp_vacant >= number) { - spin_lock(&drbd_pp_lock); - page = page_chain_del(&drbd_pp_pool, number); - if (page) - drbd_pp_vacant -= number; - spin_unlock(&drbd_pp_lock); - if (page) - return page; - } - /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. */ for (i = 0; i < number; i++) { - tmp = alloc_page(GFP_TRY); + tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY); if (!tmp) - break; + goto fail; set_page_private(tmp, (unsigned long)page); page = tmp; } - - if (i == number) - return page; - - /* Not enough pages immediately available this time. - * No need to jump around here, drbd_alloc_pages will retry this - * function "soon". */ - if (page) { - tmp = page_chain_tail(page, NULL); - spin_lock(&drbd_pp_lock); - page_chain_add(&drbd_pp_pool, page, tmp); - drbd_pp_vacant += i; - spin_unlock(&drbd_pp_lock); + return page; +fail: + page_chain_for_each_safe(page, tmp) { + set_page_private(page, 0); + mempool_free(page, &drbd_buffer_page_pool); } return NULL; } -static void reclaim_finished_net_peer_reqs(struct drbd_device *device, - struct list_head *to_be_freed) -{ - struct drbd_peer_request *peer_req, *tmp; - - /* The EEs are always appended to the end of the list. Since - they are sent in order over the wire, they have to finish - in order. As soon as we see the first not finished we can - stop to examine the list... 
*/ - - list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) { - if (drbd_peer_req_has_active_page(peer_req)) - break; - list_move(&peer_req->w.list, to_be_freed); - } -} - -static void drbd_reclaim_net_peer_reqs(struct drbd_device *device) -{ - LIST_HEAD(reclaimed); - struct drbd_peer_request *peer_req, *t; - - spin_lock_irq(&device->resource->req_lock); - reclaim_finished_net_peer_reqs(device, &reclaimed); - spin_unlock_irq(&device->resource->req_lock); - list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) - drbd_free_net_peer_req(device, peer_req); -} - -static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection) -{ - struct drbd_peer_device *peer_device; - int vnr; - - rcu_read_lock(); - idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { - struct drbd_device *device = peer_device->device; - if (!atomic_read(&device->pp_in_use_by_net)) - continue; - - kref_get(&device->kref); - rcu_read_unlock(); - drbd_reclaim_net_peer_reqs(device); - kref_put(&device->kref, drbd_destroy_device); - rcu_read_lock(); - } - rcu_read_unlock(); -} - /** * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled) * @peer_device: DRBD device. @@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int bool retry) { struct drbd_device *device = peer_device->device; - struct page *page = NULL; + struct page *page; struct net_conf *nc; - DEFINE_WAIT(wait); unsigned int mxb; rcu_read_lock(); @@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int mxb = nc ? nc->max_buffers : 1000000; rcu_read_unlock(); - if (atomic_read(&device->pp_in_use) < mxb) - page = __drbd_alloc_pages(device, number); - - /* Try to keep the fast path fast, but occasionally we need - * to reclaim the pages we lended to the network stack. */ - if (page && atomic_read(&device->pp_in_use_by_net) > 512) - drbd_reclaim_net_peer_reqs(device); - - while (page == NULL) { - prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE); - - drbd_reclaim_net_peer_reqs(device); - - if (atomic_read(&device->pp_in_use) < mxb) { - page = __drbd_alloc_pages(device, number); - if (page) - break; - } - - if (!retry) - break; - - if (signal_pending(current)) { - drbd_warn(device, "drbd_alloc_pages interrupted!\n"); - break; - } - - if (schedule_timeout(HZ/10) == 0) - mxb = UINT_MAX; - } - finish_wait(&drbd_pp_wait, &wait); + if (atomic_read(&device->pp_in_use) >= mxb) + schedule_timeout_interruptible(HZ / 10); + page = __drbd_alloc_pages(number); if (page) atomic_add(number, &device->pp_in_use); @@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int * Is also used from inside an other spin_lock_irq(&resource->req_lock); * Either links the page chain back to the global pool, * or returns all pages to the system. */ -static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net) +static void drbd_free_pages(struct drbd_device *device, struct page *page) { - atomic_t *a = is_net ? 
&device->pp_in_use_by_net : &device->pp_in_use; - int i; + struct page *tmp; + int i = 0; if (page == NULL) return; - if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count) - i = page_chain_free(page); - else { - struct page *tmp; - tmp = page_chain_tail(page, &i); - spin_lock(&drbd_pp_lock); - page_chain_add(&drbd_pp_pool, page, tmp); - drbd_pp_vacant += i; - spin_unlock(&drbd_pp_lock); - } - i = atomic_sub_return(i, a); + page_chain_for_each_safe(page, tmp) { + set_page_private(page, 0); + if (page_count(page) == 1) + mempool_free(page, &drbd_buffer_page_pool); + else + put_page(page); + i++; + } + i = atomic_sub_return(i, &device->pp_in_use); if (i < 0) - drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n", - is_net ? "pp_in_use_by_net" : "pp_in_use", i); - wake_up(&drbd_pp_wait); + drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i); } /* @@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto gfpflags_allow_blocking(gfp_mask)); if (!page) goto fail; + if (!mempool_is_saturated(&drbd_buffer_page_pool)) + peer_req->flags |= EE_RELEASE_TO_MEMPOOL; } memset(peer_req, 0, sizeof(*peer_req)); @@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto return NULL; } -void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req, - int is_net) +void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req) { might_sleep(); if (peer_req->flags & EE_HAS_DIGEST) kfree(peer_req->digest); - drbd_free_pages(device, peer_req->pages, is_net); + drbd_free_pages(device, peer_req->pages); D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0); D_ASSERT(device, drbd_interval_empty(&peer_req->i)); if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) { @@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list) LIST_HEAD(work_list); struct drbd_peer_request *peer_req, *t; int count = 0; - int is_net = list == &device->net_ee; spin_lock_irq(&device->resource->req_lock); list_splice_init(list, &work_list); spin_unlock_irq(&device->resource->req_lock); list_for_each_entry_safe(peer_req, t, &work_list, w.list) { - __drbd_free_peer_req(device, peer_req, is_net); + drbd_free_peer_req(device, peer_req); count++; } return count; @@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list) static int drbd_finish_peer_reqs(struct drbd_device *device) { LIST_HEAD(work_list); - LIST_HEAD(reclaimed); struct drbd_peer_request *peer_req, *t; int err = 0; spin_lock_irq(&device->resource->req_lock); - reclaim_finished_net_peer_reqs(device, &reclaimed); list_splice_init(&device->done_ee, &work_list); spin_unlock_irq(&device->resource->req_lock); - list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) - drbd_free_net_peer_req(device, peer_req); - /* possible callbacks here: * e_end_block, and e_end_resync_block, e_send_superseded. * all ignore the last argument. @@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size) data_size -= len; } kunmap(page); - drbd_free_pages(peer_device->device, page, 0); + drbd_free_pages(peer_device->device, page); return err; } @@ -5224,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device) put_ldev(device); } - /* tcp_close and release of sendpage pages can be deferred. 
I don't - * want to use SO_LINGER, because apparently it can be deferred for - * more than 20 seconds (longest time I checked). - * - * Actually we don't care for exactly when the network stack does its - * put_page(), but release our reference on these pages right here. - */ - i = drbd_free_peer_reqs(device, &device->net_ee); - if (i) - drbd_info(device, "net_ee not empty, killed %u entries\n", i); i = atomic_read(&device->pp_in_use_by_net); if (i) drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i); @@ -5980,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi) while (get_t_state(thi) == RUNNING) { drbd_thread_current_set_cpu(thi); - conn_reclaim_net_peer_reqs(connection); - if (test_and_clear_bit(SEND_PING, &connection->flags)) { if (drbd_send_ping(connection)) { drbd_err(connection, "drbd_send_ping has failed\n"); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index a6ea737b3b71..dea3e79d044f 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1030,22 +1030,6 @@ out: return 1; } -/* helper */ -static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req) -{ - if (drbd_peer_req_has_active_page(peer_req)) { - /* This might happen if sendpage() has not finished */ - int i = PFN_UP(peer_req->i.size); - atomic_add(i, &device->pp_in_use_by_net); - atomic_sub(i, &device->pp_in_use); - spin_lock_irq(&device->resource->req_lock); - list_add_tail(&peer_req->w.list, &device->net_ee); - spin_unlock_irq(&device->resource->req_lock); - wake_up(&drbd_pp_wait); - } else - drbd_free_peer_req(device, peer_req); -} - /** * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST * @w: work object. @@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel) int err; if (unlikely(cancel)) { - drbd_free_peer_req(device, peer_req); - dec_unacked(device); - return 0; + err = 0; + goto out; } if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { @@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel) err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req); } - dec_unacked(device); - - move_to_net_ee_or_free(device, peer_req); - if (unlikely(err)) drbd_err(device, "drbd_send_block() failed\n"); +out: + dec_unacked(device); + drbd_free_peer_req(device, peer_req); + return err; } @@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel) int err; if (unlikely(cancel)) { - drbd_free_peer_req(device, peer_req); - dec_unacked(device); - return 0; + err = 0; + goto out; } if (get_ldev_if_state(device, D_FAILED)) { @@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel) /* update resync data with failure */ drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size); } - - dec_unacked(device); - - move_to_net_ee_or_free(device, peer_req); - if (unlikely(err)) drbd_err(device, "drbd_send_block() failed\n"); +out: + dec_unacked(device); + drbd_free_peer_req(device, peer_req); + return err; } @@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) int err, eq = 0; if (unlikely(cancel)) { - drbd_free_peer_req(device, peer_req); - dec_unacked(device); - return 0; + err = 0; + goto out; } if (get_ldev(device)) { @@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) if (drbd_ratelimit()) drbd_err(device, "Sending NegDReply. 
I guess it gets messy.\n"); } - - dec_unacked(device); - move_to_net_ee_or_free(device, peer_req); - if (unlikely(err)) drbd_err(device, "drbd_send_block/ack() failed\n"); +out: + dec_unacked(device); + drbd_free_peer_req(device, peer_req); + return err; } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 6561d2a561fa..99abd67b708b 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -235,7 +235,7 @@ struct ublk_device { struct completion completion; unsigned int nr_queues_ready; - unsigned int nr_privileged_daemon; + bool unprivileged_daemons; struct mutex cancel_mutex; bool canceling; pid_t ublksrv_tgid; @@ -1389,7 +1389,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq, { blk_status_t res; - if (unlikely(ubq->fail_io)) + if (unlikely(READ_ONCE(ubq->fail_io))) return BLK_STS_TARGET; /* With recovery feature enabled, force_abort is set in @@ -1401,7 +1401,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq, * Note: force_abort is guaranteed to be seen because it is set * before request queue is unquiesced. */ - if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort)) + if (ublk_nosrv_should_queue_io(ubq) && + unlikely(READ_ONCE(ubq->force_abort))) return BLK_STS_IOERR; if (check_cancel && unlikely(ubq->canceling)) @@ -1550,7 +1551,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub) /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */ ub->mm = NULL; ub->nr_queues_ready = 0; - ub->nr_privileged_daemon = 0; + ub->unprivileged_daemons = false; ub->ublksrv_tgid = -1; } @@ -1644,7 +1645,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp) * Transition the device to the nosrv state. What exactly this * means depends on the recovery flags */ - blk_mq_quiesce_queue(disk->queue); if (ublk_nosrv_should_stop_dev(ub)) { /* * Allow any pending/future I/O to pass through quickly @@ -1652,8 +1652,7 @@ * waits for all pending I/O to complete */ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) - ublk_get_queue(ub, i)->force_abort = true; - blk_mq_unquiesce_queue(disk->queue); + WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true); ublk_stop_dev_unlocked(ub); } else { @@ -1663,9 +1662,8 @@ } else { ub->dev_info.state = UBLK_S_DEV_FAIL_IO; for (i = 0; i < ub->dev_info.nr_hw_queues; i++) - ublk_get_queue(ub, i)->fail_io = true; + WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true); } - blk_mq_unquiesce_queue(disk->queue); } unlock: mutex_unlock(&ub->mutex); @@ -1980,12 +1978,10 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) __must_hold(&ub->mutex) { ubq->nr_io_ready++; - if (ublk_queue_ready(ubq)) { + if (ublk_queue_ready(ubq)) ub->nr_queues_ready++; - - if (capable(CAP_SYS_ADMIN)) - ub->nr_privileged_daemon++; - } + if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN)) + ub->unprivileged_daemons = true; if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) { /* now we are ready for handling ublk io request */ @@ -2880,8 +2876,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, ublk_apply_params(ub); - /* don't probe partitions if any one ubq daemon is un-trusted */ - if (ub->nr_privileged_daemon != ub->nr_queues_ready) + /* don't probe partitions if any daemon task is untrusted */ + if (ub->unprivileged_daemons) set_bit(GD_SUPPRESS_PART_SCAN, &disk->state); ublk_get_device(ub); diff --git 
a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 06a1c7dd081f..f366d35c5840 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2793,6 +2793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs), X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs), X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs), + X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs), {} }; #endif diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 52d5d26fc7c6..81306612a5c6 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns) static DEFINE_PER_CPU(struct menu_device, menu_devices); +static void menu_update_intervals(struct menu_device *data, unsigned int interval_us) +{ + /* Update the repeating-pattern data. */ + data->intervals[data->interval_ptr++] = interval_us; + if (data->interval_ptr >= INTERVALS) + data->interval_ptr = 0; +} + static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); /* @@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (data->needs_update) { menu_update(drv, dev); data->needs_update = 0; + } else if (!dev->last_residency_ns) { + /* + * This happens when the driver rejects the previously selected + * idle state and returns an error, so update the recent + * intervals table to prevent invalid information from being + * used going forward. + */ + menu_update_intervals(data, UINT_MAX); } /* Find the shortest expected idle interval. */ @@ -482,10 +498,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) data->correction_factor[data->bucket] = new_factor; - /* update the repeating-pattern data */ - data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns); - if (data->interval_ptr >= INTERVALS) - data->interval_ptr = 0; + menu_update_intervals(data, ktime_to_us(measured_ns)); } /** diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index d28477d84697..1d1c2d8f85ae 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -550,6 +550,23 @@ const struct fw_address_region fw_unit_space_region = { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, }; #endif /* 0 */ +static void complete_address_handler(struct kref *kref) +{ + struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref); + + complete(&handler->done); +} + +static void get_address_handler(struct fw_address_handler *handler) +{ + kref_get(&handler->kref); +} + +static int put_address_handler(struct fw_address_handler *handler) +{ + return kref_put(&handler->kref, complete_address_handler); +} + /** * fw_core_add_address_handler() - register for incoming requests * @handler: callback @@ -596,6 +613,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler, if (other != NULL) { handler->offset += other->length; } else { + init_completion(&handler->done); + kref_init(&handler->kref); list_add_tail_rcu(&handler->link, &address_handler_list); ret = 0; break; @@ -621,6 +640,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler) list_del_rcu(&handler->link); synchronize_rcu(); + + if (!put_address_handler(handler)) + wait_for_completion(&handler->done); } EXPORT_SYMBOL(fw_core_remove_address_handler); @@ -914,22 +936,31 @@ static void 
handle_exclusive_region_request(struct fw_card *card, handler = lookup_enclosing_address_handler(&address_handler_list, offset, request->length); if (handler) - handler->address_callback(card, request, tcode, destination, source, - p->generation, offset, request->data, - request->length, handler->callback_data); + get_address_handler(handler); } - if (!handler) + if (!handler) { fw_send_response(card, request, RCODE_ADDRESS_ERROR); + return; + } + + // Outside the RCU read-side critical section. Without spinlock. With reference count. + handler->address_callback(card, request, tcode, destination, source, p->generation, offset, + request->data, request->length, handler->callback_data); + put_address_handler(handler); } +// To use kmalloc allocator efficiently, this should be power of two. +#define BUFFER_ON_KERNEL_STACK_SIZE 4 + static void handle_fcp_region_request(struct fw_card *card, struct fw_packet *p, struct fw_request *request, unsigned long long offset) { - struct fw_address_handler *handler; - int tcode, destination, source; + struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE]; + struct fw_address_handler *handler, **handlers; + int tcode, destination, source, i, count, buffer_size; if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) && offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) || @@ -950,15 +981,55 @@ static void handle_fcp_region_request(struct fw_card *card, return; } + count = 0; + handlers = buffer_on_kernel_stack; + buffer_size = ARRAY_SIZE(buffer_on_kernel_stack); scoped_guard(rcu) { list_for_each_entry_rcu(handler, &address_handler_list, link) { - if (is_enclosing_handler(handler, offset, request->length)) - handler->address_callback(card, request, tcode, destination, source, - p->generation, offset, request->data, - request->length, handler->callback_data); + if (is_enclosing_handler(handler, offset, request->length)) { + if (count >= buffer_size) { + int next_size = buffer_size * 2; + struct fw_address_handler **buffer_on_kernel_heap; + + if (handlers == buffer_on_kernel_stack) + buffer_on_kernel_heap = NULL; + else + buffer_on_kernel_heap = handlers; + + buffer_on_kernel_heap = + krealloc_array(buffer_on_kernel_heap, next_size, + sizeof(*buffer_on_kernel_heap), GFP_ATOMIC); + // FCP is used for purposes unrelated to significant system + // resources (e.g. storage or networking), so allocation + // failures are not considered so critical. 
+ if (!buffer_on_kernel_heap) + break; + + if (handlers == buffer_on_kernel_stack) { + memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack, + sizeof(buffer_on_kernel_stack)); + } + + handlers = buffer_on_kernel_heap; + buffer_size = next_size; + } + get_address_handler(handler); + handlers[count++] = handler; + } } } + for (i = 0; i < count; ++i) { + handler = handlers[i]; + handler->address_callback(card, request, tcode, destination, source, + p->generation, offset, request->data, + request->length, handler->callback_data); + put_address_handler(handler); + } + + if (handlers != buffer_on_kernel_stack) + kfree(handlers); + fw_send_response(card, request, RCODE_COMPLETE); } diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c index 9875e34bde72..ed29b07d16c1 100644 --- a/drivers/gpio/gpio-mlxbf3.c +++ b/drivers/gpio/gpio-mlxbf3.c @@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) struct mlxbf3_gpio_context *gs; struct gpio_irq_chip *girq; struct gpio_chip *gc; - char *colon_ptr; int ret, irq; - long num; gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL); if (!gs) @@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev) gc->owner = THIS_MODULE; gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges; - colon_ptr = strchr(dev_name(dev), ':'); - if (!colon_ptr) { - dev_err(dev, "invalid device name format\n"); - return -EINVAL; - } - - ret = kstrtol(++colon_ptr, 16, &num); - if (ret) { - dev_err(dev, "invalid device instance\n"); - return ret; - } - - if (!num) { - irq = platform_get_irq(pdev, 0); - if (irq >= 0) { - girq = &gs->gc.irq; - gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); - girq->default_type = IRQ_TYPE_NONE; - /* This will let us handle the parent IRQ in the driver */ - girq->num_parents = 0; - girq->parents = NULL; - girq->parent_handler = NULL; - girq->handler = handle_bad_irq; - - /* - * Directly request the irq here instead of passing - * a flow-handler because the irq is shared. - */ - ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, - IRQF_SHARED, dev_name(dev), gs); - if (ret) - return dev_err_probe(dev, ret, "failed to request IRQ"); - } + irq = platform_get_irq_optional(pdev, 0); + if (irq >= 0) { + girq = &gs->gc.irq; + gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip); + girq->default_type = IRQ_TYPE_NONE; + /* This will let us handle the parent IRQ in the driver */ + girq->num_parents = 0; + girq->parents = NULL; + girq->parent_handler = NULL; + girq->handler = handle_bad_irq; + + /* + * Directly request the irq here instead of passing + * a flow-handler because the irq is shared. 
+ */ + ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler, + IRQF_SHARED, dev_name(dev), gs); + if (ret) + return dev_err_probe(dev, ret, "failed to request IRQ"); } platform_set_drvdata(pdev, gs); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index a2adaacf6adb..d3f220be2ef9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1139,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) } } + if (!amdgpu_vm_ready(vm)) + return -EINVAL; + r = amdgpu_vm_clear_freed(adev, vm, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 02138aa55793..dfb6cfd83760 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, } r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); + AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | + AMDGPU_VM_PAGE_EXECUTABLE); if (r) { DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 0bd51a04be79..23484317a5fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1039,15 +1039,28 @@ int psp_update_fw_reservation(struct psp_context *psp) { int ret; uint64_t reserv_addr, reserv_addr_ext; - uint32_t reserv_size, reserv_size_ext; + uint32_t reserv_size, reserv_size_ext, mp0_ip_ver; struct amdgpu_device *adev = psp->adev; + mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0); + if (amdgpu_sriov_vf(psp->adev)) return 0; - if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) && - (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3))) + switch (mp0_ip_ver) { + case IP_VERSION(14, 0, 2): + if (adev->psp.sos.fw_version < 0x3b0e0d) + return 0; + break; + + case IP_VERSION(14, 0, 3): + if (adev->psp.sos.fw_version < 0x3a0e14) + return 0; + break; + + default: return 0; + } ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5cacf5717016..0b87798daebd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if VM is not evicting. 
+ * True if VM is not evicting and all VM entities are not stopped */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - bool empty; bool ret; amdgpu_vm_eviction_lock(vm); @@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) amdgpu_vm_eviction_unlock(vm); spin_lock(&vm->status_lock); - empty = list_empty(&vm->evicted); + ret &= list_empty(&vm->evicted); spin_unlock(&vm->status_lock); - return ret && empty; + spin_lock(&vm->immediate.lock); + ret &= !vm->immediate.stopped; + spin_unlock(&vm->immediate.lock); + + spin_lock(&vm->delayed.lock); + ret &= !vm->delayed.stopped; + spin_unlock(&vm->delayed.lock); + + return ret; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 07c936e90d8e..78f9e86ccc09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, list_for_each_entry(block, &vres->blocks, link) vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - amdgpu_vram_mgr_do_reserve(man); - drm_buddy_free_list(mm, &vres->blocks, vres->flags); + amdgpu_vram_mgr_do_reserve(man); mutex_unlock(&mgr->lock); atomic64_sub(vis_usage, &mgr->vis_usage); diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index b9e0ca85226a..6945029b3592 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -122,6 +122,7 @@ config DRM_ITE_IT6505 select EXTCON select CRYPTO select CRYPTO_HASH + select REGMAP_I2C help ITE IT6505 DisplayPort bridge chip driver. @@ -316,6 +317,19 @@ config DRM_SIMPLE_BRIDGE Support for non-programmable DRM bridges, such as ADI ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs. +config DRM_SOLOMON_SSD2825 + tristate "SSD2825 RGB/DSI bridge" + depends on SPI_MASTER && OF + select DRM_MIPI_DSI + select DRM_KMS_HELPER + select DRM_PANEL + help + Say Y here if you want support for the Solomon SSD2825 RGB/DSI + SPI bridge driver. + + Say M here if you want to support this hardware as a module. + The module will be named "ssd2825". + config DRM_THINE_THC63LVD1024 tristate "Thine THC63LVD1024 LVDS decoder bridge" depends on OF @@ -438,6 +452,18 @@ config DRM_TI_TPD12S015 Texas Instruments TPD12S015 HDMI level shifter and ESD protection driver. +config DRM_WAVESHARE_BRIDGE + tristate "Waveshare DSI bridge" + depends on OF + depends on BACKLIGHT_CLASS_DEVICE + select DRM_PANEL_BRIDGE + select DRM_KMS_HELPER + select DRM_MIPI_DSI + select REGMAP_I2C + help + Driver for waveshare DSI to DPI bridge board. 
Please say Y if you have such hardware. + source "drivers/gpu/drm/bridge/analogix/Kconfig" source "drivers/gpu/drm/bridge/adv7511/Kconfig" diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 245e8a27e3fc..c7dc03182e59 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o obj-$(CONFIG_DRM_SII902X) += sii902x.o obj-$(CONFIG_DRM_SII9234) += sii9234.o obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o +obj-$(CONFIG_DRM_SOLOMON_SSD2825) += ssd2825.o obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o obj-$(CONFIG_DRM_TOSHIBA_TC358762) += tc358762.o obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o @@ -40,6 +41,7 @@ obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o +obj-$(CONFIG_DRM_WAVESHARE_BRIDGE) += waveshare-dsi.o obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o obj-$(CONFIG_DRM_ITE_IT66121) += ite-it66121.o diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index c0ad8f59e483..609cdb9d371e 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2604,6 +2604,7 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux) platform->bridge.type = platform->pdata.panel_bridge ? DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort; + platform->bridge.support_hdcp = true; drm_bridge_add(&platform->bridge); diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index b63304d3a80f..b3e4cdff61d6 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -18,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev) { struct auxiliary_device *adev = to_auxiliary_dev(dev); + of_node_put(dev->of_node); ida_free(&drm_aux_bridge_ida, adev->id); kfree(adev); @@ -65,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent) ret = auxiliary_device_init(adev); if (ret) { + of_node_put(adev->dev.of_node); ida_free(&drm_aux_bridge_ida, adev->id); kfree(adev); return ret; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index a57ca8c3bdae..09b289f0fcbf 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -9,6 +9,7 @@ #include <drm/drm_drv.h> #include <drm/drm_probe_helper.h> #include <video/mipi_display.h> +#include <video/videomode.h> #include <linux/clk.h> #include <linux/interrupt.h> @@ -417,7 +418,8 @@ #define DSI_OUTPUT_PORT 0 #define DSI_INPUT_PORT(inputid) (1 + (inputid)) -#define DSI_HBP_FRAME_OVERHEAD 12 +#define DSI_HBP_FRAME_PULSE_OVERHEAD 12 +#define DSI_HBP_FRAME_EVENT_OVERHEAD 16 #define DSI_HSA_FRAME_OVERHEAD 14 #define DSI_HFP_FRAME_OVERHEAD 6 #define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4 @@ -452,15 +454,6 @@ bridge_to_cdns_dsi_input(struct drm_bridge *bridge) return container_of(bridge, struct cdns_dsi_input, bridge); } -static unsigned int mode_to_dpi_hfp(const struct drm_display_mode *mode, - bool mode_valid_check) -{ - if (mode_valid_check) - return mode->hsync_start - mode->hdisplay; - - return mode->crtc_hsync_start - mode->crtc_hdisplay; -} - static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing, unsigned int dpi_bpp, unsigned int dsi_pkt_overhead) @@ -476,145 +469,77 @@ }
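/*
 * A hedged illustration of the DPI-to-DSI blanking conversion used by
 * cdns_dsi_mode2cfg() below: DPI pixel counts become DSI payload bytes,
 * minus the fixed packet overhead of that blanking period. The clamp to
 * zero is an assumption about dpi_to_dsi_timing(), whose body this patch
 * does not change.
 */
static unsigned int example_dpi_to_dsi(unsigned int dpi_px, unsigned int bpp,
				       unsigned int pkt_overhead)
{
	unsigned int bytes = DIV_ROUND_UP(dpi_px * bpp, 8);

	return bytes > pkt_overhead ? bytes - pkt_overhead : 0;
}

/*
 * Example: a 40-pixel back porch at 24 bpp in sync-pulse mode gives
 * DIV_ROUND_UP(40 * 24, 8) - DSI_HBP_FRAME_PULSE_OVERHEAD = 120 - 12 = 108
 * bytes of DSI HBP; in event mode, HSA is folded into HBP and the larger
 * 16-byte DSI_HBP_FRAME_EVENT_OVERHEAD applies instead.
 */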
static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi, - const struct drm_display_mode *mode, - struct cdns_dsi_cfg *dsi_cfg, - bool mode_valid_check) + const struct videomode *vm, + struct cdns_dsi_cfg *dsi_cfg) { struct cdns_dsi_output *output = &dsi->output; - unsigned int tmp; - bool sync_pulse = false; + u32 dpi_hsa, dpi_hbp, dpi_hfp, dpi_hact; + bool sync_pulse; int bpp; + dpi_hsa = vm->hsync_len; + dpi_hbp = vm->hback_porch; + dpi_hfp = vm->hfront_porch; + dpi_hact = vm->hactive; + memset(dsi_cfg, 0, sizeof(*dsi_cfg)); - if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) - sync_pulse = true; + sync_pulse = output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE; bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format); - if (mode_valid_check) - tmp = mode->htotal - - (sync_pulse ? mode->hsync_end : mode->hsync_start); - else - tmp = mode->crtc_htotal - - (sync_pulse ? - mode->crtc_hsync_end : mode->crtc_hsync_start); - - dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD); - if (sync_pulse) { - if (mode_valid_check) - tmp = mode->hsync_end - mode->hsync_start; - else - tmp = mode->crtc_hsync_end - mode->crtc_hsync_start; + dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp, bpp, + DSI_HBP_FRAME_PULSE_OVERHEAD); - dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp, + dsi_cfg->hsa = dpi_to_dsi_timing(dpi_hsa, bpp, DSI_HSA_FRAME_OVERHEAD); - } - - dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ? - mode->hdisplay : mode->crtc_hdisplay, - bpp, 0); - dsi_cfg->hfp = dpi_to_dsi_timing(mode_to_dpi_hfp(mode, mode_valid_check), - bpp, DSI_HFP_FRAME_OVERHEAD); - - return 0; -} - -static int cdns_dsi_adjust_phy_config(struct cdns_dsi *dsi, - struct cdns_dsi_cfg *dsi_cfg, - struct phy_configure_opts_mipi_dphy *phy_cfg, - const struct drm_display_mode *mode, - bool mode_valid_check) -{ - struct cdns_dsi_output *output = &dsi->output; - unsigned long long dlane_bps; - unsigned long adj_dsi_htotal; - unsigned long dsi_htotal; - unsigned long dpi_htotal; - unsigned long dpi_hz; - unsigned int dsi_hfp_ext; - unsigned int lanes = output->dev->lanes; - - dsi_htotal = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD; - if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) - dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD; + } else { + dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp + dpi_hsa, bpp, + DSI_HBP_FRAME_EVENT_OVERHEAD); - dsi_htotal += dsi_cfg->hact; - dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD; - - /* - * Make sure DSI htotal is aligned on a lane boundary when calculating - * the expected data rate. This is done by extending HFP in case of - * misalignment. - */ - adj_dsi_htotal = dsi_htotal; - if (dsi_htotal % lanes) - adj_dsi_htotal += lanes - (dsi_htotal % lanes); + dsi_cfg->hsa = 0; + } - dpi_hz = (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000; - dlane_bps = (unsigned long long)dpi_hz * adj_dsi_htotal; + dsi_cfg->hact = dpi_to_dsi_timing(dpi_hact, bpp, 0); - /* data rate in bytes/sec is not an integer, refuse the mode. */ - dpi_htotal = mode_valid_check ? mode->htotal : mode->crtc_htotal; - if (do_div(dlane_bps, lanes * dpi_htotal)) - return -EINVAL; + dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD); - /* data rate was in bytes/sec, convert to bits/sec. 
*/ - phy_cfg->hs_clk_rate = dlane_bps * 8; + dsi_cfg->htotal = dsi_cfg->hact + dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD; - dsi_hfp_ext = adj_dsi_htotal - dsi_htotal; - dsi_cfg->hfp += dsi_hfp_ext; - dsi_cfg->htotal = dsi_htotal + dsi_hfp_ext; + if (sync_pulse) { + dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_PULSE_OVERHEAD; + dsi_cfg->htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD; + } else { + dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_EVENT_OVERHEAD; + } return 0; } static int cdns_dsi_check_conf(struct cdns_dsi *dsi, - const struct drm_display_mode *mode, - struct cdns_dsi_cfg *dsi_cfg, - bool mode_valid_check) + const struct videomode *vm, + struct cdns_dsi_cfg *dsi_cfg) { struct cdns_dsi_output *output = &dsi->output; struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy; - unsigned long dsi_hss_hsa_hse_hbp; unsigned int nlanes = output->dev->lanes; - int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock); int ret; - ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check); + ret = cdns_dsi_mode2cfg(dsi, vm, dsi_cfg); if (ret) return ret; - ret = phy_mipi_dphy_get_default_config(mode_clock * 1000, + ret = phy_mipi_dphy_get_default_config(vm->pixelclock, mipi_dsi_pixel_format_to_bpp(output->dev->format), nlanes, phy_cfg); if (ret) return ret; - ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check); - if (ret) - return ret; - ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &output->phy_opts); if (ret) return ret; - dsi_hss_hsa_hse_hbp = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD; - if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) - dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD; - - /* - * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO - * is empty before we start a receiving a new line on the DPI - * interface. - */ - if ((u64)phy_cfg->hs_clk_rate * - mode_to_dpi_hfp(mode, mode_valid_check) * nlanes < - (u64)dsi_hss_hsa_hse_hbp * - (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000) - return -EINVAL; - return 0; } @@ -644,8 +569,7 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge, struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); struct cdns_dsi_output *output = &dsi->output; - struct cdns_dsi_cfg dsi_cfg; - int bpp, ret; + int bpp; /* * VFP_DSI should be less than VFP_DPI and VFP_DSI should be at @@ -663,10 +587,6 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge, if ((mode->hdisplay * bpp) % 32) return MODE_H_ILLEGAL; - ret = cdns_dsi_check_conf(dsi, mode, &dsi_cfg, true); - if (ret) - return MODE_BAD; - return MODE_OK; } @@ -882,7 +802,13 @@ static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge, tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8, phy_cfg->hs_clk_rate); - reg_wakeup = (phy_cfg->hs_prepare + phy_cfg->hs_zero) / tx_byte_period; + + /* + * Estimated time [in clock cycles] to perform LP->HS on D-PHY. + * It is not clear how to calculate this, so for now, + * set it to 1/10 of the total number of clocks in a line. 
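+ * As a rough illustration (numbers assumed for this note, not taken
+ * from a datasheet): a 1920x1080 RGB888 mode has a DSI htotal of about
+ * 6600 bytes, so on four lanes this gives 6600 / 4 / 10 = 165 byte
+ * clocks for the wakeup time.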
+ */ + reg_wakeup = dsi_cfg.htotal / nlanes / 10; writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp), dsi->regs + VID_DPHY_TIME); @@ -989,6 +915,28 @@ static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge, return input_fmts; } +static long cdns_dsi_round_pclk(struct cdns_dsi *dsi, unsigned long pclk) +{ + struct cdns_dsi_output *output = &dsi->output; + unsigned int nlanes = output->dev->lanes; + union phy_configure_opts phy_opts = { 0 }; + u32 bitspp; + int ret; + + bitspp = mipi_dsi_pixel_format_to_bpp(output->dev->format); + + ret = phy_mipi_dphy_get_default_config(pclk, bitspp, nlanes, + &phy_opts.mipi_dphy); + if (ret) + return ret; + + ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &phy_opts); + if (ret) + return ret; + + return div_u64((u64)phy_opts.mipi_dphy.hs_clk_rate * nlanes, bitspp); +} + static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, @@ -997,10 +945,32 @@ static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge, struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state); - const struct drm_display_mode *mode = &crtc_state->mode; + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg; + struct videomode vm; + long pclk; + + /* cdns-dsi requires negative syncs */ + adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); + adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC; - return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false); + /* + * The DPHY PLL has quite a coarsely grained clock rate options. See + * what hsclk rate we can achieve based on the pixel clock, convert it + * back to pixel clock, set that to the adjusted_mode->clock. This is + * all in hopes that the CRTC will be able to provide us the requested + * clock, as otherwise the DPI and DSI clocks will be out of sync. + */ + + pclk = cdns_dsi_round_pclk(dsi, adjusted_mode->clock * 1000); + if (pclk < 0) + return (int)pclk; + + adjusted_mode->clock = pclk / 1000; + + drm_display_mode_to_videomode(adjusted_mode, &vm); + + return cdns_dsi_check_conf(dsi, &vm, dsi_cfg); } static struct drm_bridge_state * @@ -1082,10 +1052,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host, if (output->dev) return -EBUSY; - /* We do not support burst mode yet. 
*/ - if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) - return -ENOTSUPP; - /* * The host <-> device link might be described using an OF-graph * representation, in this case we extract the device of_node from @@ -1442,4 +1408,3 @@ MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>"); MODULE_DESCRIPTION("Cadence DSI driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:cdns-dsi"); - diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 52b7b5889e6f..e9f16dbc9535 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -108,7 +108,7 @@ static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge, struct drm_connector_state *conn_state, unsigned int *num_output_fmts) { - struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge); + struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge); struct drm_bridge_state *prev_bridge_state; if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) { @@ -151,7 +151,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge, u32 output_fmt, unsigned int *num_input_fmts) { - struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge); + struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge); struct drm_bridge_state *prev_bridge_state; if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) { @@ -373,7 +373,8 @@ static int display_connector_probe(struct platform_device *pdev) if (conn->bridge.ddc) conn->bridge.ops |= DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; - if (conn->hpd_gpio) + /* Detecting the monitor requires reading DPCD */ + if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort) conn->bridge.ops |= DRM_BRIDGE_OP_DETECT; if (conn->hpd_irq >= 0) conn->bridge.ops |= DRM_BRIDGE_OP_HPD; diff --git a/drivers/gpu/drm/bridge/ssd2825.c b/drivers/gpu/drm/bridge/ssd2825.c new file mode 100644 index 000000000000..f2fdbf7c117d --- /dev/null +++ b/drivers/gpu/drm/bridge/ssd2825.c @@ -0,0 +1,775 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> +#include <linux/units.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_drv.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <video/mipi_display.h> + +#define SSD2825_DEVICE_ID_REG 0xb0 +#define SSD2825_RGB_INTERFACE_CTRL_REG_1 0xb1 +#define SSD2825_RGB_INTERFACE_CTRL_REG_2 0xb2 +#define SSD2825_RGB_INTERFACE_CTRL_REG_3 0xb3 +#define SSD2825_RGB_INTERFACE_CTRL_REG_4 0xb4 +#define SSD2825_RGB_INTERFACE_CTRL_REG_5 0xb5 +#define SSD2825_RGB_INTERFACE_CTRL_REG_6 0xb6 +#define SSD2825_NON_BURST_EV BIT(2) +#define SSD2825_BURST BIT(3) +#define SSD2825_PCKL_HIGH BIT(13) +#define SSD2825_HSYNC_HIGH BIT(14) +#define SSD2825_VSYNC_HIGH BIT(15) +#define SSD2825_CONFIGURATION_REG 0xb7 +#define SSD2825_CONF_REG_HS BIT(0) +#define SSD2825_CONF_REG_CKE BIT(1) +#define SSD2825_CONF_REG_SLP BIT(2) +#define SSD2825_CONF_REG_VEN BIT(3) +#define SSD2825_CONF_REG_HCLK BIT(4) +#define SSD2825_CONF_REG_CSS BIT(5) +#define SSD2825_CONF_REG_DCS BIT(6) +#define SSD2825_CONF_REG_REN BIT(7) +#define 
SSD2825_CONF_REG_ECD BIT(8) +#define SSD2825_CONF_REG_EOT BIT(9) +#define SSD2825_CONF_REG_LPE BIT(10) +#define SSD2825_VC_CTRL_REG 0xb8 +#define SSD2825_PLL_CTRL_REG 0xb9 +#define SSD2825_PLL_CONFIGURATION_REG 0xba +#define SSD2825_CLOCK_CTRL_REG 0xbb +#define SSD2825_PACKET_SIZE_CTRL_REG_1 0xbc +#define SSD2825_PACKET_SIZE_CTRL_REG_2 0xbd +#define SSD2825_PACKET_SIZE_CTRL_REG_3 0xbe +#define SSD2825_PACKET_DROP_REG 0xbf +#define SSD2825_OPERATION_CTRL_REG 0xc0 +#define SSD2825_MAX_RETURN_SIZE_REG 0xc1 +#define SSD2825_RETURN_DATA_COUNT_REG 0xc2 +#define SSD2825_ACK_RESPONSE_REG 0xc3 +#define SSD2825_LINE_CTRL_REG 0xc4 +#define SSD2825_INTERRUPT_CTRL_REG 0xc5 +#define SSD2825_INTERRUPT_STATUS_REG 0xc6 +#define SSD2825_ERROR_STATUS_REG 0xc7 +#define SSD2825_DATA_FORMAT_REG 0xc8 +#define SSD2825_DELAY_ADJ_REG_1 0xc9 +#define SSD2825_DELAY_ADJ_REG_2 0xca +#define SSD2825_DELAY_ADJ_REG_3 0xcb +#define SSD2825_DELAY_ADJ_REG_4 0xcc +#define SSD2825_DELAY_ADJ_REG_5 0xcd +#define SSD2825_DELAY_ADJ_REG_6 0xce +#define SSD2825_HS_TX_TIMER_REG_1 0xcf +#define SSD2825_HS_TX_TIMER_REG_2 0xd0 +#define SSD2825_LP_RX_TIMER_REG_1 0xd1 +#define SSD2825_LP_RX_TIMER_REG_2 0xd2 +#define SSD2825_TE_STATUS_REG 0xd3 +#define SSD2825_SPI_READ_REG 0xd4 +#define SSD2825_SPI_READ_REG_RESET 0xfa +#define SSD2825_PLL_LOCK_REG 0xd5 +#define SSD2825_TEST_REG 0xd6 +#define SSD2825_TE_COUNT_REG 0xd7 +#define SSD2825_ANALOG_CTRL_REG_1 0xd8 +#define SSD2825_ANALOG_CTRL_REG_2 0xd9 +#define SSD2825_ANALOG_CTRL_REG_3 0xda +#define SSD2825_ANALOG_CTRL_REG_4 0xdb +#define SSD2825_INTERRUPT_OUT_CTRL_REG 0xdc +#define SSD2825_RGB_INTERFACE_CTRL_REG_7 0xdd +#define SSD2825_LANE_CONFIGURATION_REG 0xde +#define SSD2825_DELAY_ADJ_REG_7 0xdf +#define SSD2825_INPUT_PIN_CTRL_REG_1 0xe0 +#define SSD2825_INPUT_PIN_CTRL_REG_2 0xe1 +#define SSD2825_BIDIR_PIN_CTRL_REG_1 0xe2 +#define SSD2825_BIDIR_PIN_CTRL_REG_2 0xe3 +#define SSD2825_BIDIR_PIN_CTRL_REG_3 0xe4 +#define SSD2825_BIDIR_PIN_CTRL_REG_4 0xe5 +#define SSD2825_BIDIR_PIN_CTRL_REG_5 0xe6 +#define SSD2825_BIDIR_PIN_CTRL_REG_6 0xe7 +#define SSD2825_BIDIR_PIN_CTRL_REG_7 0xe8 +#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_1 0xe9 +#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_2 0xea +#define SSD2825_CABC_BRIGHTNESS_STATUS_REG 0xeb +#define SSD2825_READ_REG 0xff + +#define SSD2825_COM_BYTE 0x00 +#define SSD2825_DAT_BYTE 0x01 + +#define SSD2828_LP_CLOCK_DIVIDER(n) (((n) - 1) & 0x3f) +#define SSD2825_LP_MIN_CLK 5000 /* kHz */ +#define SSD2825_REF_MIN_CLK 2000 /* kHz */ + +static const struct regulator_bulk_data ssd2825_supplies[] = { + { .supply = "dvdd" }, + { .supply = "avdd" }, + { .supply = "vddio" }, +}; + +struct ssd2825_dsi_output { + struct mipi_dsi_device *dev; + struct drm_panel *panel; + struct drm_bridge *bridge; +}; + +struct ssd2825_priv { + struct spi_device *spi; + struct device *dev; + + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; + + struct clk *tx_clk; + + struct mipi_dsi_host dsi_host; + struct drm_bridge bridge; + struct ssd2825_dsi_output output; + + struct mutex mlock; /* for host transfer operations */ + + u32 pd_lines; /* number of Parallel Port Input Data Lines */ + u32 dsi_lanes; /* number of DSI Lanes */ + + /* Parameters for PLL programming */ + u32 pll_freq_kbps; /* PLL in kbps */ + u32 nibble_freq_khz; /* PLL div by 4 */ + + u32 hzd; /* HS Zero Delay in ns */ + u32 hpd; /* HS Prepare Delay in ns */ +}; + +static inline struct ssd2825_priv *dsi_host_to_ssd2825(struct mipi_dsi_host *host) +{ + return container_of(host, struct ssd2825_priv, dsi_host);
+} + +static inline struct ssd2825_priv *bridge_to_ssd2825(struct drm_bridge *bridge) +{ + return container_of(bridge, struct ssd2825_priv, bridge); +} + +static int ssd2825_write_raw(struct ssd2825_priv *priv, u8 high_byte, u8 low_byte) +{ + struct spi_device *spi = priv->spi; + u8 tx_buf[2]; + + /* + * Low byte is the value, high byte defines type of + * write cycle, 0 for command and 1 for data. + */ + tx_buf[0] = low_byte; + tx_buf[1] = high_byte; + + return spi_write(spi, tx_buf, 2); +} + +static int ssd2825_write_reg(struct ssd2825_priv *priv, u8 reg, u16 command) +{ + u8 datal = (command & 0x00FF); + u8 datah = (command & 0xFF00) >> 8; + int ret; + + /* Command write cycle */ + ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg); + if (ret) + return ret; + + /* Data write cycle bits 7-0 */ + ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datal); + if (ret) + return ret; + + /* Data write cycle bits 15-8 */ + ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datah); + if (ret) + return ret; + + return 0; +} + +static int ssd2825_write_dsi(struct ssd2825_priv *priv, const u8 *command, int len) +{ + int ret, i; + + ret = ssd2825_write_reg(priv, SSD2825_PACKET_SIZE_CTRL_REG_1, len); + if (ret) + return ret; + + ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, SSD2825_PACKET_DROP_REG); + if (ret) + return ret; + + for (i = 0; i < len; i++) { + ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, command[i]); + if (ret) + return ret; + } + + return 0; +} + +static int ssd2825_read_raw(struct ssd2825_priv *priv, u8 cmd, u16 *data) +{ + struct spi_device *spi = priv->spi; + struct spi_message msg; + struct spi_transfer xfer[2]; + u8 tx_buf[2]; + u8 rx_buf[2]; + int ret; + + memset(&xfer, 0, sizeof(xfer)); + + tx_buf[1] = (cmd & 0xFF00) >> 8; + tx_buf[0] = (cmd & 0x00FF); + + xfer[0].tx_buf = tx_buf; + xfer[0].bits_per_word = 9; + xfer[0].len = 2; + + xfer[1].rx_buf = rx_buf; + xfer[1].bits_per_word = 16; + xfer[1].len = 2; + + spi_message_init(&msg); + spi_message_add_tail(&xfer[0], &msg); + spi_message_add_tail(&xfer[1], &msg); + + ret = spi_sync(spi, &msg); + if (ret) { + dev_err(&spi->dev, "ssd2825 read raw failed %d\n", ret); + return ret; + } + + *data = rx_buf[1] | (rx_buf[0] << 8); + + return 0; +} + +static int ssd2825_read_reg(struct ssd2825_priv *priv, u8 reg, u16 *data) +{ + int ret; + + /* Reset the read register */ + ret = ssd2825_write_reg(priv, SSD2825_SPI_READ_REG, SSD2825_SPI_READ_REG_RESET); + if (ret) + return ret; + + /* Push the address to read */ + ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg); + if (ret) + return ret; + + /* Perform a reading cycle */ + ret = ssd2825_read_raw(priv, SSD2825_SPI_READ_REG_RESET, data); + if (ret) + return ret; + + return 0; +} + +static int ssd2825_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev) +{ + struct ssd2825_priv *priv = dsi_host_to_ssd2825(host); + struct drm_bridge *bridge; + struct drm_panel *panel; + struct device_node *ep; + int ret; + + if (dev->lanes > 4) { + dev_err(priv->dev, "unsupported number of data lanes(%u)\n", dev->lanes); + return -EINVAL; + } + + /* + * ssd2825 supports both Video and Pulse mode, but the driver only + * implements Video (event) mode currently + */ + if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) { + dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n"); + return -EOPNOTSUPP; + } + + ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel, &bridge); + if (ret) + return ret; + + if (panel) { + bridge = drm_panel_bridge_add_typed(panel, 
DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + } + + priv->output.dev = dev; + priv->output.bridge = bridge; + priv->output.panel = panel; + + priv->dsi_lanes = dev->lanes; + + /* get input ep (port0/endpoint0) */ + ret = -EINVAL; + ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0); + if (ep) { + ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines); + of_node_put(ep); + } + + if (ret) + priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format); + + drm_bridge_add(&priv->bridge); + + return 0; +} + +static int ssd2825_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev) +{ + struct ssd2825_priv *priv = dsi_host_to_ssd2825(host); + + drm_bridge_remove(&priv->bridge); + if (priv->output.panel) + drm_panel_bridge_remove(priv->output.bridge); + + return 0; +} + +static ssize_t ssd2825_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct ssd2825_priv *priv = dsi_host_to_ssd2825(host); + u16 config; + int ret; + + if (msg->rx_len) { + dev_warn(priv->dev, "MIPI rx is not supported\n"); + return -EOPNOTSUPP; + } + + guard(mutex)(&priv->mlock); + + ret = ssd2825_read_reg(priv, SSD2825_CONFIGURATION_REG, &config); + if (ret) + return ret; + + switch (msg->type) { + case MIPI_DSI_DCS_SHORT_WRITE: + case MIPI_DSI_DCS_SHORT_WRITE_PARAM: + case MIPI_DSI_DCS_LONG_WRITE: + config |= SSD2825_CONF_REG_DCS; + break; + case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: + case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: + case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: + case MIPI_DSI_GENERIC_LONG_WRITE: + config &= ~SSD2825_CONF_REG_DCS; + break; + case MIPI_DSI_DCS_READ: + case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: + case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: + case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: + default: + return 0; + } + + ret = ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config); + if (ret) + return ret; + + ret = ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000); + if (ret) + return ret; + + ret = ssd2825_write_dsi(priv, msg->tx_buf, msg->tx_len); + if (ret) + return ret; + + return 0; +} + +static const struct mipi_dsi_host_ops ssd2825_dsi_host_ops = { + .attach = ssd2825_dsi_host_attach, + .detach = ssd2825_dsi_host_detach, + .transfer = ssd2825_dsi_host_transfer, +}; + +static void ssd2825_hw_reset(struct ssd2825_priv *priv) +{ + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(priv->reset_gpio, 0); + usleep_range(5000, 6000); +} + +/* + * PLL configuration register settings. + * + * See the "PLL Configuration Register Description" in the SSD2825 datasheet. 
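+ *
+ * The returned value packs a frequency-range code into bits [15:14],
+ * the input divider into bits [12:8] and the feedback multiplier into
+ * bits [7:0]. Worked example, assuming an illustrative 24 MHz TX_CLK
+ * reference and a 500 Mbps target lane rate: the divider loop stops at
+ * div_factor = 12 (24000 / 13 kHz falls below SSD2825_REF_MIN_CLK),
+ * mul_factor = DIV_ROUND_UP(500000 * 12, 24000) = 250, so
+ * pll_freq_kbps = 24000 * 250 / 12 = 500000, fr = 2, and the register
+ * value is (2 << 14) | (12 << 8) | 250 = 0x8cfa.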
+ */ +static u16 construct_pll_config(struct ssd2825_priv *priv, + u32 desired_pll_freq_kbps, u32 reference_freq_khz) +{ + u32 div_factor = 1, mul_factor, fr = 0; + + while (reference_freq_khz / (div_factor + 1) >= SSD2825_REF_MIN_CLK) + div_factor++; + if (div_factor > 31) + div_factor = 31; + + mul_factor = DIV_ROUND_UP(desired_pll_freq_kbps * div_factor, + reference_freq_khz); + + priv->pll_freq_kbps = reference_freq_khz * mul_factor / div_factor; + priv->nibble_freq_khz = priv->pll_freq_kbps / 4; + + if (priv->pll_freq_kbps >= 501000) + fr = 3; + else if (priv->pll_freq_kbps >= 251000) + fr = 2; + else if (priv->pll_freq_kbps >= 126000) + fr = 1; + + return (fr << 14) | (div_factor << 8) | mul_factor; +} + +static int ssd2825_setup_pll(struct ssd2825_priv *priv, + const struct drm_display_mode *mode) +{ + u16 pll_config, lp_div; + u32 nibble_delay, pclk_mult, tx_freq_khz; + u8 hzd, hpd; + + tx_freq_khz = clk_get_rate(priv->tx_clk) / KILO; + if (!tx_freq_khz) + tx_freq_khz = SSD2825_REF_MIN_CLK; + + pclk_mult = priv->pd_lines / priv->dsi_lanes + 1; + pll_config = construct_pll_config(priv, pclk_mult * mode->clock, + tx_freq_khz); + + lp_div = priv->pll_freq_kbps / (SSD2825_LP_MIN_CLK * 8); + + /* nibble_delay in nanoseconds */ + nibble_delay = MICRO / priv->nibble_freq_khz; + + hzd = priv->hzd / nibble_delay; + hpd = (priv->hpd - 4 * nibble_delay) / nibble_delay; + + /* Disable PLL */ + ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0000); + ssd2825_write_reg(priv, SSD2825_LINE_CTRL_REG, 0x0001); + + /* Set delays */ + ssd2825_write_reg(priv, SSD2825_DELAY_ADJ_REG_1, (hzd << 8) | hpd); + + /* Set PLL coefficients */ + ssd2825_write_reg(priv, SSD2825_PLL_CONFIGURATION_REG, pll_config); + + /* Clock Control Register */ + ssd2825_write_reg(priv, SSD2825_CLOCK_CTRL_REG, + SSD2828_LP_CLOCK_DIVIDER(lp_div)); + + /* Enable PLL */ + ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001); + ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0); + + return 0; +} + +static void ssd2825_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct ssd2825_priv *priv = bridge_to_ssd2825(bridge); + struct mipi_dsi_device *dsi_dev = priv->output.dev; + const struct drm_crtc_state *crtc_state; + const struct drm_display_mode *mode; + struct drm_connector *connector; + struct drm_crtc *crtc; + u32 input_bus_flags = bridge->timings->input_bus_flags; + u16 flags = 0, config; + u8 pixel_format; + int ret; + + /* Power Sequence */ + ret = clk_prepare_enable(priv->tx_clk); + if (ret) + dev_err(priv->dev, "error enabling tx_clk (%d)\n", ret); + + ret = regulator_bulk_enable(ARRAY_SIZE(ssd2825_supplies), priv->supplies); + if (ret) + dev_err(priv->dev, "error enabling regulators (%d)\n", ret); + + usleep_range(1000, 2000); + + ssd2825_hw_reset(priv); + + /* Perform SW reset */ + ssd2825_write_reg(priv, SSD2825_OPERATION_CTRL_REG, 0x0100); + + /* Set pixel format */ + switch (dsi_dev->format) { + case MIPI_DSI_FMT_RGB565: + pixel_format = 0x00; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + pixel_format = 0x01; + break; + case MIPI_DSI_FMT_RGB666: + pixel_format = 0x02; + break; + case MIPI_DSI_FMT_RGB888: + default: + pixel_format = 0x03; + break; + } + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + mode = &crtc_state->adjusted_mode; + + /* Set panel timings */ + ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_1, + 
((mode->vtotal - mode->vsync_end) << 8) | + (mode->htotal - mode->hsync_end)); + ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_2, + ((mode->vtotal - mode->vsync_start) << 8) | + (mode->htotal - mode->hsync_start)); + ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_3, + ((mode->vsync_start - mode->vdisplay) << 8) | + (mode->hsync_start - mode->hdisplay)); + ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_4, mode->hdisplay); + ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_5, mode->vdisplay); + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + flags |= SSD2825_HSYNC_HIGH; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + flags |= SSD2825_VSYNC_HIGH; + + if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO) + flags |= SSD2825_NON_BURST_EV; + + if (input_bus_flags & DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE) + flags |= SSD2825_PCKL_HIGH; + + ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_6, flags | pixel_format); + ssd2825_write_reg(priv, SSD2825_LANE_CONFIGURATION_REG, dsi_dev->lanes - 1); + ssd2825_write_reg(priv, SSD2825_TEST_REG, 0x0004); + + /* Call PLL configuration */ + ssd2825_setup_pll(priv, mode); + + usleep_range(10000, 11000); + + config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_CKE | SSD2825_CONF_REG_DCS | + SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT; + + if (dsi_dev->mode_flags & MIPI_DSI_MODE_LPM) + config &= ~SSD2825_CONF_REG_HS; + + if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) + config &= ~SSD2825_CONF_REG_EOT; + + /* Initial DSI configuration register set */ + ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config); + ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0); + + if (priv->output.panel) + drm_panel_enable(priv->output.panel); +} + +static void ssd2825_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct ssd2825_priv *priv = bridge_to_ssd2825(bridge); + struct mipi_dsi_device *dsi_dev = priv->output.dev; + u16 config; + + config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_DCS | + SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT; + + if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO) + config |= SSD2825_CONF_REG_VEN; + + if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) + config &= ~SSD2825_CONF_REG_EOT; + + /* Complete configuration after DSI commands were sent */ + ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config); + ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001); + ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000); +} + +static void ssd2825_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct ssd2825_priv *priv = bridge_to_ssd2825(bridge); + int ret; + + msleep(100); + + /* Exit DSI configuration register set */ + ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, + SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT); + ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0); + + /* HW disable */ + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(5000, 6000); + + ret = regulator_bulk_disable(ARRAY_SIZE(ssd2825_supplies), + priv->supplies); + if (ret < 0) + dev_err(priv->dev, "error disabling regulators (%d)\n", ret); + + clk_disable_unprepare(priv->tx_clk); +} + +static int ssd2825_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct ssd2825_priv *priv = bridge_to_ssd2825(bridge); + + return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge, + flags); +} + +static enum drm_mode_status +ssd2825_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info 
*info, + const struct drm_display_mode *mode) +{ + if (mode->hdisplay > 1366) + return MODE_H_ILLEGAL; + + if (mode->vdisplay > 1366) + return MODE_V_ILLEGAL; + + return MODE_OK; +} + +static bool ssd2825_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* Default to positive sync */ + + if (!(adjusted_mode->flags & + (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) + adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC; + + if (!(adjusted_mode->flags & + (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) + adjusted_mode->flags |= DRM_MODE_FLAG_PVSYNC; + + return true; +} + +static const struct drm_bridge_funcs ssd2825_bridge_funcs = { + .attach = ssd2825_bridge_attach, + .mode_valid = ssd2825_bridge_mode_valid, + .mode_fixup = ssd2825_mode_fixup, + + .atomic_pre_enable = ssd2825_bridge_atomic_pre_enable, + .atomic_enable = ssd2825_bridge_atomic_enable, + .atomic_disable = ssd2825_bridge_atomic_disable, + + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, +}; + +static const struct drm_bridge_timings default_ssd2825_timings = { + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE + | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE + | DRM_BUS_FLAG_DE_HIGH, +}; + +static int ssd2825_probe(struct spi_device *spi) +{ + struct ssd2825_priv *priv; + struct device *dev = &spi->dev; + struct device_node *np = dev->of_node; + int ret; + + /* Driver supports only 9 bit 3 Wire mode */ + spi->bits_per_word = 9; + + ret = spi_setup(spi); + if (ret) + return ret; + + priv = devm_drm_bridge_alloc(dev, struct ssd2825_priv, bridge, &ssd2825_bridge_funcs); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + spi_set_drvdata(spi, priv); + + priv->spi = spi; + priv->dev = dev; + + mutex_init(&priv->mlock); + + priv->tx_clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(priv->tx_clk)) + return dev_err_probe(dev, PTR_ERR(priv->tx_clk), + "can't retrieve bridge tx_clk\n"); + + priv->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(priv->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), + "failed to get reset GPIO\n"); + + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(ssd2825_supplies), + ssd2825_supplies, &priv->supplies); + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + priv->hzd = 133; /* ns */ + device_property_read_u32(dev, "solomon,hs-zero-delay-ns", &priv->hzd); + + priv->hpd = 40; /* ns */ + device_property_read_u32(dev, "solomon,hs-prep-delay-ns", &priv->hpd); + + priv->dsi_host.dev = dev; + priv->dsi_host.ops = &ssd2825_dsi_host_ops; + + priv->bridge.timings = &default_ssd2825_timings; + priv->bridge.of_node = np; + + return mipi_dsi_host_register(&priv->dsi_host); +} + +static void ssd2825_remove(struct spi_device *spi) +{ + struct ssd2825_priv *priv = spi_get_drvdata(spi); + + mipi_dsi_host_unregister(&priv->dsi_host); +} + +static const struct of_device_id ssd2825_of_match[] = { + { .compatible = "solomon,ssd2825" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ssd2825_of_match); + +static struct spi_driver ssd2825_driver = { + .driver = { + .name = "ssd2825", + .of_match_table = ssd2825_of_match, + }, + .probe = ssd2825_probe, + .remove = ssd2825_remove, +}; +module_spi_driver(ssd2825_driver); + +MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>"); +MODULE_DESCRIPTION("Solomon SSD2825 RGB to MIPI-DSI SPI bridge driver");
+MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/waveshare-dsi.c b/drivers/gpu/drm/bridge/waveshare-dsi.c new file mode 100644 index 000000000000..01c70e7d3d3b --- /dev/null +++ b/drivers/gpu/drm/bridge/waveshare-dsi.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2025 NXP + * Based on panel-raspberrypi-touchscreen by Broadcom + */ + +#include <linux/backlight.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/regmap.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> + +struct ws_bridge { + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct backlight_device *backlight; + struct device *dev; + struct regmap *reg_map; +}; + +static const struct regmap_config ws_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xff, +}; + +static struct ws_bridge *bridge_to_ws_bridge(struct drm_bridge *bridge) +{ + return container_of(bridge, struct ws_bridge, bridge); +} + +static int ws_bridge_attach_dsi(struct ws_bridge *ws) +{ + const struct mipi_dsi_device_info info = { + .type = "ws-bridge", + .channel = 0, + .node = NULL, + }; + struct device_node *dsi_host_node; + struct device *dev = ws->dev; + struct mipi_dsi_device *dsi; + struct mipi_dsi_host *host; + int ret; + + dsi_host_node = of_graph_get_remote_node(dev->of_node, 0, 0); + if (!dsi_host_node) { + dev_err(dev, "Failed to get remote port\n"); + return -ENODEV; + } + host = of_find_mipi_dsi_host_by_node(dsi_host_node); + of_node_put(dsi_host_node); + if (!host) + return dev_err_probe(dev, -EPROBE_DEFER, "Failed to find dsi_host\n"); + + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) + return dev_err_probe(dev, PTR_ERR(dsi), "Failed to create dsi device\n"); + + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO | + MIPI_DSI_CLOCK_NON_CONTINUOUS; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->lanes = 2; + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to attach dsi to host\n"); + + return 0; +} + +static int ws_bridge_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct ws_bridge *ws = bridge_to_ws_bridge(bridge); + int ret; + + ret = ws_bridge_attach_dsi(ws); + if (ret) + return ret; + + return drm_bridge_attach(encoder, ws->next_bridge, + &ws->bridge, flags); +} + +static void ws_bridge_bridge_enable(struct drm_bridge *bridge) +{ + struct ws_bridge *ws = bridge_to_ws_bridge(bridge); + + regmap_write(ws->reg_map, 0xad, 0x01); + backlight_enable(ws->backlight); +} + +static void ws_bridge_bridge_disable(struct drm_bridge *bridge) +{ + struct ws_bridge *ws = bridge_to_ws_bridge(bridge); + + backlight_disable(ws->backlight); + regmap_write(ws->reg_map, 0xad, 0x00); +} + +static const struct drm_bridge_funcs ws_bridge_bridge_funcs = { + .enable = ws_bridge_bridge_enable, + .disable = ws_bridge_bridge_disable, + .attach = ws_bridge_bridge_attach, +}; + +static int ws_bridge_bl_update_status(struct backlight_device *bl) +{ + struct ws_bridge *ws = bl_get_data(bl); + + regmap_write(ws->reg_map, 0xab, 0xff - backlight_get_brightness(bl)); + regmap_write(ws->reg_map, 0xaa, 0x01); + + return 0; +} + +static const struct backlight_ops ws_bridge_bl_ops = { + .update_status = ws_bridge_bl_update_status, +}; + +static struct backlight_device 
*ws_bridge_create_backlight(struct ws_bridge *ws) +{ + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 255, + .max_brightness = 255, + }; + struct device *dev = ws->dev; + + return devm_backlight_device_register(dev, dev_name(dev), dev, ws, + &ws_bridge_bl_ops, &props); +} + +static int ws_bridge_probe(struct i2c_client *i2c) +{ + struct device *dev = &i2c->dev; + struct drm_panel *panel; + struct ws_bridge *ws; + int ret; + + ws = devm_drm_bridge_alloc(dev, struct ws_bridge, bridge, &ws_bridge_bridge_funcs); + if (!ws) + return -ENOMEM; + + ws->dev = dev; + + ws->reg_map = devm_regmap_init_i2c(i2c, &ws_regmap_config); + if (IS_ERR(ws->reg_map)) + return dev_err_probe(dev, PTR_ERR(ws->reg_map), "Failed to allocate regmap\n"); + + ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, &panel, NULL); + if (ret) + return dev_err_probe(dev, ret, "Failed to find remote panel\n"); + + ws->next_bridge = devm_drm_panel_bridge_add(dev, panel); + if (IS_ERR(ws->next_bridge)) + return PTR_ERR(ws->next_bridge); + + ws->backlight = ws_bridge_create_backlight(ws); + if (IS_ERR(ws->backlight)) { + ret = PTR_ERR(ws->backlight); + dev_err(dev, "Failed to create backlight: %d\n", ret); + return ret; + } + + regmap_write(ws->reg_map, 0xc0, 0x01); + regmap_write(ws->reg_map, 0xc2, 0x01); + regmap_write(ws->reg_map, 0xac, 0x01); + + ws->bridge.type = DRM_MODE_CONNECTOR_DPI; + ws->bridge.of_node = dev->of_node; + devm_drm_bridge_add(dev, &ws->bridge); + + return 0; +} + +static const struct of_device_id ws_bridge_of_ids[] = { + {.compatible = "waveshare,dsi2dpi",}, + { } +}; + +MODULE_DEVICE_TABLE(of, ws_bridge_of_ids); + +static struct i2c_driver ws_bridge_driver = { + .driver = { + .name = "ws_dsi2dpi", + .of_match_table = ws_bridge_of_ids, + }, + .probe = ws_bridge_probe, +}; +module_i2c_driver(ws_bridge_driver); + +MODULE_AUTHOR("Joseph Guo <qijian.guo@nxp.com>"); +MODULE_DESCRIPTION("Waveshare DSI2DPI bridge driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 5eb7e9bfe361..091c5335355a 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -20,6 +20,7 @@ #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <drm/display/drm_hdcp_helper.h> #include <drm/display/drm_hdmi_audio_helper.h> #include <drm/display/drm_hdmi_cec_helper.h> #include <drm/display/drm_hdmi_helper.h> @@ -641,6 +642,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, struct drm_bridge *bridge, *panel_bridge = NULL; unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); unsigned int max_bpc = 8; + bool support_hdcp = false; int connector_type; int ret; @@ -763,6 +765,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (drm_bridge_is_panel(bridge)) panel_bridge = bridge; + + if (bridge->support_hdcp) + support_hdcp = true; } if (connector_type == DRM_MODE_CONNECTOR_Unknown) @@ -816,6 +821,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { + bridge = bridge_connector->bridge_hdmi_cec; + ret = drmm_connector_hdmi_cec_notifier_register(connector, NULL, bridge->hdmi_cec_dev); @@ -825,6 +832,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if 
(bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { + bridge = bridge_connector->bridge_hdmi_cec; + ret = drmm_connector_hdmi_cec_register(connector, &drm_bridge_connector_hdmi_cec_funcs, bridge->hdmi_cec_adapter_name, @@ -845,6 +854,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (panel_bridge) drm_panel_bridge_set_orientation(connector, panel_bridge); + if (support_hdcp && IS_REACHABLE(CONFIG_DRM_DISPLAY_HELPER) && + IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER)) + drm_connector_attach_content_protection_property(connector, true); + return connector; } EXPORT_SYMBOL_GPL(drm_bridge_connector_init); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ef56b474acf5..d5ebe6ea0acb 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -456,6 +456,7 @@ mode_fixup(struct drm_atomic_state *state) ret = drm_atomic_bridge_chain_check(bridge, new_crtc_state, new_conn_state); + drm_bridge_put(bridge); if (ret) { drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n"); return ret; @@ -527,6 +528,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector, bridge = drm_bridge_chain_get_first_bridge(encoder); ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info, mode); + drm_bridge_put(bridge); if (ret != MODE_OK) { drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n"); return ret; @@ -1212,6 +1214,7 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state) */ bridge = drm_bridge_chain_get_first_bridge(encoder); drm_atomic_bridge_chain_disable(bridge, state); + drm_bridge_put(bridge); /* Right function depends upon target state. 
*/ if (funcs) { @@ -1329,6 +1332,7 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta */ bridge = drm_bridge_chain_get_first_bridge(encoder); drm_atomic_bridge_chain_post_disable(bridge, state); + drm_bridge_put(bridge); } } @@ -1501,6 +1505,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state) bridge = drm_bridge_chain_get_first_bridge(encoder); drm_bridge_chain_mode_set(bridge, mode, adjusted_mode); + drm_bridge_put(bridge); } } @@ -1580,6 +1585,7 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state */ bridge = drm_bridge_chain_get_first_bridge(encoder); drm_atomic_bridge_chain_pre_enable(bridge, state); + drm_bridge_put(bridge); } } @@ -1655,6 +1661,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state) } drm_atomic_bridge_chain_enable(bridge, state); + drm_bridge_put(bridge); } } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index dd45d9b504d8..dd439d55177a 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -941,11 +941,11 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge, { unsigned int i, num_in_bus_fmts = 0; struct drm_bridge_state *cur_state; - struct drm_bridge *prev_bridge; + struct drm_bridge *prev_bridge __free(drm_bridge_put) = + drm_bridge_get_prev_bridge(cur_bridge); u32 *in_bus_fmts; int ret; - prev_bridge = drm_bridge_get_prev_bridge(cur_bridge); cur_state = drm_atomic_get_new_bridge_state(crtc_state->state, cur_bridge); @@ -1227,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check); /** * drm_bridge_detect - check if anything is attached to the bridge output * @bridge: bridge control structure + * @connector: attached connector * * If the bridge supports output detection, as reported by the * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 6a44351e58b7..4a89b6acb6af 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -332,7 +332,12 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) if (obj->funcs->close) obj->funcs->close(obj, file_priv); + mutex_lock(&file_priv->prime.lock); + drm_prime_remove_buf_handle(&file_priv->prime, id); + + mutex_unlock(&file_priv->prime.lock); + drm_vma_node_revoke(&obj->vma_node, file_priv); drm_gem_object_handle_put_unlocked(obj); @@ -870,14 +875,6 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, } EXPORT_SYMBOL(drm_gem_dma_resv_wait); -/** - * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl - * @dev: drm_device - * @data: ioctl data - * @file_priv: drm file-private structure - * - * Releases the handle to an mm object. - */ int drm_gem_close_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -893,17 +890,6 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data, return ret; } -/** - * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl - * @dev: drm_device - * @data: ioctl data - * @file_priv: drm file-private structure - * - * Create a global name for an object, returning the name. - * - * Note that the name does not hold a reference; when the object - * is freed, the name goes away. 
- */ int drm_gem_flink_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -943,17 +929,6 @@ err: return ret; } -/** - * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl - * @dev: drm_device - * @data: ioctl data - * @file_priv: drm file-private structure - * - * Open an object using the global name, returning a handle and the size. - * - * This handle (of course) holds a reference to the object, so the object - * will not go away until the handle is deleted. - */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -988,6 +963,57 @@ err: return ret; } +int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_gem_change_handle *args = data; + struct drm_gem_object *obj; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_GEM)) + return -EOPNOTSUPP; + + obj = drm_gem_object_lookup(file_priv, args->handle); + if (!obj) + return -ENOENT; + + if (args->handle == args->new_handle) + return 0; + + mutex_lock(&file_priv->prime.lock); + + spin_lock(&file_priv->table_lock); + ret = idr_alloc(&file_priv->object_idr, obj, + args->new_handle, args->new_handle + 1, GFP_NOWAIT); + spin_unlock(&file_priv->table_lock); + + if (ret < 0) + goto out_unlock; + + if (obj->dma_buf) { + ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle); + if (ret < 0) { + spin_lock(&file_priv->table_lock); + idr_remove(&file_priv->object_idr, args->new_handle); + spin_unlock(&file_priv->table_lock); + goto out_unlock; + } + + drm_prime_remove_buf_handle(&file_priv->prime, args->handle); + } + + ret = 0; + + spin_lock(&file_priv->table_lock); + idr_remove(&file_priv->object_idr, args->handle); + spin_unlock(&file_priv->table_lock); + +out_unlock: + mutex_unlock(&file_priv->prime.lock); + + return ret; +} + /** * drm_gem_open - initializes GEM file-private structures at devnode open time * @dev: drm_device which is being opened by userspace diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index 5bb4c77db2c3..647b49ff2da5 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -271,107 +271,50 @@ npages_in_range(unsigned long start, unsigned long end) } /** - * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier - * @notifier: Pointer to the GPU SVM notifier structure. - * @start: Start address of the range - * @end: End address of the range + * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM + * @gpusvm: Pointer to the GPU SVM structure. 
+ * @start: Start address of the notifier + * @end: End address of the notifier * - * Return: A pointer to the drm_gpusvm_range if found or NULL + * Return: A pointer to the drm_gpusvm_notifier if found or NULL */ -struct drm_gpusvm_range * -drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, - unsigned long end) +struct drm_gpusvm_notifier * +drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start, + unsigned long end) { struct interval_tree_node *itree; - itree = interval_tree_iter_first(¬ifier->root, start, end - 1); + itree = interval_tree_iter_first(&gpusvm->root, start, end - 1); if (itree) - return container_of(itree, struct drm_gpusvm_range, itree); + return container_of(itree, struct drm_gpusvm_notifier, itree); else return NULL; } -EXPORT_SYMBOL_GPL(drm_gpusvm_range_find); +EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find); /** - * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier - * @range__: Iterator variable for the ranges - * @next__: Iterator variable for the ranges temporay storage - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the range - * @end__: End address of the range - * - * This macro is used to iterate over GPU SVM ranges in a notifier while - * removing ranges from it. - */ -#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \ - for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \ - (next__) = __drm_gpusvm_range_next(range__); \ - (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ - (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__)) - -/** - * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list - * @notifier: a pointer to the current drm_gpusvm_notifier + * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier + * @notifier: Pointer to the GPU SVM notifier structure. + * @start: Start address of the range + * @end: End address of the range * - * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if - * the current notifier is the last one or if the input notifier is - * NULL. + * Return: A pointer to the drm_gpusvm_range if found or NULL */ -static struct drm_gpusvm_notifier * -__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier) -{ - if (notifier && !list_is_last(¬ifier->entry, - ¬ifier->gpusvm->notifier_list)) - return list_next_entry(notifier, entry); - - return NULL; -} - -static struct drm_gpusvm_notifier * -notifier_iter_first(struct rb_root_cached *root, unsigned long start, - unsigned long last) +struct drm_gpusvm_range * +drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, + unsigned long end) { struct interval_tree_node *itree; - itree = interval_tree_iter_first(root, start, last); + itree = interval_tree_iter_first(¬ifier->root, start, end - 1); if (itree) - return container_of(itree, struct drm_gpusvm_notifier, itree); + return container_of(itree, struct drm_gpusvm_range, itree); else return NULL; } - -/** - * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm - * @notifier__: Iterator variable for the notifiers - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the notifier - * @end__: End address of the notifier - * - * This macro is used to iterate over GPU SVM notifiers in a gpusvm. 
- */ -#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \ - for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \ - (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ - (notifier__) = __drm_gpusvm_notifier_next(notifier__)) - -/** - * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm - * @notifier__: Iterator variable for the notifiers - * @next__: Iterator variable for the notifiers temporay storage - * @notifier__: Pointer to the GPU SVM notifier - * @start__: Start address of the notifier - * @end__: End address of the notifier - * - * This macro is used to iterate over GPU SVM notifiers in a gpusvm while - * removing notifiers from it. - */ -#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \ - for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \ - (next__) = __drm_gpusvm_notifier_next(notifier__); \ - (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ - (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__)) +EXPORT_SYMBOL_GPL(drm_gpusvm_range_find); /** * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier. @@ -473,22 +416,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm, EXPORT_SYMBOL_GPL(drm_gpusvm_init); /** - * drm_gpusvm_notifier_find() - Find GPU SVM notifier - * @gpusvm: Pointer to the GPU SVM structure - * @fault_addr: Fault address - * - * This function finds the GPU SVM notifier associated with the fault address. - * - * Return: Pointer to the GPU SVM notifier on success, NULL otherwise. - */ -static struct drm_gpusvm_notifier * -drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, - unsigned long fault_addr) -{ - return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1); -} - -/** * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct * @@ -943,7 +870,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm, if (!mmget_not_zero(mm)) return ERR_PTR(-EFAULT); - notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr); + notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1); if (!notifier) { notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr); if (IS_ERR(notifier)) { @@ -1107,7 +1034,8 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm, drm_gpusvm_driver_lock_held(gpusvm); notifier = drm_gpusvm_notifier_find(gpusvm, - drm_gpusvm_range_start(range)); + drm_gpusvm_range_start(range), + drm_gpusvm_range_start(range) + 1); if (WARN_ON_ONCE(!notifier)) return; diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index bbc7fecb6f4a..d6bea8a4fffd 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -421,6 +421,71 @@ */ /** + * DOC: Madvise Logic - Splitting and Traversal + * + * This logic handles GPU VA range updates by generating remap and map operations + * without performing unmaps or merging existing mappings. + * + * 1) The requested range lies entirely within a single drm_gpuva. The logic splits + * the existing mapping at the start and end boundaries and inserts a new map. 
+ * + * :: + * a start end b + * pre: |-----------------------| + * drm_gpuva1 + * + * a start end b + * new: |-----|=========|-------| + * remap map remap + * + * one REMAP and one MAP : Same behaviour as SPLIT and MERGE + * + * 2) The requested range spans multiple drm_gpuva regions. The logic traverses + * across boundaries, remapping the start and end segments, and inserting two + * map operations to cover the full range. + * + * :: a start b c end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a start b c end d + * new: |-------|==========|--------------|========|---------| + * remap1 map1 drm_gpuva2 map2 remap2 + * + * two REMAPS and two MAPS + * + * 3) Either start or end lies within a drm_gpuva. A single remap and map operation + * are generated to update the affected portion. + * + * + * :: a/start b c end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a/start b c end d + * new: |------------------|--------------|========|---------| + * drm_gpuva1 drm_gpuva2 map1 remap1 + * + * :: a start b c/end d + * pre: |------------------|--------------|------------------| + * drm_gpuva1 drm_gpuva2 drm_gpuva3 + * + * a start b c/end d + * new: |-------|==========|--------------|------------------| + * remap1 map1 drm_gpuva2 drm_gpuva3 + * + * one REMAP and one MAP + * + * 4) Both start and end align with existing drm_gpuva boundaries. No operations + * are needed as the range is already covered. + * + * 5) No existing drm_gpuvas. No operations. + * + * Unlike drm_gpuvm_sm_map_ops_create, this logic avoids unmaps and merging, + * focusing solely on remap and map operations for efficient traversal and update. + */ + +/** * DOC: Locking * * In terms of managing &drm_gpuva entries DRM GPUVM does not take care of @@ -486,13 +551,18 @@ * u64 addr, u64 range, * struct drm_gem_object *obj, u64 offset) * { + * struct drm_gpuvm_map_req map_req = { + * .map.va.addr = addr, + * .map.va.range = range, + * .map.gem.obj = obj, + * .map.gem.offset = offset, + * }; * struct drm_gpuva_ops *ops; * struct drm_gpuva_op *op * struct drm_gpuvm_bo *vm_bo; * * driver_lock_va_space(); - * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, - * obj, offset); + * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req); * if (IS_ERR(ops)) * return PTR_ERR(ops); * @@ -2054,16 +2124,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap); static int op_map_cb(const struct drm_gpuvm_ops *fn, void *priv, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset) + const struct drm_gpuvm_map_req *req) { struct drm_gpuva_op op = {}; + if (!req) + return 0; + op.op = DRM_GPUVA_OP_MAP; - op.map.va.addr = addr; - op.map.va.range = range; - op.map.gem.obj = obj; - op.map.gem.offset = offset; + op.map.va.addr = req->map.va.addr; + op.map.va.range = req->map.va.range; + op.map.gem.obj = req->map.gem.obj; + op.map.gem.offset = req->map.gem.offset; return fn->sm_step_map(&op, priv); } @@ -2088,10 +2160,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv, static int op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv, - struct drm_gpuva *va, bool merge) + struct drm_gpuva *va, bool merge, bool madvise) { struct drm_gpuva_op op = {}; + if (madvise) + return 0; + op.op = DRM_GPUVA_OP_UNMAP; op.unmap.va = va; op.unmap.keep = merge; @@ -2102,10 +2177,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv, static int __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, const struct drm_gpuvm_ops *ops, void *priv, - 
u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req, + bool madvise) { + struct drm_gem_object *req_obj = req->map.gem.obj; + const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req; struct drm_gpuva *va, *next; + u64 req_offset = req->map.gem.offset; + u64 req_range = req->map.va.range; + u64 req_addr = req->map.va.addr; u64 req_end = req_addr + req_range; int ret; @@ -2120,19 +2200,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, u64 end = addr + range; bool merge = !!va->gem.obj; + if (madvise && obj) + continue; + if (addr == req_addr) { merge &= obj == req_obj && offset == req_offset; if (end == req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; break; } if (end < req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; continue; @@ -2153,6 +2236,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, NULL, &n, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } } else if (addr < req_addr) { @@ -2173,6 +2259,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, NULL, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } @@ -2180,6 +2269,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, NULL, &u); if (ret) return ret; + + if (madvise) { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = req_addr, + .map.va.range = end - req_addr, + }; + + ret = op_map_cb(ops, priv, &map_req); + if (ret) + return ret; + } + continue; } @@ -2195,6 +2296,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, &p, &n, &u); if (ret) return ret; + + if (madvise) + op_map = req; break; } } else if (addr > req_addr) { @@ -2203,16 +2307,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, (addr - req_addr); if (end == req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; + break; } if (end < req_end) { - ret = op_unmap_cb(ops, priv, va, merge); + ret = op_unmap_cb(ops, priv, va, merge, madvise); if (ret) return ret; + continue; } @@ -2231,14 +2337,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, ret = op_remap_cb(ops, priv, NULL, &n, &u); if (ret) return ret; + + if (madvise) { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = addr, + .map.va.range = req_end - addr, + }; + + return op_map_cb(ops, priv, &map_req); + } break; } } } - - return op_map_cb(ops, priv, - req_addr, req_range, - req_obj, req_offset); + return op_map_cb(ops, priv, op_map); } static int @@ -2290,7 +2402,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, if (ret) return ret; } else { - ret = op_unmap_cb(ops, priv, va, false); + ret = op_unmap_cb(ops, priv, va, false, false); if (ret) return ret; } @@ -2303,10 +2415,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps * @gpuvm: the &drm_gpuvm representing the GPU VA space * @priv: pointer to a driver private data structure - * @req_addr: the start address of the new mapping - * @req_range: the range of the new mapping - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + * @req: ptr to struct drm_gpuvm_map_req * * This function iterates the given range of the GPU VA space. 
It utilizes the * &drm_gpuvm_ops to call back into the driver providing the split and merge @@ -2333,8 +2442,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, */ int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req) { const struct drm_gpuvm_ops *ops = gpuvm->ops; @@ -2343,9 +2451,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, ops->sm_step_unmap))) return -EINVAL; - return __drm_gpuvm_sm_map(gpuvm, ops, priv, - req_addr, req_range, - req_obj, req_offset); + return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map); @@ -2421,10 +2527,7 @@ static const struct drm_gpuvm_ops lock_ops = { * @gpuvm: the &drm_gpuvm representing the GPU VA space * @exec: the &drm_exec locking context * @num_fences: for newly mapped objects, the # of fences to reserve - * @req_addr: the start address of the range to unmap - * @req_range: the range of the mappings to unmap - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + * @req: ptr to drm_gpuvm_map_req struct * * This function locks (drm_exec_lock_obj()) objects that will be unmapped/ * remapped, and locks+prepares (drm_exec_prepare_object()) objects that @@ -2445,9 +2548,7 @@ static const struct drm_gpuvm_ops lock_ops = { * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range); * break; * case DRIVER_OP_MAP: - * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, - * op->addr, op->range, - * obj, op->obj_offset); + * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req); * break; * } * @@ -2478,18 +2579,17 @@ static const struct drm_gpuvm_ops lock_ops = { int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, unsigned int num_fences, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + struct drm_gpuvm_map_req *req) { + struct drm_gem_object *req_obj = req->map.gem.obj; + if (req_obj) { int ret = drm_exec_prepare_obj(exec, req_obj, num_fences); if (ret) return ret; } - return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, - req_addr, req_range, - req_obj, req_offset); + return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock); @@ -2608,13 +2708,42 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = { .sm_step_unmap = drm_gpuva_sm_step, }; +static struct drm_gpuva_ops * +__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req, + bool madvise) +{ + struct drm_gpuva_ops *ops; + struct { + struct drm_gpuvm *vm; + struct drm_gpuva_ops *ops; + } args; + int ret; + + ops = kzalloc(sizeof(*ops), GFP_KERNEL); + if (unlikely(!ops)) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&ops->list); + + args.vm = gpuvm; + args.ops = ops; + + ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise); + if (ret) + goto err_free_ops; + + return ops; + +err_free_ops: + drm_gpuva_ops_free(gpuvm, ops); + return ERR_PTR(ret); +} + /** * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge * @gpuvm: the &drm_gpuvm representing the GPU VA space - * @req_addr: the start address of the new mapping - * @req_range: the range of the new mapping - * @req_obj: the &drm_gem_object to map - * @req_offset: the offset within the &drm_gem_object + * @req: map request arguments * * This function creates a list of operations to perform splitting and merging * of existent 
mapping(s) with the newly requested one. @@ -2642,40 +2771,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = { */ struct drm_gpuva_ops * drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, - u64 req_addr, u64 req_range, - struct drm_gem_object *req_obj, u64 req_offset) + const struct drm_gpuvm_map_req *req) { - struct drm_gpuva_ops *ops; - struct { - struct drm_gpuvm *vm; - struct drm_gpuva_ops *ops; - } args; - int ret; - - ops = kzalloc(sizeof(*ops), GFP_KERNEL); - if (unlikely(!ops)) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&ops->list); - - args.vm = gpuvm; - args.ops = ops; - - ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, - req_addr, req_range, - req_obj, req_offset); - if (ret) - goto err_free_ops; - - return ops; - -err_free_ops: - drm_gpuva_ops_free(gpuvm, ops); - return ERR_PTR(ret); + return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false); } EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create); /** + * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split + * @gpuvm: the &drm_gpuvm representing the GPU VA space + * @req: map request arguments + * + * This function creates a list of operations to perform splitting + * of existent mapping(s) at start or end, based on the requested mapping. + * + * The list can be iterated with &drm_gpuva_for_each_op and must be processed + * in the given order. It can contain map and remap operations, but it + * can also be empty if no operation is required, e.g. if the requested mapping + * already exists in the exact same way. + * + * There will be no unmap operations, a maximum of two remap operations and two + * map operations. The two map operations correspond to: one from the requested + * start to the end of drm_gpuvaX, and another from the start of drm_gpuvaY to + * the requested end. + * + * Note that before calling this function again with another mapping request it + * is necessary to update the &drm_gpuvm's view of the GPU VA space. The + * previously obtained operations must be either processed or abandoned. To + * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(), + * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be + * used. + * + * After the caller has finished processing the returned &drm_gpuva_ops, they + * must be freed with &drm_gpuva_ops_free.
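+ *
+ * A minimal usage sketch (illustrative only, not taken from this patch;
+ * "vm", "madv_req" and driver_apply_madvise_attrs() are hypothetical
+ * driver code)::
+ *
+ *	struct drm_gpuva_ops *ops;
+ *	struct drm_gpuva_op *op;
+ *
+ *	ops = drm_gpuvm_madvise_ops_create(vm, &madv_req);
+ *	if (IS_ERR(ops))
+ *		return PTR_ERR(ops);
+ *
+ *	drm_gpuva_for_each_op(op, ops)
+ *		driver_apply_madvise_attrs(vm, op);
+ *
+ *	drm_gpuva_ops_free(vm, ops);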
+ * + * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure + */ +struct drm_gpuva_ops * +drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req) +{ + return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create); + +/** * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on * unmap * @gpuvm: the &drm_gpuvm representing the GPU VA space diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index e79c3c623c9a..5a3bed48ab1f 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -85,6 +85,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); +int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, + struct dma_buf *dma_buf, uint32_t handle); void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle); @@ -170,6 +172,8 @@ int drm_gem_close_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_gem_flink_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); int drm_gem_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f593dc569d31..d8a24875a7ba 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -653,6 +653,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CHANGE_HANDLE, drm_gem_change_handle_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0), diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 3a9b3278a6e3..a712e177b350 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -773,41 +773,13 @@ ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, EXPORT_SYMBOL(mipi_dsi_generic_write); /** - * mipi_dsi_generic_write_chatty() - mipi_dsi_generic_write() w/ an error log - * @dsi: DSI peripheral device - * @payload: buffer containing the payload - * @size: size of payload buffer - * - * Like mipi_dsi_generic_write() but includes a dev_err() - * call for you and returns 0 upon success, not the number of bytes sent. - * - * Return: 0 on success or a negative error code on failure. 
- */ -int mipi_dsi_generic_write_chatty(struct mipi_dsi_device *dsi, - const void *payload, size_t size) -{ - struct device *dev = &dsi->dev; - ssize_t ret; - - ret = mipi_dsi_generic_write(dsi, payload, size); - if (ret < 0) { - dev_err(dev, "sending generic data %*ph failed: %zd\n", - (int)size, payload, ret); - return ret; - } - - return 0; -} -EXPORT_SYMBOL(mipi_dsi_generic_write_chatty); - -/** - * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write_chatty() w/ accum_err + * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write() w/ accum_err * @ctx: Context for multiple DSI transactions * @payload: buffer containing the payload * @size: size of payload buffer * - * Like mipi_dsi_generic_write_chatty() but deals with errors in a way that - * makes it convenient to make several calls in a row. + * A wrapper around mipi_dsi_generic_write() that deals with errors in a way + * that makes it convenient to make several calls in a row. */ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx, const void *payload, size_t size) @@ -829,6 +801,30 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx, EXPORT_SYMBOL(mipi_dsi_generic_write_multi); /** + * mipi_dsi_dual_generic_write_multi() - mipi_dsi_generic_write_multi() for + * two dsi channels, one after the other + * @ctx: Context for multiple DSI transactions + * @dsi1: First dsi channel to write buffer to + * @dsi2: Second dsi channel to write buffer to + * @payload: Buffer containing the payload + * @size: Size of payload buffer + * + * A wrapper around mipi_dsi_generic_write_multi() that allows the user to + * conveniently write to two dsi channels, one after the other. + */ +void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx, + struct mipi_dsi_device *dsi1, + struct mipi_dsi_device *dsi2, + const void *payload, size_t size) +{ + ctx->dsi = dsi1; + mipi_dsi_generic_write_multi(ctx, payload, size); + ctx->dsi = dsi2; + mipi_dsi_generic_write_multi(ctx, payload, size); +} +EXPORT_SYMBOL(mipi_dsi_dual_generic_write_multi); + +/** * mipi_dsi_generic_read() - receive data using a generic read packet * @dsi: DSI peripheral device * @params: buffer containing the request parameters @@ -1008,6 +1004,30 @@ void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx, EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer_multi); /** + * mipi_dsi_dual_dcs_write_buffer_multi - mipi_dsi_dcs_write_buffer_multi() for + * two dsi channels, one after the other + * @ctx: Context for multiple DSI transactions + * @dsi1: First dsi channel to write buffer to + * @dsi2: Second dsi channel to write buffer to + * @data: Buffer containing data to be transmitted + * @len: Size of transmission buffer + * + * A wrapper around mipi_dsi_dcs_write_buffer_multi() that allows the user to + * conveniently write to two dsi channels, one after the other. 
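+ *
+ * A brief usage sketch (hypothetical dual-link panel code; "panel", its
+ * link1/link2 fields and the command bytes are illustrative, not part of
+ * this patch)::
+ *
+ *	struct mipi_dsi_multi_context ctx = { };
+ *	const u8 unlock[] = { 0xb0, 0x00 };
+ *
+ *	mipi_dsi_dual_dcs_write_buffer_multi(&ctx, panel->link1, panel->link2,
+ *					     unlock, sizeof(unlock));
+ *	if (ctx.accum_err)
+ *		return ctx.accum_err;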
+ */ +void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx, + struct mipi_dsi_device *dsi1, + struct mipi_dsi_device *dsi2, + const void *data, size_t len) +{ + ctx->dsi = dsi1; + mipi_dsi_dcs_write_buffer_multi(ctx, data, len); + ctx->dsi = dsi2; + mipi_dsi_dcs_write_buffer_multi(ctx, data, len); +} +EXPORT_SYMBOL(mipi_dsi_dual_dcs_write_buffer_multi); + +/** * mipi_dsi_dcs_write() - send DCS write command * @dsi: DSI peripheral device * @cmd: DCS command @@ -1077,6 +1097,43 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, EXPORT_SYMBOL(mipi_dsi_dcs_read); /** + * mipi_dsi_dcs_read_multi() - mipi_dsi_dcs_read() w/ accum_err + * @ctx: Context for multiple DSI transactions + * @cmd: DCS command + * @data: buffer in which to receive data + * @len: size of receive buffer + * + * Like mipi_dsi_dcs_read() but deals with errors in a way that makes it + * convenient to make several calls in a row. + */ +void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd, + void *data, size_t len) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + struct mipi_dsi_msg msg = { + .channel = dsi->channel, + .type = MIPI_DSI_DCS_READ, + .tx_buf = &cmd, + .tx_len = 1, + .rx_buf = data, + .rx_len = len + }; + ssize_t ret; + + if (ctx->accum_err) + return; + + ret = mipi_dsi_device_transfer(dsi, &msg); + if (ret < 0) { + ctx->accum_err = ret; + dev_err(dev, "dcs read with command %#x failed: %d\n", cmd, + ctx->accum_err); + } +} +EXPORT_SYMBOL(mipi_dsi_dcs_read_multi); + +/** * mipi_dsi_dcs_nop() - send DCS nop packet * @dsi: DSI peripheral device * diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index a23fc712a8b7..43a10b4af43a 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -93,7 +93,7 @@ struct drm_prime_member { struct rb_node handle_rb; }; -static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, +int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) { struct drm_prime_member *member; @@ -190,8 +190,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, { struct rb_node *rb; - mutex_lock(&prime_fpriv->lock); - rb = prime_fpriv->handles.rb_node; while (rb) { struct drm_prime_member *member; @@ -210,8 +208,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, rb = rb->rb_left; } } - - mutex_unlock(&prime_fpriv->lock); } void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 6b3541159c0f..09b12c30df69 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -119,6 +119,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, *status = drm_bridge_chain_mode_valid(bridge, &connector->display_info, mode); + drm_bridge_put(bridge); if (*status != MODE_OK) { /* There is also no point in continuing for crtc check * here. 
*/ diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index 5385a2126e45..b52a12cbba3e 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -620,8 +620,6 @@ static void gud_disconnect(struct usb_interface *interface) struct gud_device *gdrm = usb_get_intfdata(interface); struct drm_device *drm = &gdrm->drm; - drm_dbg(drm, "%s:\n", __func__); - drm_kms_helper_poll_fini(drm); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 6e26cb4c5724..685ac98bd001 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -552,10 +552,6 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc) if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); - - /* wa_18038517565 Enable DPFC clock gating after FBC disable */ - if (display->platform.dg2 || DISPLAY_VER(display) >= 14) - fbc_compressor_clkgate_disable_wa(fbc, false); } } @@ -1710,6 +1706,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc) __intel_fbc_cleanup_cfb(fbc); + /* wa_18038517565 Enable DPFC clock gating after FBC disable */ + if (display->platform.dg2 || DISPLAY_VER(display) >= 14) + fbc_compressor_clkgate_disable_wa(fbc, false); + fbc->state.plane = NULL; fbc->flip_pending = false; fbc->busy_bits = 0; diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index ae9053919211..41988e193a41 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -3275,7 +3275,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp) static void _psr_invalidate_handle(struct intel_dp *intel_dp) { - if (intel_dp->psr.psr2_sel_fetch_enabled) { + struct intel_display *display = to_intel_display(intel_dp); + + if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) { intel_dp->psr.psr2_sel_fetch_cff_enabled = true; intel_psr_configure_full_frame_update(intel_dp); @@ -3361,7 +3363,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - if (intel_dp->psr.psr2_sel_fetch_enabled) { + if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { /* can we turn CFF off? 
*/ if (intel_dp->psr.busy_frontbuffer_bits == 0) @@ -3378,11 +3380,13 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) * existing SU configuration */ intel_psr_configure_full_frame_update(intel_dp); - } - intel_psr_force_update(intel_dp); + intel_psr_force_update(intel_dp); + } else { + intel_psr_exit(intel_dp); + } - if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active && + if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) && !intel_dp->psr.busy_frontbuffer_bits) queue_work(display->wq.unordered, &intel_dp->psr.work); } diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 2896fa7501b1..3d97990170bf 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -185,12 +185,17 @@ struct pvr_vm_bind_op { static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op) { switch (bind_op->type) { - case PVR_VM_BIND_TYPE_MAP: + case PVR_VM_BIND_TYPE_MAP: { + const struct drm_gpuvm_map_req map_req = { + .map.va.addr = bind_op->device_addr, + .map.va.range = bind_op->size, + .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj), + .map.gem.offset = bind_op->offset, + }; + return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr, - bind_op, bind_op->device_addr, - bind_op->size, - gem_from_pvr_gem(bind_op->pvr_obj), - bind_op->offset); + bind_op, &map_req); + } case PVR_VM_BIND_TYPE_UNMAP: return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr, diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 3cd8562a5109..210604181c05 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -371,6 +371,12 @@ struct drm_gpuva * msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, u64 offset, u64 range_start, u64 range_end) { + struct drm_gpuva_op_map op_map = { + .va.addr = range_start, + .va.range = range_end - range_start, + .gem.obj = obj, + .gem.offset = offset, + }; struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuvm_bo *vm_bo; struct msm_gem_vma *vma; @@ -399,7 +405,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, if (obj) GEM_WARN_ON((range_end - range_start) > obj->size); - drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset); + drm_gpuva_init_from_op(&vma->base, &op_map); vma->mapped = false; ret = drm_gpuva_insert(&vm->base, &vma->base); @@ -1171,11 +1177,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec) op->obj_offset); break; case MSM_VM_BIND_OP_MAP: - case MSM_VM_BIND_OP_MAP_NULL: - ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, - op->iova, op->range, - op->obj, op->obj_offset); + case MSM_VM_BIND_OP_MAP_NULL: { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->iova, + .map.va.range = op->range, + .map.gem.obj = op->obj, + .map.gem.offset = op->obj_offset, + }; + + ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req); break; + } default: /* * lookup_op() should have already thrown an error for @@ -1282,10 +1294,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job) if (op->flags & MSM_VM_BIND_OP_DUMP) arg.flags |= MSM_VMA_DUMP; fallthrough; - case MSM_VM_BIND_OP_MAP_NULL: - ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova, - op->range, op->obj, op->obj_offset); + case MSM_VM_BIND_OP_MAP_NULL: { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->iova, + .map.va.range = op->range, + .map.gem.obj = op->obj, + .map.gem.offset = op->obj_offset, + }; + + ret = drm_gpuvm_sm_map(job->vm, &arg, 
&map_req); break; + } default: /* * lookup_op() should have already thrown an error for diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c index dbd42cc1da87..1c3b33be6c40 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_kms.c +++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c @@ -433,7 +433,6 @@ static int lcdif_crtc_atomic_check(struct drm_crtc *crtc, struct drm_connector *connector; struct drm_encoder *encoder; struct drm_bridge_state *bridge_state; - struct drm_bridge *bridge; u32 bus_format, bus_flags; bool format_set = false, flags_set = false; int ret, i; @@ -453,7 +452,8 @@ static int lcdif_crtc_atomic_check(struct drm_crtc *crtc, encoder = connector_state->best_encoder; - bridge = drm_bridge_chain_get_first_bridge(encoder); + struct drm_bridge *bridge __free(drm_bridge_put) = + drm_bridge_chain_get_first_bridge(encoder); if (!bridge) continue; diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index d1587639ebb0..c88776d1e784 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -102,14 +102,6 @@ config DRM_NOUVEAU_SVM Say Y here if you want to enable experimental support for Shared Virtual Memory (SVM). -config DRM_NOUVEAU_GSP_DEFAULT - bool "Use GSP firmware for Turing/Ampere (needs firmware installed)" - depends on DRM_NOUVEAU - default n - help - Say Y here if you want to use the GSP codepaths by default on - Turing and Ampere GPUs. - config DRM_NOUVEAU_CH7006 tristate "Chrontel ch7006 TV encoder" depends on DRM_NOUVEAU diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h index 561877725aac..bb34b0a6082d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.h +++ b/drivers/gpu/drm/nouveau/nouveau_chan.h @@ -31,8 +31,6 @@ struct nouveau_channel { u64 addr; } push; - /* TODO: this will be reworked in the near future */ - bool accel_done; void *fence; struct { int max; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index e1e542126310..805d0a87aa54 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo, int nouveau_framebuffer_new(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *gem, struct drm_framebuffer **pfb) @@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev, struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct drm_framebuffer *fb; - const struct drm_format_info *info; unsigned int height, i; uint32_t tile_mode; uint8_t kind; @@ -295,9 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev, kind = nvbo->kind; } - info = drm_get_format_info(dev, mode_cmd->pixel_format, - mode_cmd->modifier[0]); - for (i = 0; i < info->num_planes; i++) { height = drm_format_info_plane_height(info, mode_cmd->height, @@ -321,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev, if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL))) return -ENOMEM; - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); fb->obj[0] = gem; ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); @@ -344,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, if (!gem) return ERR_PTR(-ENOENT); - ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb); + ret = nouveau_framebuffer_new(dev, info, 
mode_cmd, gem, &fb); if (ret == 0) return fb; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index e45f211501f6..470e0910d484 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -8,8 +8,11 @@ #include <drm/drm_framebuffer.h> +struct drm_format_info; + int nouveau_framebuffer_new(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *gem, struct drm_framebuffer **pfb); diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index 0e27b76d1e1c..c25ef9a54b9f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -90,7 +90,6 @@ FIRE_RING(struct nouveau_channel *chan) { if (chan->dma.cur == chan->dma.put) return; - chan->accel_done = true; WRITE_PUT(chan->dma.cur); diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index ddfc46bc1b3e..d94a85509176 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, break; case OP_MAP: { struct nouveau_uvma_region *reg; + struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->va.addr, + .map.va.range = op->va.range, + .map.gem.obj = op->gem.obj, + .map.gem.offset = op->gem.offset, + }; reg = nouveau_uvma_region_find_first(uvmm, op->va.addr, @@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job, } op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base, - op->va.addr, - op->va.range, - op->gem.obj, - op->gem.offset); + &map_req); if (IS_ERR(op->ops)) { ret = PTR_ERR(op->ops); goto unwind_continue; diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c index b9581feb24cc..a23b40b27b81 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c @@ -44,7 +44,7 @@ nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value) bool space = false; while (size >= 1 && bf->name) { if (value & bf->mask) { - int this = snprintf(data, size, "%s%s", + int this = scnprintf(data, size, "%s%s", space ? 
" " : "", bf->name); size -= this; data += this; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c index eb765da0876e..35d1fcef520b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c @@ -41,8 +41,8 @@ ad102_gsp = { static struct nvkm_gsp_fwif ad102_gsps[] = { - { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true }, - { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true }, + { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144" }, + { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01" }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index d23243a83a4c..7ccb41761066 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -138,8 +138,10 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev); fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp); - if (IS_ERR(fwif)) + if (IS_ERR(fwif)) { + nvkm_error(&gsp->subdev, "Failed to load required firmware for device."); return PTR_ERR(fwif); + } gsp->func = fwif->func; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c index 12a3f2c1ed82..1b3b31b95ce4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c @@ -20,7 +20,7 @@ gb100_gsp = { static struct nvkm_gsp_fwif gb100_gsps[] = { - { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true }, + { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144" }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c index c1d718172ddf..51384c63148c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c @@ -20,7 +20,7 @@ gb202_gsp = { static struct nvkm_gsp_fwif gb202_gsps[] = { - { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true }, + { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144" }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c index ce31e8248807..b0dd5fce7bad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c @@ -344,7 +344,7 @@ done: static struct nvkm_gsp_fwif gh100_gsps[] = { - { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true }, + { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144" }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 4f14e85fc69e..c3494b7ac572 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -14,7 +14,6 @@ struct nvkm_gsp_fwif { const struct nvkm_gsp_func *func; const struct nvkm_rm_impl *rm; const char *ver; - bool enable; }; int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 588cb4ab85cb..32e6a065d6d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -582,10 +582,13 @@ struct nv_gsp_registry_entries { * 
RMSecBusResetEnable - enables PCI secondary bus reset * RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration * registers on any PCI reset. + * RMDevidCheckIgnore - allows GSP-RM to boot even if the PCI dev ID + * is not found in the internal product name database. */ static const struct nv_gsp_registry_entries r535_registry_entries[] = { { "RMSecBusResetEnable", 1 }, { "RMForcePcieConfigSave", 1 }, + { "RMDevidCheckIgnore", 1 }, }; #define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c index 58e233bc53b1..81e56da0474a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c @@ -383,13 +383,9 @@ int tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif) { struct nvkm_subdev *subdev = &gsp->subdev; - bool enable_gsp = fwif->enable; int ret; -#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT) - enable_gsp = true; -#endif - if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp)) + if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", true)) return -EINVAL; ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm); diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs index 7e59a34b830d..7e7d4e2de2fb 100644 --- a/drivers/gpu/drm/nova/file.rs +++ b/drivers/gpu/drm/nova/file.rs @@ -2,13 +2,11 @@ use crate::driver::{NovaDevice, NovaDriver}; use crate::gem::NovaObject; -use crate::uapi::{GemCreate, GemInfo, Getparam}; use kernel::{ alloc::flags::*, drm::{self, gem::BaseObject}, pci, prelude::*, - types::Opaque, uapi, }; @@ -26,20 +24,19 @@ impl File { /// IOCTL: get_param: Query GPU / driver metadata. pub(crate) fn get_param( dev: &NovaDevice, - getparam: &Opaque<uapi::drm_nova_getparam>, + getparam: &mut uapi::drm_nova_getparam, _file: &drm::File<File>, ) -> Result<u32> { let adev = &dev.adev; let parent = adev.parent().ok_or(ENOENT)?; let pdev: &pci::Device = parent.try_into()?; - let getparam: &Getparam = getparam.into(); - let value = match getparam.param() as u32 { + let value = match getparam.param as u32 { uapi::NOVA_GETPARAM_VRAM_BAR_SIZE => pdev.resource_len(1)?, _ => return Err(EINVAL), }; - getparam.set_value(value); + getparam.value = value; Ok(0) } @@ -47,13 +44,12 @@ impl File { /// IOCTL: gem_create: Create a new DRM GEM object. pub(crate) fn gem_create( dev: &NovaDevice, - req: &Opaque<uapi::drm_nova_gem_create>, + req: &mut uapi::drm_nova_gem_create, file: &drm::File<File>, ) -> Result<u32> { - let req: &GemCreate = req.into(); - let obj = NovaObject::new(dev, req.size().try_into()?)?; + let obj = NovaObject::new(dev, req.size.try_into()?)?; - req.set_handle(obj.create_handle(file)?); + req.handle = obj.create_handle(file)?; Ok(0) } @@ -61,13 +57,12 @@ impl File { /// IOCTL: gem_info: Query GEM metadata. 
pub(crate) fn gem_info( _dev: &NovaDevice, - req: &Opaque<uapi::drm_nova_gem_info>, + req: &mut uapi::drm_nova_gem_info, file: &drm::File<File>, ) -> Result<u32> { - let req: &GemInfo = req.into(); - let bo = NovaObject::lookup_handle(file, req.handle())?; + let bo = NovaObject::lookup_handle(file, req.handle)?; - req.set_size(bo.size().try_into()?); + req.size = bo.size().try_into()?; Ok(0) } diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs index 64fd670e99e1..8893e58ee0db 100644 --- a/drivers/gpu/drm/nova/nova.rs +++ b/drivers/gpu/drm/nova/nova.rs @@ -5,7 +5,6 @@ mod driver; mod file; mod gem; -mod uapi; use crate::driver::NovaDriver; diff --git a/drivers/gpu/drm/nova/uapi.rs b/drivers/gpu/drm/nova/uapi.rs deleted file mode 100644 index eb228a58d423..000000000000 --- a/drivers/gpu/drm/nova/uapi.rs +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -use kernel::uapi; - -// TODO Work out some common infrastructure to avoid boilerplate code for uAPI abstractions. - -macro_rules! define_uapi_abstraction { - ($name:ident <= $inner:ty) => { - #[repr(transparent)] - pub struct $name(::kernel::types::Opaque<$inner>); - - impl ::core::convert::From<&::kernel::types::Opaque<$inner>> for &$name { - fn from(value: &::kernel::types::Opaque<$inner>) -> Self { - // SAFETY: `Self` is a transparent wrapper of `$inner`. - unsafe { ::core::mem::transmute(value) } - } - } - }; -} - -define_uapi_abstraction!(Getparam <= uapi::drm_nova_getparam); - -impl Getparam { - pub fn param(&self) -> u64 { - // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`. - unsafe { (*self.0.get()).param } - } - - pub fn set_value(&self, v: u64) { - // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`. - unsafe { (*self.0.get()).value = v }; - } -} - -define_uapi_abstraction!(GemCreate <= uapi::drm_nova_gem_create); - -impl GemCreate { - pub fn size(&self) -> u64 { - // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`. - unsafe { (*self.0.get()).size } - } - - pub fn set_handle(&self, handle: u32) { - // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`. - unsafe { (*self.0.get()).handle = handle }; - } -} - -define_uapi_abstraction!(GemInfo <= uapi::drm_nova_gem_info); - -impl GemInfo { - pub fn handle(&self) -> u32 { - // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`. - unsafe { (*self.0.get()).handle } - } - - pub fn set_size(&self, size: u64) { - // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`. 
- unsafe { (*self.0.get()).size = size }; - } -} diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 30c81e2e5d6b..bb3105556f19 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -351,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, } } - fb = omap_framebuffer_init(dev, mode_cmd, bos); + fb = omap_framebuffer_init(dev, info, mode_cmd, bos); if (IS_ERR(fb)) goto error; @@ -365,9 +365,9 @@ error: } struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { - const struct drm_format_info *format = NULL; struct omap_framebuffer *omap_fb = NULL; struct drm_framebuffer *fb = NULL; unsigned int pitch = mode_cmd->pitches[0]; @@ -377,15 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, dev, mode_cmd, mode_cmd->width, mode_cmd->height, (char *)&mode_cmd->pixel_format); - format = drm_get_format_info(dev, mode_cmd->pixel_format, - mode_cmd->modifier[0]); - for (i = 0; i < ARRAY_SIZE(formats); i++) { if (formats[i] == mode_cmd->pixel_format) break; } - if (!format || i == ARRAY_SIZE(formats)) { + if (i == ARRAY_SIZE(formats)) { dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n", (char *)&mode_cmd->pixel_format); ret = -EINVAL; @@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, } fb = &omap_fb->base; - omap_fb->format = format; + omap_fb->format = info; mutex_init(&omap_fb->lock); /* @@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, * that the two planes of multiplane formats need the same number of * bytes per pixel. */ - if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) { + if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) { dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n"); ret = -EINVAL; goto fail; } - if (pitch % format->cpp[0]) { + if (pitch % info->cpp[0]) { dev_dbg(dev->dev, "buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n", - pitch, format->cpp[0]); + pitch, info->cpp[0]); ret = -EINVAL; goto fail; } - for (i = 0; i < format->num_planes; i++) { + for (i = 0; i < info->num_planes; i++) { struct plane *plane = &omap_fb->planes[i]; - unsigned int vsub = i == 0 ? 1 : format->vsub; + unsigned int vsub = i == 0 ? 
1 : info->vsub; unsigned int size; size = pitch * mode_cmd->height / vsub; @@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, plane->dma_addr = 0; } - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs); if (ret) { diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h index 0873f953cf1d..e6010302a22b 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.h +++ b/drivers/gpu/drm/omapdrm/omap_fb.h @@ -13,6 +13,7 @@ struct drm_connector; struct drm_device; struct drm_file; struct drm_framebuffer; +struct drm_format_info; struct drm_gem_object; struct drm_mode_fb_cmd2; struct drm_plane_state; @@ -23,6 +24,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 7b6396890681..948af7ec1130 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto fail; } - fb = omap_framebuffer_init(dev, &mode_cmd, &bo); + fb = omap_framebuffer_init(dev, + drm_get_format_info(dev, mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd, &bo); if (IS_ERR(fb)) { dev_err(dev->dev, "failed to allocate fb\n"); /* note: if fb creation failed, we can't rely on fb destroy diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 09b9f7ff9340..407c5f6a268b 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -215,6 +215,19 @@ config DRM_PANEL_HIMAX_HX8394 If M is selected the module will be called panel-himax-hx8394. +config DRM_PANEL_HYDIS_HV101HD1 + tristate "Hydis HV101HD1 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the Hydis HV101HD1 + 2-lane 1366x768 MIPI DSI panel found in ASUS VivoTab RT TF600T. + HV101HD1 is a color active matrix TFT LCD module using amorphous + silicon TFTs (Thin Film Transistors) as active switching devices. + + If M is selected the module will be called panel-hydis-hv101hd1. + config DRM_PANEL_ILITEK_IL9322 tristate "Ilitek ILI9322 320x240 QVGA panels" depends on OF && SPI @@ -843,6 +856,17 @@ config DRM_PANEL_SAMSUNG_S6E8AA0 select DRM_MIPI_DSI select VIDEOMODE_HELPERS +config DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01 + tristate "Samsung AMS561RA01 panel with S6E8AA5X01 controller" + depends on GPIOLIB && OF && REGULATOR + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Samsung AMS561RA01 + panel, which uses Samsung's S6E8AA5X01 controller. The panel has a + ~5.6 inch AMOLED display, and the controller is driven by the MIPI + DSI protocol with 4 lanes.
+ config DRM_PANEL_SAMSUNG_SOFEF00 tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels" depends on OF @@ -971,7 +995,7 @@ config DRM_PANEL_STARTEK_KD070FHFID015 depends on BACKLIGHT_CLASS_DEVICE help Say Y here if you want to enable support for STARTEK KD070FHFID015 DSI panel - based on RENESAS-R69429 controller. The pannel is a 7-inch TFT LCD display + based on RENESAS-R69429 controller. The panel is a 7-inch TFT LCD display with a resolution of 1024 x 600 pixels. It provides a MIPI DSI interface to the host, a built-in LED backlight and touch controller. diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 957555b49996..3615a761b44f 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112B) += panel-himax-hx83112b.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o +obj-$(CONFIG_DRM_PANEL_HYDIS_HV101HD1) += panel-hydis-hv101hd1.o obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9805) += panel-ilitek-ili9805.o @@ -87,6 +88,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01) += panel-samsung-s6e8aa5x01-ams561ra01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 9a56e208cbdd..feba520a0f45 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1736,10 +1736,11 @@ static const struct panel_delay delay_200_500_e50 = { .enable = 50, }; -static const struct panel_delay delay_200_500_e50_p2e200 = { +static const struct panel_delay delay_200_500_e50_d50_p2e200 = { .hpd_absent = 200, .unprepare = 500, .enable = 50, + .disable = 50, .prepare_to_enable = 200, }; @@ -1795,6 +1796,13 @@ static const struct panel_delay delay_200_500_e200_d10 = { .disable = 10, }; +static const struct panel_delay delay_200_500_e200_d50 = { + .hpd_absent = 200, + .unprepare = 500, + .enable = 200, + .disable = 50, +}; + static const struct panel_delay delay_200_150_e200 = { .hpd_absent = 200, .unprepare = 150, @@ -1828,6 +1836,13 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = { .powered_on_to_enable = 335, }; +static const struct panel_delay delay_200_500_e50_d100 = { + .hpd_absent = 200, + .unprepare = 500, + .enable = 50, + .disable = 100, +}; + #define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \ { \ .ident = { \ @@ -1857,6 +1872,7 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = { * Sort first by vendor, then by product ID. 
*/ static const struct edp_panel_entry edp_panels[] = { + EDP_PANEL_ENTRY('A', 'U', 'O', 0x04a4, &delay_200_500_e50, "B122UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x105c, &delay_200_500_e50, "B116XTN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x125c, &delay_200_500_e50, "Unknown"), @@ -1875,6 +1891,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0", &auo_b116xa3_mode), EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x52b0, &delay_200_500_e50, "B116XAK02.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"), @@ -1882,10 +1899,12 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"), @@ -1934,21 +1953,24 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a1b, &delay_200_500_e50, "NV133WUM-N63"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"), - EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"), - EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80_d50, "NV122WUM-N41"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, 
&delay_200_500_e80, "NE140WUM-N6S"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ddf, &delay_200_500_e80, "NV116WHM-T01"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"), @@ -1966,27 +1988,33 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x115d, &delay_200_500_e80_d50, "N116BCA-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x115f, &delay_200_500_e80_d50, "N116BCL-EAK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x124c, &delay_200_500_e80_d50, "N122JCA-ENK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"), - EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"), - EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"), + EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_d50_p2e200, "MNC207QS1-1"), + EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_d50_p2e200, "MNE007JA1-2"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"), - EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"), EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"), @@ -2027,12 +2055,16 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), + EDP_PANEL_ENTRY('S', 'H', 'P', 0x158f, &delay_200_500_p2e100, "LQ134Z1"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0009, &delay_200_500_e250, "116QHD024002"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, 
&delay_100_500_e200, "2081116HHD028001-51D"), + EDP_PANEL_ENTRY('T', 'M', 'A', 0x0811, &delay_200_500_e80_d50, "TM140VDXP01-04"), + EDP_PANEL_ENTRY('T', 'M', 'A', 0x2094, &delay_200_500_e50_d100, "TL140VDMS03-01"), + { /* sentinal */ } }; diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c index fb302d1f91b9..9e443c719843 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8279.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c @@ -935,7 +935,7 @@ static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u j++; x++; } while (x < 4); - }; + } return 0; } diff --git a/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c b/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c new file mode 100644 index 000000000000..46426c388932 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/array_size.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct hv101hd1 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data *supplies; +}; + +static const struct regulator_bulk_data hv101hd1_supplies[] = { + { .supply = "vdd" }, + { .supply = "vio" }, +}; + +static inline struct hv101hd1 *to_hv101hd1(struct drm_panel *panel) +{ + return container_of(panel, struct hv101hd1, panel); +} + +static int hv101hd1_prepare(struct drm_panel *panel) +{ + struct hv101hd1 *hv = to_hv101hd1(panel); + struct mipi_dsi_multi_context ctx = { .dsi = hv->dsi }; + struct device *dev = &hv->dsi->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(hv101hd1_supplies), hv->supplies); + if (ret) { + dev_err(dev, "error enabling regulators (%d)\n", ret); + return ret; + } + + mipi_dsi_dcs_exit_sleep_mode_multi(&ctx); + mipi_dsi_msleep(&ctx, 20); + + mipi_dsi_dcs_set_display_on_multi(&ctx); + mipi_dsi_msleep(&ctx, 20); + + return 0; +} + +static int hv101hd1_disable(struct drm_panel *panel) +{ + struct hv101hd1 *hv = to_hv101hd1(panel); + struct mipi_dsi_multi_context ctx = { .dsi = hv->dsi }; + + mipi_dsi_dcs_set_display_off_multi(&ctx); + mipi_dsi_msleep(&ctx, 120); + mipi_dsi_dcs_enter_sleep_mode_multi(&ctx); + mipi_dsi_msleep(&ctx, 20); + + return 0; +} + +static int hv101hd1_unprepare(struct drm_panel *panel) +{ + struct hv101hd1 *hv = to_hv101hd1(panel); + + return regulator_bulk_disable(ARRAY_SIZE(hv101hd1_supplies), + hv->supplies); +} + +static const struct drm_display_mode hv101hd1_mode = { + .clock = (1366 + 74 + 36 + 24) * (768 + 21 + 7 + 4) * 60 / 1000, + .hdisplay = 1366, + .hsync_start = 1366 + 74, + .hsync_end = 1366 + 74 + 36, + .htotal = 1366 + 74 + 36 + 24, + .vdisplay = 768, + .vsync_start = 768 + 21, + .vsync_end = 768 + 21 + 7, + .vtotal = 768 + 21 + 7 + 4, + .width_mm = 140, + .height_mm = 220, +}; + +static int hv101hd1_get_modes(struct drm_panel *panel, struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &hv101hd1_mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + + 
drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs hv101hd1_panel_funcs = { + .prepare = hv101hd1_prepare, + .disable = hv101hd1_disable, + .unprepare = hv101hd1_unprepare, + .get_modes = hv101hd1_get_modes, +}; + +static int hv101hd1_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct hv101hd1 *hv; + int ret; + + hv = devm_drm_panel_alloc(dev, struct hv101hd1, panel, + &hv101hd1_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(hv)) + return PTR_ERR(hv); + + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(hv101hd1_supplies), + hv101hd1_supplies, &hv->supplies); + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + hv->dsi = dsi; + mipi_dsi_set_drvdata(dsi, hv); + + dsi->lanes = 2; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM; + + ret = drm_panel_of_backlight(&hv->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&hv->panel); + + ret = mipi_dsi_attach(dsi); + if (ret) { + drm_panel_remove(&hv->panel); + return dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + } + + return 0; +} + +static void hv101hd1_remove(struct mipi_dsi_device *dsi) +{ + struct hv101hd1 *hv = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, + "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&hv->panel); +} + +static const struct of_device_id hv101hd1_of_match[] = { + { .compatible = "hydis,hv101hd1" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, hv101hd1_of_match); + +static struct mipi_dsi_driver hv101hd1_driver = { + .driver = { + .name = "panel-hv101hd1", + .of_match_table = hv101hd1_of_match, + }, + .probe = hv101hd1_probe, + .remove = hv101hd1_remove, +}; +module_mipi_dsi_driver(hv101hd1_driver); + +MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>"); +MODULE_DESCRIPTION("DRM driver for Hydis HV101HD1 panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c index 5f897e143758..83656bb4b0b2 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c +++ b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c @@ -81,25 +81,25 @@ static int jdi_panel_disable(struct drm_panel *panel) static int jdi_panel_unprepare(struct drm_panel *panel) { struct jdi_panel *jdi = to_panel_jdi(panel); - int ret; - ret = mipi_dsi_dcs_set_display_off(jdi->link1); - if (ret < 0) - dev_err(panel->dev, "failed to set display off: %d\n", ret); + /* + * One context per panel since we'll continue trying to shut down the + * other panel even if one isn't responding. 
+ */ + struct mipi_dsi_multi_context dsi_ctx1 = { .dsi = jdi->link1 }; + struct mipi_dsi_multi_context dsi_ctx2 = { .dsi = jdi->link2 }; - ret = mipi_dsi_dcs_set_display_off(jdi->link2); - if (ret < 0) - dev_err(panel->dev, "failed to set display off: %d\n", ret); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx1); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx2); /* Specified by JDI @ 50ms, subject to change */ msleep(50); - ret = mipi_dsi_dcs_enter_sleep_mode(jdi->link1); - if (ret < 0) - dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret); - ret = mipi_dsi_dcs_enter_sleep_mode(jdi->link2); - if (ret < 0) - dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret); + /* Doesn't hurt to try sleep mode even if display off fails */ + dsi_ctx1.accum_err = 0; + dsi_ctx2.accum_err = 0; + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx1); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx2); /* Specified by JDI @ 150ms, subject to change */ msleep(150); @@ -123,72 +123,46 @@ static int jdi_panel_unprepare(struct drm_panel *panel) /* Specified by JDI @ 20ms, subject to change */ msleep(20); - return ret; + return 0; } -static int jdi_setup_symmetrical_split(struct mipi_dsi_device *left, - struct mipi_dsi_device *right, - const struct drm_display_mode *mode) +static void jdi_setup_symmetrical_split(struct mipi_dsi_multi_context *dsi_ctx, + struct mipi_dsi_device *left, + struct mipi_dsi_device *right, + const struct drm_display_mode *mode) { - int err; - - err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1); - if (err < 0) { - dev_err(&left->dev, "failed to set column address: %d\n", err); - return err; - } - - err = mipi_dsi_dcs_set_column_address(right, 0, mode->hdisplay / 2 - 1); - if (err < 0) { - dev_err(&right->dev, "failed to set column address: %d\n", err); - return err; - } - - err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1); - if (err < 0) { - dev_err(&left->dev, "failed to set page address: %d\n", err); - return err; - } - - err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1); - if (err < 0) { - dev_err(&right->dev, "failed to set page address: %d\n", err); - return err; - } - - return 0; + mipi_dsi_dual(mipi_dsi_dcs_set_column_address_multi, + dsi_ctx, left, right, + 0, mode->hdisplay / 2 - 1); + mipi_dsi_dual(mipi_dsi_dcs_set_page_address_multi, + dsi_ctx, left, right, + 0, mode->vdisplay - 1); } -static int jdi_write_dcdc_registers(struct jdi_panel *jdi) +static void jdi_write_dcdc_registers(struct mipi_dsi_multi_context *dsi_ctx, + struct jdi_panel *jdi) { /* Clear the manufacturer command access protection */ - mipi_dsi_generic_write_seq(jdi->link1, MCS_CMD_ACS_PROT, - MCS_CMD_ACS_PROT_OFF); - mipi_dsi_generic_write_seq(jdi->link2, MCS_CMD_ACS_PROT, - MCS_CMD_ACS_PROT_OFF); + mipi_dsi_dual_generic_write_seq_multi(dsi_ctx, jdi->link1, jdi->link2, + MCS_CMD_ACS_PROT, + MCS_CMD_ACS_PROT_OFF); /* - * Change the VGH/VGL divide rations to move the noise generated by the + * Change the VGH/VGL divide ratios to move the noise generated by the * TCONN. This should hopefully avoid interaction with the backlight * controller. 
*/ - mipi_dsi_generic_write_seq(jdi->link1, MCS_PWR_CTRL_FUNC, - MCS_PWR_CTRL_PARAM1_VGH_330_DIV | - MCS_PWR_CTRL_PARAM1_DEFAULT, - MCS_PWR_CTRL_PARAM2_VGL_410_DIV | - MCS_PWR_CTRL_PARAM2_DEFAULT); - - mipi_dsi_generic_write_seq(jdi->link2, MCS_PWR_CTRL_FUNC, - MCS_PWR_CTRL_PARAM1_VGH_330_DIV | - MCS_PWR_CTRL_PARAM1_DEFAULT, - MCS_PWR_CTRL_PARAM2_VGL_410_DIV | - MCS_PWR_CTRL_PARAM2_DEFAULT); - - return 0; + mipi_dsi_dual_generic_write_seq_multi(dsi_ctx, jdi->link1, jdi->link2, + MCS_PWR_CTRL_FUNC, + MCS_PWR_CTRL_PARAM1_VGH_330_DIV | + MCS_PWR_CTRL_PARAM1_DEFAULT, + MCS_PWR_CTRL_PARAM2_VGL_410_DIV | + MCS_PWR_CTRL_PARAM2_DEFAULT); } static int jdi_panel_prepare(struct drm_panel *panel) { struct jdi_panel *jdi = to_panel_jdi(panel); + struct mipi_dsi_multi_context dsi_ctx = {}; int err; /* Disable backlight to avoid showing random pixels @@ -231,88 +205,36 @@ static int jdi_panel_prepare(struct drm_panel *panel) * put in place to communicate the configuration back to the DSI host * controller. */ - err = jdi_setup_symmetrical_split(jdi->link1, jdi->link2, - jdi->mode); - if (err < 0) { - dev_err(panel->dev, "failed to set up symmetrical split: %d\n", - err); - goto poweroff; - } - - err = mipi_dsi_dcs_set_tear_scanline(jdi->link1, - jdi->mode->vdisplay - 16); - if (err < 0) { - dev_err(panel->dev, "failed to set tear scanline: %d\n", err); - goto poweroff; - } - - err = mipi_dsi_dcs_set_tear_scanline(jdi->link2, - jdi->mode->vdisplay - 16); - if (err < 0) { - dev_err(panel->dev, "failed to set tear scanline: %d\n", err); - goto poweroff; - } - - err = mipi_dsi_dcs_set_tear_on(jdi->link1, - MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (err < 0) { - dev_err(panel->dev, "failed to set tear on: %d\n", err); - goto poweroff; - } - - err = mipi_dsi_dcs_set_tear_on(jdi->link2, - MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (err < 0) { - dev_err(panel->dev, "failed to set tear on: %d\n", err); - goto poweroff; - } + jdi_setup_symmetrical_split(&dsi_ctx, jdi->link1, jdi->link2, + jdi->mode); - err = mipi_dsi_dcs_set_pixel_format(jdi->link1, MIPI_DCS_PIXEL_FMT_24BIT); - if (err < 0) { - dev_err(panel->dev, "failed to set pixel format: %d\n", err); - goto poweroff; - } + mipi_dsi_dual(mipi_dsi_dcs_set_tear_scanline_multi, + &dsi_ctx, jdi->link1, jdi->link2, + jdi->mode->vdisplay - 16); - err = mipi_dsi_dcs_set_pixel_format(jdi->link2, MIPI_DCS_PIXEL_FMT_24BIT); - if (err < 0) { - dev_err(panel->dev, "failed to set pixel format: %d\n", err); - goto poweroff; - } + mipi_dsi_dual(mipi_dsi_dcs_set_tear_on_multi, + &dsi_ctx, jdi->link1, jdi->link2, + MIPI_DSI_DCS_TEAR_MODE_VBLANK); - err = mipi_dsi_dcs_exit_sleep_mode(jdi->link1); - if (err < 0) { - dev_err(panel->dev, "failed to exit sleep mode: %d\n", err); - goto poweroff; - } + mipi_dsi_dual(mipi_dsi_dcs_set_pixel_format_multi, + &dsi_ctx, jdi->link1, jdi->link2, + MIPI_DCS_PIXEL_FMT_24BIT); - err = mipi_dsi_dcs_exit_sleep_mode(jdi->link2); - if (err < 0) { - dev_err(panel->dev, "failed to exit sleep mode: %d\n", err); - goto poweroff; - } + mipi_dsi_dual(mipi_dsi_dcs_exit_sleep_mode_multi, + &dsi_ctx, jdi->link1, jdi->link2); - err = jdi_write_dcdc_registers(jdi); - if (err < 0) { - dev_err(panel->dev, "failed to write dcdc registers: %d\n", err); - goto poweroff; - } + jdi_write_dcdc_registers(&dsi_ctx, jdi); /* - * We need to wait 150ms between mipi_dsi_dcs_exit_sleep_mode() and - * mipi_dsi_dcs_set_display_on(). + * We need to wait 150ms between mipi_dsi_dcs_exit_sleep_mode_multi() + * and mipi_dsi_dcs_set_display_on_multi(). 
*/ - msleep(150); + mipi_dsi_msleep(&dsi_ctx, 150); - err = mipi_dsi_dcs_set_display_on(jdi->link1); - if (err < 0) { - dev_err(panel->dev, "failed to set display on: %d\n", err); - goto poweroff; - } + mipi_dsi_dual(mipi_dsi_dcs_set_display_on_multi, + &dsi_ctx, jdi->link1, jdi->link2); - err = mipi_dsi_dcs_set_display_on(jdi->link2); - if (err < 0) { - dev_err(panel->dev, "failed to set display on: %d\n", err); + if (dsi_ctx.accum_err < 0) goto poweroff; - } jdi->link1->mode_flags &= ~MIPI_DSI_MODE_LPM; jdi->link2->mode_flags &= ~MIPI_DSI_MODE_LPM; diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c index 98f0782c8411..561e6643dcbb 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c @@ -148,24 +148,20 @@ static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel) static int nt35560_set_brightness(struct backlight_device *bl) { struct nt35560 *nt = bl_get_data(bl); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); - int period_ns = 1023; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; int duty_ns = bl->props.brightness; + int period_ns = 1023; u8 pwm_ratio; u8 pwm_div; - u8 par; - int ret; if (backlight_is_blank(bl)) { /* Disable backlight */ - par = 0x00; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret) { - dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret); - return ret; - } - return 0; + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, + MIPI_DCS_WRITE_CONTROL_DISPLAY, + 0x00); + return dsi_ctx.accum_err; } /* Calculate the PWM duty cycle in n/256's */ @@ -176,12 +172,6 @@ static int nt35560_set_brightness(struct backlight_device *bl) /* Set up PWM dutycycle ONE byte (differs from the standard) */ dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio); - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, - &pwm_ratio, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret); - return ret; - } /* * Sequence to write PWMDIV: @@ -192,46 +182,23 @@ static int nt35560_set_brightness(struct backlight_device *bl) * 0x22 PWMDIV * 0x7F 0xAA CMD2 page 1 lock */ - par = 0xaa; - ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret); - return ret; - } - par = 0x01; - ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret); - return ret; - } - par = 0x01; - ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret); - return ret; - } - ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret); - return ret; - } - par = 0xaa; - ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1); - if (ret < 0) { - dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, + MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + pwm_ratio); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x01); + + mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, 0x22, pwm_div); + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0xaa); /* Enable backlight */ - par = 0x24; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret < 0) { - dev_err(nt->dev, 
"failed to enable display backlight (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, + 0x24); - return 0; + return dsi_ctx.accum_err; } static const struct backlight_ops nt35560_bl_ops = { @@ -244,32 +211,23 @@ static const struct backlight_properties nt35560_bl_props = { .max_brightness = 1023, }; -static int nt35560_read_id(struct nt35560 *nt) +static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); + struct device dev = dsi_ctx->dsi->dev; u8 vendor, version, panel; u16 val; - int ret; - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1); - if (ret < 0) { - dev_err(nt->dev, "could not vendor ID byte\n"); - return ret; - } - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1); - if (ret < 0) { - dev_err(nt->dev, "could not read device version byte\n"); - return ret; - } - ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1); - if (ret < 0) { - dev_err(nt->dev, "could not read panel ID byte\n"); - return ret; - } + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID1, &vendor, 1); + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID2, &version, 1); + mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID3, &panel, 1); + + if (dsi_ctx->accum_err < 0) + return; if (vendor == 0x00) { - dev_err(nt->dev, "device vendor ID is zero\n"); - return -ENODEV; + dev_err(&dev, "device vendor ID is zero\n"); + dsi_ctx->accum_err = -ENODEV; + return; } val = (vendor << 8) | panel; @@ -278,16 +236,16 @@ static int nt35560_read_id(struct nt35560 *nt) case DISPLAY_SONY_ACX424AKP_ID2: case DISPLAY_SONY_ACX424AKP_ID3: case DISPLAY_SONY_ACX424AKP_ID4: - dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n", + dev_info(&dev, + "MTP vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; default: - dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n", + dev_info(&dev, + "unknown vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; } - - return 0; } static int nt35560_power_on(struct nt35560 *nt) @@ -322,92 +280,56 @@ static void nt35560_power_off(struct nt35560 *nt) static int nt35560_prepare(struct drm_panel *panel) { struct nt35560 *nt = panel_to_nt35560(panel); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); - const u8 mddi = 3; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; int ret; ret = nt35560_power_on(nt); if (ret) return ret; - ret = nt35560_read_id(nt); - if (ret) { - dev_err(nt->dev, "failed to read panel ID (%d)\n", ret); - goto err_power_off; - } + nt35560_read_id(&dsi_ctx); - /* Enabe tearing mode: send TE (tearing effect) at VBLANK */ - ret = mipi_dsi_dcs_set_tear_on(dsi, + /* Enable tearing mode: send TE (tearing effect) at VBLANK */ + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret) { - dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret); - goto err_power_off; - } /* * Set MDDI * * This presumably deactivates the Qualcomm MDDI interface and * selects DSI, similar code is found in other drivers such as the - * Sharp LS043T1LE01 which makes us suspect that this panel may be - * using a Novatek NT35565 or similar display driver chip that shares - * this command. Due to the lack of documentation we cannot know for - * sure. + * Sharp LS043T1LE01. 
*/ - ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI, - &mddi, sizeof(mddi)); - if (ret < 0) { - dev_err(nt->dev, "failed to set MDDI (%d)\n", ret); - goto err_power_off; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, NT35560_DCS_SET_MDDI, 3); - /* Exit sleep mode */ - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret) { - dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret); - goto err_power_off; - } - msleep(140); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 140); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn display on (%d)\n", ret); - goto err_power_off; - } + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); if (nt->video_mode) { - /* In video mode turn peripheral on */ - ret = mipi_dsi_turn_on_peripheral(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn on peripheral\n"); - goto err_power_off; - } + mipi_dsi_turn_on_peripheral_multi(&dsi_ctx); } - return 0; - -err_power_off: - nt35560_power_off(nt); - return ret; + if (dsi_ctx.accum_err < 0) + nt35560_power_off(nt); + return dsi_ctx.accum_err; } static int nt35560_unprepare(struct drm_panel *panel) { struct nt35560 *nt = panel_to_nt35560(panel); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { + .dsi = to_mipi_dsi_device(nt->dev) + }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret) { - dev_err(nt->dev, "failed to turn display off (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + + if (dsi_ctx.accum_err < 0) + return dsi_ctx.accum_err; - /* Enter sleep mode */ - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret) { - dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret); - return ret; - } msleep(85); nt35560_power_off(nt); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c index 32cf64c7c18b..226d91daf8c7 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c @@ -23,14 +23,6 @@ #define DSI_NUM_MIN 1 -#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) 
\ - do { \ - dsi_ctx.dsi = dsi0; \ - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \ - dsi_ctx.dsi = dsi1; \ - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \ - } while (0) - struct panel_info { struct drm_panel panel; struct mipi_dsi_device *dsi[2]; @@ -71,217 +63,217 @@ static int elish_boe_init_sequence(struct panel_info *pinfo) struct mipi_dsi_device *dsi1 = pinfo->dsi[1]; struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; /* No datasheet, so write magic init sequence directly */ - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7); - 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49); - 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0x3a, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x05); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x18, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0x84); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x05, 0x2d); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x06, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x07, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x08, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0x45); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x12, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x15, 0x83); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x31, 0xfe); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x32, 0xfd); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x33, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x34, 0xf8); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0xf5); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x36, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x37, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x38, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0xec); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, 
dsi1, 0x3d, 0xe9); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3f, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x40, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x41, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x45, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x46, 0xf4); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x47, 0xe7); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x48, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x49, 0xcd); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4a, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4b, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4c, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4d, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4e, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x50, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x68); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x52, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x54, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0x0e); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x59, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5a, 0xf7); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5b, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5c, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5d, 0xe3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5e, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5f, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x60, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x61, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x62, 0xcb); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x63, 0xbf); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x64, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x65, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x66, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x67, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x25, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1a, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, 
dsi1, 0x2b, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x84, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x85, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x91, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x92, 0x0f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x93, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x94, 0x18); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x95, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x96, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb0, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x1b); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x24); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x27); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x31); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd1, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd2, 0x30); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xde, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdf, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x81); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9f, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x6f, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x70, 0x11); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x73, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x74, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x76, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x77, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa0, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa9, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, 
dsi1, 0xaa, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xab, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xad, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xba, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbe, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbf, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc0, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc1, 0x59); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc5, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc6, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc7, 0x48); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xca, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcb, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xce, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcf, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd3, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd7, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdc, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdd, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xe1, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xe2, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf3, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf4, 0x48); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x13, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x14, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbc, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbd, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x97, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x98, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x99, 0x95); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9a, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9b, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9c, 0x0b); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9d, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9e, 0x90); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9f, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, 
dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa3, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x14, 0x60); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xd0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x02, 0xaf); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0xee); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1d, 0x09); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x2c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11); mipi_dsi_msleep(&dsi_ctx, 70); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29); return dsi_ctx.accum_err; } @@ -292,195 +284,195 @@ static int elish_csot_init_sequence(struct panel_info *pinfo) struct mipi_dsi_device *dsi1 = pinfo->dsi[1]; struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; /* No datasheet, so write magic init sequence directly */ - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0); - 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0x50, 0x80); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); - 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0xca, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x05); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x18, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xd0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x02, 0xaf); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x30); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0xee); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1d, 0x09); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0x84); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x05, 0x2d); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x06, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x07, 0x00); + 
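[Review note, not part of the patch: in this Novatek-style init sequence, a write of 0xff <page> selects a manufacturer command page, and the 0xfb, 0x01 write that follows keeps that page's registers from being reloaded from non-volatile defaults; the remaining address/value pairs appear to be opaque vendor tuning data carried over from the removed block.]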
mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x08, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0x45); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x12, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x15, 0x83); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x31, 0xfe); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x32, 0xfd); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x33, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x34, 0xf8); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0xf5); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x36, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x37, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x38, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0xec); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3d, 0xe9); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3f, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x40, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x41, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x45, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x46, 0xf4); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x47, 0xe7); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x48, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x49, 0xcd); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4a, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4b, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4c, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4d, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4e, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x50, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x68); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x52, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x54, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0x0e); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x59, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5a, 0xf7); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5b, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5c, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5d, 0xe3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5e, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5f, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x60, 0xd8); + 
mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x61, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x62, 0xcb); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x63, 0xbf); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x64, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x65, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x66, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x67, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x2c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x55, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x25, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1a, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x84, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x85, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x91, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x92, 0x0f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x93, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x94, 0x18); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x95, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x96, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb0, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x1b); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, 
dsi0, dsi1, 0xff, 0x24); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x27); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x31); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd1, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xde, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdf, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x81); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x6f, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x70, 0x11); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x73, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x74, 0x4d); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa0, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa9, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xaa, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xab, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xad, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xba, 0x96); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbe, 0x07); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbf, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc0, 0x07); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc1, 0x5c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc5, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc6, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc7, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xca, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcb, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xce, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcf, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd3, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbc, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbd, 0x1c); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, 
dsi0, dsi1, 0x9a, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11); mipi_dsi_msleep(&dsi_ctx, 70); - mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29); + mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29); return dsi_ctx.accum_err; } diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c index 3231e84dc66c..8a608972fc41 100644 --- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c +++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c @@ -276,11 +276,8 @@ static int ota5601a_probe(struct spi_device *spi) } err = drm_panel_of_backlight(&panel->drm_panel); - if (err) { - if (err != -EPROBE_DEFER) - dev_err(dev, "Failed to get backlight handle\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "Failed to get backlight handle\n"); drm_panel_add(&panel->drm_panel); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c index e91f50662997..7e2f4e043d62 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c @@ -7,7 +7,9 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c new file mode 100644 index 000000000000..56e10c7c3a76 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c @@ -0,0 +1,981 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Samsung AMS561RA01 panel with S6E8AA5X01 controller. 
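[Review note on the dual-DSI hunk above, before the new Samsung driver whose header starts here: the change is mechanical, each helper invocation now takes &dsi_ctx instead of dsi_ctx, so error state accumulates across the whole init sequence and is checked once via dsi_ctx.accum_err. A minimal sketch of that pattern; the context layout matches drm_mipi_dsi.h, but dual_write() below is a simplified stand-in for the real macro, not kernel API:]

	struct mipi_dsi_multi_context {
		struct mipi_dsi_device *dsi;
		int accum_err;	/* first failure is sticky; later calls become no-ops */
	};

	/* Write the same DCS sequence to both links of a dual-link panel.
	 * Taking ctx by pointer is what lets accum_err survive from one call
	 * to the next; passing the struct by value would reset it each time. */
	static void dual_write(struct mipi_dsi_multi_context *ctx,
			       struct mipi_dsi_device *dsi0,
			       struct mipi_dsi_device *dsi1,
			       const u8 *seq, size_t len)
	{
		ctx->dsi = dsi0;
		mipi_dsi_dcs_write_buffer_multi(ctx, seq, len);
		ctx->dsi = dsi1;
		mipi_dsi_dcs_write_buffer_multi(ctx, seq, len);
	}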
+ * + * Copyright (C) 2025 Kaustabh Chakraborty <kauschluss@disroot.org> + */ + +#include <linux/backlight.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +/* Manufacturer Command Set */ +#define MCS_AIDCTL 0xb2 +#define MCS_ADAPTIVECTL 0xb5 +#define MCS_ELVSS 0xb6 +#define MCS_TEMPERCTL 0xb8 +#define MCS_PENTILE 0xc0 +#define MCS_GAMMACTL 0xca +#define MCS_LTPSCTL 0xcb +#define MCS_PCD 0xcc +#define MCS_ERRFLAG 0xe7 +#define MCS_ACCESSPROT 0xf0 +#define MCS_DISPCTL 0xf2 +#define MCS_GAMMAUPD 0xf7 + +#define GAMMA_CMD_LEN 34 +#define AID_CMD_LEN 3 + +static const struct { + u8 gamma[GAMMA_CMD_LEN]; + u8 aid[AID_CMD_LEN]; +} s6e8aa5x01_ams561ra01_cmds[] = { + { + /* 5 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x94, + 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89, + 0x8d, 0x8c, 0x8d, 0x89, 0x8c, 0x8e, + 0x8e, 0x8f, 0x90, 0xa3, 0xa2, 0x9a, + 0xcf, 0xca, 0x9f, 0xe6, 0xff, 0xb4, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0xa5 }, + }, { + /* 6 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x8c, 0x8a, 0x8c, 0x85, 0x88, 0x8c, + 0x8b, 0x8c, 0x8e, 0xa2, 0xa2, 0x9a, + 0xd0, 0xcc, 0xa2, 0xed, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x95 }, + }, { + /* 7 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x8c, 0x8a, 0x8c, 0x85, 0x88, 0x8c, + 0x8b, 0x8c, 0x8e, 0xa2, 0xa2, 0x99, + 0xc8, 0xc4, 0x9d, 0xed, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x89 }, + }, { + /* 8 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89, + 0x8a, 0x88, 0x8b, 0x83, 0x86, 0x8b, + 0x8c, 0x8b, 0x8d, 0x9d, 0x9f, 0x97, + 0xc7, 0xc3, 0x9c, 0xf5, 0xff, 0xbb, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x7e }, + }, { + /* 9 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88, + 0x90, 0x8f, 0x91, 0x95, 0x97, 0x94, + 0xc6, 0xc2, 0x9d, 0xf5, 0xff, 0xbb, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x73 }, + }, { + /* 10 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88, + 0x90, 0x8f, 0x91, 0x94, 0x97, 0x93, + 0xc6, 0xc2, 0x9e, 0xec, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x67 }, + }, { + /* 11 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88, + 0x8b, 0x8b, 0x8d, 0x90, 0x93, 0x92, + 0xc5, 0xc1, 0x9c, 0xf5, 0xff, 0xbb, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x56 }, + }, { + /* 12 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x89, 0x82, 0x84, 0x88, + 0x87, 0x86, 0x8a, 0x8c, 0x90, 0x8f, + 0xcd, 0xc9, 0xa1, 0xec, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x4a }, + }, { + /* 13 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x89, 0x82, 0x84, 0x88, + 0x87, 0x86, 0x8a, 0x8c, 0x90, 0x8e, + 0xc4, 0xbf, 0x9c, 0xf5, 0xff, 0xbb, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x3b }, + }, { + /* 14 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x89, 
0x82, 0x84, 0x88, + 0x87, 0x86, 0x89, 0x8c, 0x90, 0x8f, + 0xc2, 0xbf, 0x9c, 0xec, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x35 }, + }, { + /* 15 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x89, 0x82, 0x84, 0x88, + 0x87, 0x86, 0x89, 0x8c, 0x90, 0x8f, + 0xb7, 0xb6, 0x96, 0xec, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x25 }, + }, { + /* 16 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x89, 0x82, 0x84, 0x88, + 0x88, 0x86, 0x89, 0x8c, 0x90, 0x8f, + 0xb7, 0xb6, 0x96, 0xec, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x20 }, + }, { + /* 17 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86, + 0x86, 0x85, 0x89, 0x88, 0x8c, 0x8e, + 0xbf, 0xbe, 0x9c, 0xec, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x05, 0x11 }, + }, { + /* 19 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86, + 0x87, 0x85, 0x89, 0x88, 0x8c, 0x8e, + 0xb3, 0xb4, 0x97, 0xeb, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0xf2 }, + }, { + /* 20 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86, + 0x87, 0x85, 0x89, 0x89, 0x8c, 0x8e, + 0xb3, 0xb4, 0x97, 0xeb, 0xff, 0xb7, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0xe4 }, + }, { + /* 21 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x8a, 0x88, 0x8b, 0x7d, 0x7e, 0x84, + 0x8c, 0x8a, 0x8c, 0x8e, 0x90, 0x8f, + 0xb6, 0xb6, 0x97, 0xe3, 0xff, 0xb3, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0xd5 }, + }, { + /* 22 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x97, + 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89, + 0x8a, 0x88, 0x8b, 0x81, 0x82, 0x86, + 0x87, 0x86, 0x88, 0x8e, 0x90, 0x8f, + 0xb6, 0xb6, 0x95, 0xe3, 0xff, 0xb3, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0xc5 }, + }, { + /* 24 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x97, + 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86, + 0x87, 0x86, 0x88, 0x8e, 0x90, 0x8f, + 0xb6, 0xb6, 0x94, 0xe3, 0xff, 0xb3, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0xa7 }, + }, { + /* 25 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x98, + 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86, + 0x87, 0x86, 0x87, 0x8e, 0x90, 0x8f, + 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0x95 }, + }, { + /* 27 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x99, + 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8a, 0x83, 0x86, 0x8a, + 0x88, 0x87, 0x87, 0x88, 0x8b, 0x8c, + 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0x76 }, + }, { + /* 29 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x99, + 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x83, 0x86, 0x89, + 0x88, 0x87, 0x88, 0x88, 0x8b, 0x8b, + 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0x54 }, + }, { + /* 30 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9a, + 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8a, 0x84, 0x86, 0x8a, + 0x87, 0x87, 0x87, 0x88, 0x8b, 0x8b, + 0xbf, 0xbf, 0x99, 0xda, 0xfa, 0xaf, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0x44 }, 
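[Review note; the brightness table continues below, running from 5 to 500 nits. Each entry pairs one gamma payload (MCS_GAMMACTL plus 33 parameter bytes, GAMMA_CMD_LEN = 34 in total) with an AID setting (MCS_AIDCTL plus 2 bytes, AID_CMD_LEN = 3). update_status() later in this file writes the pair verbatim, using the backlight level as a direct index, so array order defines the brightness scale. The AID payload shrinks toward 0x00, 0x10 as target luminance rises, consistent with the usual AMOLED AID/AOR dimming scheme.]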
+ }, { + /* 32 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9a, + 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a, + 0x89, 0x87, 0x8a, 0x84, 0x86, 0x8a, + 0x87, 0x87, 0x87, 0x89, 0x8b, 0x8b, + 0xbf, 0xbf, 0x98, 0xd2, 0xf2, 0xac, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x04, 0x1f }, + }, { + /* 34 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b, + 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8b, 0x87, 0x8b, 0x83, 0x86, 0x89, + 0x87, 0x87, 0x88, 0x88, 0x8b, 0x8a, + 0xbf, 0xbf, 0x98, 0xd2, 0xf2, 0xac, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x03, 0xff }, + }, { + /* 37 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b, + 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86, + 0x86, 0x86, 0x86, 0x8d, 0x90, 0x8d, + 0xc0, 0xbf, 0x9a, 0xd2, 0xf2, 0xac, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x03, 0xd3 }, + }, { + /* 39 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b, + 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86, + 0x87, 0x86, 0x87, 0x8d, 0x90, 0x8d, + 0xb6, 0xb6, 0x93, 0xda, 0xf9, 0xaf, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x03, 0xb3 }, + }, { + /* 41 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b, + 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x85, + 0x87, 0x86, 0x87, 0x8d, 0x90, 0x8d, + 0xb6, 0xb6, 0x94, 0xda, 0xf9, 0xaf, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x03, 0x93 }, + }, { + /* 44 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b, + 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86, + 0x87, 0x86, 0x86, 0x85, 0x87, 0x8a, + 0xbe, 0xbe, 0x99, 0xda, 0xf9, 0xaf, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x03, 0x66 }, + }, { + /* 47 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b, + 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86, + 0x88, 0x86, 0x87, 0x84, 0x87, 0x89, + 0xb4, 0xb4, 0x94, 0xe2, 0xff, 0xb3, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x03, 0x40 }, + }, { + /* 50 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c, + 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86, + 0x88, 0x86, 0x87, 0x84, 0x87, 0x89, + 0xb4, 0xb4, 0x95, 0xe2, 0xff, 0xb3, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x03, 0x0e }, + }, { + /* 53 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c, + 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86, + 0x88, 0x86, 0x87, 0x85, 0x87, 0x8a, + 0xb4, 0xb4, 0x96, 0xe2, 0xff, 0xb3, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0xe2 }, + }, { + /* 56 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c, + 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86, + 0x88, 0x86, 0x87, 0x85, 0x87, 0x8a, + 0xab, 0xab, 0x90, 0xdd, 0xf7, 0xaf, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0xb5 }, + }, { + /* 60 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c, + 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x82, 0x82, 0x87, + 0x83, 0x81, 0x84, 0x81, 0x84, 0x88, + 0xb3, 0xb3, 0x96, 0xcf, 0xe5, 0xa8, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x77 }, + }, { + /* 64 nits */ + { MCS_GAMMACTL, + 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c, + 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a, + 0x8a, 0x87, 0x8b, 0x82, 0x82, 0x87, + 0x83, 0x81, 0x84, 0x82, 0x84, 0x88, + 0xb2, 0xb3, 0x97, 0xcf, 0xe5, 0xa8, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x36 }, + }, { + /* 68 nits */ + { MCS_GAMMACTL, + 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x9d, + 0x88, 0x88, 0x89, 0x89, 0x89, 0x8b, + 0x8a, 0x88, 0x8b, 0x7f, 
0x80, 0x86, + 0x88, 0x86, 0x87, 0x7d, 0x7f, 0x85, + 0xb2, 0xb3, 0x97, 0xcf, 0xe5, 0xa8, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 72 nits */ + { MCS_GAMMACTL, + 0x00, 0x9c, 0x00, 0xa9, 0x00, 0xa0, + 0x88, 0x88, 0x89, 0x88, 0x88, 0x8a, + 0x8c, 0x8a, 0x8d, 0x7f, 0x81, 0x85, + 0x84, 0x82, 0x84, 0x85, 0x87, 0x8a, + 0xaa, 0xab, 0x93, 0xcf, 0xe5, 0xa8, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 77 nits */ + { MCS_GAMMACTL, + 0x00, 0xa1, 0x00, 0xad, 0x00, 0xa5, + 0x89, 0x89, 0x8a, 0x88, 0x87, 0x89, + 0x8c, 0x89, 0x8d, 0x7f, 0x81, 0x85, + 0x84, 0x83, 0x84, 0x81, 0x83, 0x86, + 0xaa, 0xab, 0x93, 0xc0, 0xd3, 0xa1, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 82 nits */ + { MCS_GAMMACTL, + 0x00, 0xa5, 0x00, 0xb0, 0x00, 0xa9, + 0x88, 0x89, 0x89, 0x85, 0x86, 0x89, + 0x8a, 0x88, 0x8b, 0x82, 0x82, 0x87, + 0x81, 0x80, 0x82, 0x89, 0x8b, 0x8b, + 0xa2, 0xa3, 0x8e, 0xc0, 0xd3, 0xa1, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 87 nits */ + { MCS_GAMMACTL, + 0x00, 0xab, 0x00, 0xb4, 0x00, 0xad, + 0x88, 0x89, 0x8a, 0x84, 0x86, 0x88, + 0x8a, 0x88, 0x8b, 0x7f, 0x7f, 0x84, + 0x86, 0x84, 0x85, 0x85, 0x86, 0x88, + 0xa2, 0xa3, 0x8f, 0xc0, 0xd3, 0xa1, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 93 nits */ + { MCS_GAMMACTL, + 0x00, 0xaf, 0x00, 0xb9, 0x00, 0xb1, + 0x88, 0x89, 0x8a, 0x84, 0x85, 0x87, + 0x8a, 0x89, 0x8b, 0x7e, 0x7e, 0x83, + 0x87, 0x86, 0x86, 0x88, 0x8a, 0x89, + 0x9c, 0x9c, 0x8b, 0xc0, 0xd3, 0xa1, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 98 nits */ + { MCS_GAMMACTL, + 0x00, 0xb3, 0x00, 0xbc, 0x00, 0xb5, + 0x88, 0x88, 0x88, 0x84, 0x84, 0x86, + 0x8a, 0x88, 0x8a, 0x7f, 0x7f, 0x84, + 0x84, 0x83, 0x84, 0x88, 0x8a, 0x89, + 0x9c, 0x9c, 0x8b, 0xc0, 0xd3, 0xa1, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 105 nits */ + { MCS_GAMMACTL, + 0x00, 0xb7, 0x00, 0xc0, 0x00, 0xba, + 0x87, 0x87, 0x88, 0x85, 0x85, 0x87, + 0x89, 0x88, 0x89, 0x7f, 0x7f, 0x83, + 0x81, 0x80, 0x82, 0x88, 0x8a, 0x89, + 0x9c, 0x9c, 0x8c, 0xb2, 0xc2, 0x9a, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 111 nits */ + { MCS_GAMMACTL, + 0x00, 0xbb, 0x00, 0xc3, 0x00, 0xbe, + 0x87, 0x87, 0x88, 0x85, 0x85, 0x88, + 0x88, 0x87, 0x89, 0x80, 0x80, 0x84, + 0x81, 0x81, 0x82, 0x85, 0x86, 0x87, + 0x9c, 0x9c, 0x8b, 0xb2, 0xc2, 0x9a, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x15 }, + }, { + /* 119 nits */ + { MCS_GAMMACTL, + 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4, + 0x87, 0x87, 0x88, 0x82, 0x84, 0x86, + 0x87, 0x85, 0x87, 0x82, 0x81, 0x84, + 0x83, 0x82, 0x83, 0x80, 0x81, 0x84, + 0x9c, 0x9c, 0x8c, 0xb2, 0xc2, 0x9a, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x02, 0x14 }, + }, { + /* 126 nits */ + { MCS_GAMMACTL, + 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4, + 0x87, 0x87, 0x88, 0x82, 0x84, 0x86, + 0x87, 0x85, 0x87, 0x82, 0x81, 0x84, + 0x83, 0x82, 0x83, 0x80, 0x81, 0x84, + 0x9c, 0x9c, 0x8d, 0xb2, 0xc2, 0x9a, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x01, 0xde }, + }, { + /* 134 nits */ + { MCS_GAMMACTL, + 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4, + 0x87, 0x87, 0x88, 0x82, 0x84, 0x86, + 0x87, 0x85, 0x87, 0x82, 0x81, 0x84, + 0x83, 0x82, 0x83, 0x80, 0x81, 0x84, + 0x9c, 0x9c, 0x8d, 0xa4, 0xb0, 0x92, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x01, 0x94 }, + }, { + /* 143 nits */ + { MCS_GAMMACTL, + 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3, + 0x87, 0x87, 0x88, 0x82, 0x84, 0x86, + 0x87, 0x85, 0x87, 0x82, 0x81, 0x85, + 0x83, 0x82, 0x83, 0x80, 0x81, 0x84, + 0x92, 0x92, 0x89, 0xab, 0xb6, 0x96, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x01, 0x46 }, 
+ }, { + /* 152 nits */ + { MCS_GAMMACTL, + 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3, + 0x87, 0x87, 0x88, 0x83, 0x84, 0x86, + 0x87, 0x85, 0x87, 0x81, 0x81, 0x85, + 0x84, 0x82, 0x83, 0x80, 0x81, 0x83, + 0x92, 0x92, 0x8b, 0xab, 0xb6, 0x96, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0xfa }, + }, { + /* 162 nits */ + { MCS_GAMMACTL, + 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3, + 0x87, 0x87, 0x88, 0x83, 0x84, 0x86, + 0x87, 0x85, 0x87, 0x81, 0x81, 0x84, + 0x84, 0x82, 0x84, 0x80, 0x81, 0x83, + 0x92, 0x92, 0x8b, 0x9d, 0xa4, 0x8e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0xac }, + }, { + /* 172 nits */ + { MCS_GAMMACTL, + 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3, + 0x87, 0x87, 0x88, 0x83, 0x84, 0x86, + 0x87, 0x85, 0x87, 0x81, 0x81, 0x84, + 0x84, 0x82, 0x83, 0x80, 0x81, 0x84, + 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x57 }, + }, { + /* 183 nits */ + { MCS_GAMMACTL, + 0x00, 0xc2, 0x00, 0xca, 0x00, 0xc5, + 0x86, 0x86, 0x87, 0x85, 0x84, 0x87, + 0x87, 0x86, 0x88, 0x7e, 0x80, 0x83, + 0x84, 0x82, 0x83, 0x80, 0x81, 0x83, + 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 195 nits */ + { MCS_GAMMACTL, + 0x00, 0xc7, 0x00, 0xce, 0x00, 0xc9, + 0x86, 0x87, 0x86, 0x83, 0x83, 0x85, + 0x85, 0x84, 0x86, 0x82, 0x82, 0x85, + 0x80, 0x80, 0x81, 0x81, 0x81, 0x84, + 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 207 nits */ + { MCS_GAMMACTL, + 0x00, 0xcc, 0x00, 0xd2, 0x00, 0xce, + 0x86, 0x86, 0x87, 0x81, 0x83, 0x84, + 0x84, 0x82, 0x84, 0x83, 0x83, 0x85, + 0x81, 0x81, 0x82, 0x7c, 0x7d, 0x81, + 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 220 nits */ + { MCS_GAMMACTL, + 0x00, 0xd1, 0x00, 0xd6, 0x00, 0xd3, + 0x86, 0x86, 0x86, 0x81, 0x83, 0x84, + 0x84, 0x82, 0x84, 0x80, 0x80, 0x83, + 0x81, 0x81, 0x82, 0x7c, 0x7d, 0x81, + 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 234 nits */ + { MCS_GAMMACTL, + 0x00, 0xd6, 0x00, 0xdb, 0x00, 0xd8, + 0x85, 0x85, 0x85, 0x81, 0x83, 0x84, + 0x83, 0x82, 0x83, 0x80, 0x80, 0x82, + 0x84, 0x82, 0x83, 0x79, 0x79, 0x7e, + 0x93, 0x92, 0x8d, 0x9d, 0xa4, 0x8e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 249 nits */ + { MCS_GAMMACTL, + 0x00, 0xdc, 0x00, 0xe0, 0x00, 0xdd, + 0x84, 0x84, 0x84, 0x81, 0x82, 0x83, + 0x84, 0x82, 0x84, 0x7f, 0x7f, 0x82, + 0x81, 0x80, 0x81, 0x80, 0x81, 0x82, + 0x8c, 0x8c, 0x86, 0x9d, 0xa4, 0x8e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 265 nits */ + { MCS_GAMMACTL, + 0x00, 0xe2, 0x00, 0xe5, 0x00, 0xe3, + 0x83, 0x83, 0x83, 0x81, 0x82, 0x83, + 0x82, 0x82, 0x83, 0x82, 0x81, 0x83, + 0x7f, 0x7e, 0x80, 0x7c, 0x7d, 0x80, + 0x8c, 0x8c, 0x86, 0x8e, 0x92, 0x87, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 282 nits */ + { MCS_GAMMACTL, + 0x00, 0xe8, 0x00, 0xea, 0x00, 0xe9, + 0x83, 0x83, 0x83, 0x80, 0x82, 0x82, + 0x81, 0x82, 0x82, 0x82, 0x81, 0x82, + 0x81, 0x80, 0x81, 0x80, 0x80, 0x81, + 0x85, 0x85, 0x83, 0x8e, 0x92, 0x87, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 300 nits */ + { MCS_GAMMACTL, + 0x00, 0xed, 0x00, 0xef, 0x00, 0xed, + 0x81, 0x82, 0x81, 0x81, 0x81, 0x82, + 0x82, 0x82, 0x83, 0x80, 0x80, 0x81, + 0x81, 0x81, 0x82, 0x83, 0x83, 0x83, + 0x80, 0x80, 0x7f, 0x8e, 0x92, 0x87, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 316 nits */ + { MCS_GAMMACTL, + 0x00, 0xf3, 0x00, 0xf4, 0x00, 0xf3, + 0x80, 0x81, 0x80, 0x81, 0x81, 0x81, + 0x82, 0x82, 
0x82, 0x81, 0x80, 0x81, + 0x82, 0x82, 0x83, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x7f, 0x80, 0x80, 0x80, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 333 nits */ + { MCS_GAMMACTL, + 0x00, 0xf8, 0x00, 0xf8, 0x00, 0xf8, + 0x80, 0x81, 0x80, 0x81, 0x80, 0x81, + 0x81, 0x82, 0x82, 0x81, 0x80, 0x81, + 0x83, 0x83, 0x83, 0x7e, 0x7d, 0x7e, + 0x80, 0x80, 0x7f, 0x80, 0x80, 0x80, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 360 nits */ + { MCS_GAMMACTL, + 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 378 nits */ + { MCS_GAMMACTL, + 0x01, 0x04, 0x01, 0x03, 0x01, 0x04, + 0x7f, 0x7f, 0x80, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x80, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7f, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 395 nits */ + { MCS_GAMMACTL, + 0x01, 0x09, 0x01, 0x07, 0x01, 0x08, + 0x7e, 0x7f, 0x80, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x80, 0x7f, 0x7f, 0x7e, 0x7e, 0x7e, + 0x80, 0x80, 0x7f, 0x7e, 0x7e, 0x7f, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 413 nits */ + { MCS_GAMMACTL, + 0x01, 0x0e, 0x01, 0x0b, 0x01, 0x0c, + 0x7e, 0x7f, 0x80, 0x7e, 0x7e, 0x7e, + 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, + 0x80, 0x7f, 0x7f, 0x7d, 0x7d, 0x7d, + 0x80, 0x80, 0x7f, 0x7d, 0x7e, 0x7e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 430 nits */ + { MCS_GAMMACTL, + 0x01, 0x13, 0x01, 0x0f, 0x01, 0x10, + 0x7d, 0x7f, 0x80, 0x7e, 0x7e, 0x7e, + 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, + 0x80, 0x7f, 0x7f, 0x7d, 0x7d, 0x7d, + 0x80, 0x80, 0x7f, 0x7c, 0x7d, 0x7e, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 448 nits */ + { MCS_GAMMACTL, + 0x01, 0x18, 0x01, 0x13, 0x01, 0x14, + 0x7c, 0x7e, 0x80, 0x7e, 0x7e, 0x7e, + 0x7e, 0x7e, 0x7d, 0x7e, 0x7f, 0x7e, + 0x80, 0x7f, 0x7f, 0x7c, 0x7c, 0x7c, + 0x80, 0x80, 0x7e, 0x7b, 0x7c, 0x7d, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 465 nits */ + { MCS_GAMMACTL, + 0x01, 0x1d, 0x01, 0x17, 0x01, 0x18, + 0x7c, 0x7e, 0x80, 0x7d, 0x7d, 0x7d, + 0x7d, 0x7d, 0x7d, 0x7e, 0x7f, 0x7e, + 0x80, 0x7f, 0x7f, 0x7b, 0x7b, 0x7b, + 0x80, 0x80, 0x7e, 0x7a, 0x7c, 0x7d, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 483 nits */ + { MCS_GAMMACTL, + 0x01, 0x22, 0x01, 0x1b, 0x01, 0x1c, + 0x7b, 0x7e, 0x80, 0x7d, 0x7d, 0x7d, + 0x7d, 0x7d, 0x7c, 0x7e, 0x7f, 0x7e, + 0x80, 0x7f, 0x7f, 0x7a, 0x7a, 0x7a, + 0x80, 0x80, 0x7e, 0x79, 0x7b, 0x7c, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, { + /* 500 nits */ + { MCS_GAMMACTL, + 0x01, 0x27, 0x01, 0x1f, 0x01, 0x20, + 0x7b, 0x7e, 0x80, 0x7d, 0x7d, 0x7d, + 0x7d, 0x7d, 0x7c, 0x7e, 0x7f, 0x7e, + 0x80, 0x7f, 0x7f, 0x7a, 0x7a, 0x7a, + 0x81, 0x80, 0x7e, 0x79, 0x7b, 0x7c, + 0x00, 0x00, 0x00, }, + { MCS_AIDCTL, 0x00, 0x10 }, + }, +}; + +struct s6e8aa5x01_ams561ra01_ctx { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct backlight_device *bl; + struct gpio_desc *reset_gpio; + struct regulator_bulk_data *supplies; + u32 nr_supplies; +}; + +static const struct regulator_bulk_data s6e8aa5x01_ams561ra01_supplies[] = { + { .supply = "vdd" }, + { .supply = "vci" }, +}; + +static inline struct s6e8aa5x01_ams561ra01_ctx *to_ctx(struct drm_panel *panel) +{ + return container_of(panel, struct s6e8aa5x01_ams561ra01_ctx, panel); +} + +static int 
s6e8aa5x01_ams561ra01_update_status(struct backlight_device *bl) +{ + struct s6e8aa5x01_ams561ra01_ctx *ctx = bl_get_data(bl); + struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi }; + u16 lvl = backlight_get_brightness(bl); + + if (!ctx->panel.enabled) + return 0; + + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0x5a, 0x5a); + + mipi_dsi_dcs_write_buffer_multi(&dsi, + s6e8aa5x01_ams561ra01_cmds[lvl].gamma, + GAMMA_CMD_LEN); + mipi_dsi_dcs_write_buffer_multi(&dsi, + s6e8aa5x01_ams561ra01_cmds[lvl].aid, + AID_CMD_LEN); + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_GAMMAUPD, 0x03); + + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0xa5, 0xa5); + + return dsi.accum_err; +} + +static int s6e8aa5x01_ams561ra01_prepare(struct drm_panel *panel) +{ + struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = regulator_bulk_enable(ctx->nr_supplies, ctx->supplies); + if (ret < 0) { + dev_err(dev, "failed to enable regulators: %d\n", ret); + return ret; + } + + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); + + return 0; +} + +static int s6e8aa5x01_ams561ra01_unprepare(struct drm_panel *panel) +{ + struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(5000, 6000); + + regulator_bulk_disable(ctx->nr_supplies, ctx->supplies); + + return 0; +} + +static int s6e8aa5x01_ams561ra01_enable(struct drm_panel *panel) +{ + struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel); + struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi }; + + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi); + mipi_dsi_msleep(&dsi, 100); + + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0x5a, 0x5a); + + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_PENTILE, 0xd8, 0xd8, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_PCD, 0x5c); + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ERRFLAG, 0xed, 0xc7, 0x23, 0x67); + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_DISPCTL, 0x0c, 0x0c, 0xb9, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_LTPSCTL, + 0x00, 0x45, 0x10, 0x10, 0x08, 0x32, 0x54, 0x00, + 0x00, 0x00, 0x00, 0x07, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x48, 0x5e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, + 0x08, 0x05, 0x2a, 0x54, 0x03, 0xcc, 0x00, 0xff, + 0xfb, 0x03, 0x0d, 0x00, 0x11, 0x0f, 0x02, 0x03, + 0x0b, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x13, 0x13, 0x13, 0x00, 0x02, 0x03, 0x0b, + 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x13); + + mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0xa5, 0xa5); + + mipi_dsi_dcs_set_display_on_multi(&dsi); + + return dsi.accum_err; +} + +static int s6e8aa5x01_ams561ra01_disable(struct drm_panel *panel) +{ + struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel); + struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi }; + + mipi_dsi_dcs_set_display_off_multi(&dsi); + mipi_dsi_msleep(&dsi, 100); + + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi); + mipi_dsi_msleep(&dsi, 150); + + return dsi.accum_err; +} + +static const struct drm_display_mode s6e8aa5x01_ams561ra01_mode = { + .clock = (720 + 62 + 2 + 26) * (1480 + 12 + 2 + 10) * 60 / 1000, + .hdisplay = 720, + .hsync_start = 720 + 62, + .hsync_end = 720 + 62 + 2, + .htotal = 720 + 62 + 2 + 26, + .vdisplay = 1480, + .vsync_start = 1480 + 12, + .vsync_end = 1480 + 12 + 2, + .vtotal = 1480 + 12 + 2 + 10, + 
.width_mm = 62, + .height_mm = 128, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static int s6e8aa5x01_ams561ra01_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, + &s6e8aa5x01_ams561ra01_mode); +} + +static const struct backlight_ops s6e8aa5x01_ams561ra01_bl_ops = { + .update_status = s6e8aa5x01_ams561ra01_update_status, +}; + +static const struct drm_panel_funcs s6e8aa5x01_ams561ra01_panel_funcs = { + .prepare = s6e8aa5x01_ams561ra01_prepare, + .unprepare = s6e8aa5x01_ams561ra01_unprepare, + .enable = s6e8aa5x01_ams561ra01_enable, + .disable = s6e8aa5x01_ams561ra01_disable, + .get_modes = s6e8aa5x01_ams561ra01_get_modes, +}; + +static int s6e8aa5x01_ams561ra01_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct s6e8aa5x01_ams561ra01_ctx *ctx; + int ret; + + ctx = devm_drm_panel_alloc(dev, struct s6e8aa5x01_ams561ra01_ctx, panel, + &s6e8aa5x01_ams561ra01_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + ctx->nr_supplies = ARRAY_SIZE(s6e8aa5x01_ams561ra01_supplies); + ret = devm_regulator_bulk_get_const(dev, ctx->nr_supplies, + s6e8aa5x01_ams561ra01_supplies, + &ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "failed to get reset-gpios\n"); + + ctx->bl = devm_backlight_device_register(dev, dev_name(dev), dev, ctx, + &s6e8aa5x01_ams561ra01_bl_ops, + NULL); + if (IS_ERR(ctx->bl)) + return dev_err_probe(dev, PTR_ERR(ctx->bl), + "failed to register backlight device\n"); + + ctx->bl->props.type = BACKLIGHT_PLATFORM; + ctx->bl->props.brightness = ARRAY_SIZE(s6e8aa5x01_ams561ra01_cmds) - 1; + ctx->bl->props.max_brightness = ctx->bl->props.brightness; + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_VIDEO_NO_HFP; + + ctx->panel.prepare_prev_first = true; + drm_panel_add(&ctx->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "failed to attach to DSI host\n"); + } + + return 0; +} + +static void s6e8aa5x01_ams561ra01_remove(struct mipi_dsi_device *dsi) +{ + struct s6e8aa5x01_ams561ra01_ctx *ctx = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id s6e8aa5x01_ams561ra01_of_device_id[] = { + { .compatible = "samsung,s6e8aa5x01-ams561ra01" }, + { } +}; +MODULE_DEVICE_TABLE(of, s6e8aa5x01_ams561ra01_of_device_id); + +static struct mipi_dsi_driver s6e8aa5x01_ams561ra01_dsi_driver = { + .probe = s6e8aa5x01_ams561ra01_probe, + .remove = s6e8aa5x01_ams561ra01_remove, + .driver = { + .name = "panel-samsung-s6e8aa5x01-ams561ra01", + .of_match_table = s6e8aa5x01_ams561ra01_of_device_id, + }, +}; +module_mipi_dsi_driver(s6e8aa5x01_ams561ra01_dsi_driver); + +MODULE_AUTHOR("Kaustabh Chakraborty <kauschluss@disroot.org>"); +MODULE_DESCRIPTION("Samsung AMS561RA01 Panel with S6E8AA5X01 Controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 3333d4a07504..0019de93be1b 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -3716,6 +3716,29 @@ static const 
struct panel_desc olimex_lcd_olinuxino_43ts = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct drm_display_mode olimex_lcd_olinuxino_5cts_mode = { + .clock = 33300, + .hdisplay = 800, + .hsync_start = 800 + 210, + .hsync_end = 800 + 210 + 20, + .htotal = 800 + 210 + 20 + 26, + .vdisplay = 480, + .vsync_start = 480 + 22, + .vsync_end = 480 + 22 + 10, + .vtotal = 480 + 22 + 10 + 13, +}; + +static const struct panel_desc olimex_lcd_olinuxino_5cts = { + .modes = &olimex_lcd_olinuxino_5cts_mode, + .num_modes = 1, + .size = { + .width = 154, + .height = 86, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, +}; + + static const struct display_timing ontat_kd50g21_40nt_a1_timing = { .pixelclock = { 30000000, 30000000, 50000000 }, .hactive = { 800, 800, 800 }, @@ -5279,6 +5302,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "olimex,lcd-olinuxino-43-ts", .data = &olimex_lcd_olinuxino_43ts, }, { + .compatible = "olimex,lcd-olinuxino-5-cts", + .data = &olimex_lcd_olinuxino_5cts, + }, { .compatible = "ontat,kd50g21-40nt-a1", .data = &ontat_kd50g21_40nt_a1, }, { diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c index 1a007a244d84..6c348fe28955 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Driver for panels based on Sitronix ST7703 controller, souch as: + * Driver for panels based on Sitronix ST7703 controller, such as: * * - Rocktech jh057n00900 5.5" MIPI-DSI panel * diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c index 4854437e2899..6d40b9ddfe02 100644 --- a/drivers/gpu/drm/panel/panel-summit.c +++ b/drivers/gpu/drm/panel/panel-summit.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/backlight.h> +#include <linux/mod_devicetable.h> +#include <linux/property.h> #include <drm/drm_device.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_mode.h> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index bb73f2a68a12..85d6289a6eda 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -432,7 +432,7 @@ static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo, if (!refcount) return; - resident_size = bo->base.pages ? 
bo->base.base.size : 0; + resident_size = panfrost_gem_rss(&bo->base.base); snprintf(creator_info, sizeof(creator_info), "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 563f16bae543..0dd62e8b2fa7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -203,7 +203,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); panfrost_gem_mapping_put(perfcnt->mapping); perfcnt->mapping = NULL; - pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); return 0; @@ -279,7 +278,6 @@ void panfrost_perfcnt_close(struct drm_file *file_priv) if (perfcnt->user == pfile) panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); - pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); } diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile index 15294719b09c..02db21748c12 100644 --- a/drivers/gpu/drm/panthor/Makefile +++ b/drivers/gpu/drm/panthor/Makefile @@ -8,6 +8,7 @@ panthor-y := \ panthor_gem.o \ panthor_gpu.o \ panthor_heap.o \ + panthor_hw.o \ panthor_mmu.o \ panthor_sched.o diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index f0b2da5b2b96..81df49880bd8 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -18,6 +18,7 @@ #include "panthor_device.h" #include "panthor_fw.h" #include "panthor_gpu.h" +#include "panthor_hw.h" #include "panthor_mmu.h" #include "panthor_regs.h" #include "panthor_sched.h" @@ -244,6 +245,10 @@ int panthor_device_init(struct panthor_device *ptdev) return ret; } + ret = panthor_hw_init(ptdev); + if (ret) + goto err_rpm_put; + ret = panthor_gpu_init(ptdev); if (ret) goto err_rpm_put; diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 1116f2d2826e..9256806eb662 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1103,14 +1103,15 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data, ret = group_priority_permit(file, args->priority); if (ret) - return ret; + goto out; ret = panthor_group_create(pfile, args, queue_args); - if (ret >= 0) { - args->group_handle = ret; - ret = 0; - } + if (ret < 0) + goto out; + args->group_handle = ret; + ret = 0; +out: kvfree(queue_args); return ret; } @@ -1400,14 +1401,9 @@ panthor_open(struct drm_device *ddev, struct drm_file *file) struct panthor_file *pfile; int ret; - if (!try_module_get(THIS_MODULE)) - return -EINVAL; - pfile = kzalloc(sizeof(*pfile), GFP_KERNEL); - if (!pfile) { - ret = -ENOMEM; - goto err_put_mod; - } + if (!pfile) + return -ENOMEM; pfile->ptdev = ptdev; pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET; @@ -1439,9 +1435,6 @@ err_destroy_vm_pool: err_free_file: kfree(pfile); - -err_put_mod: - module_put(THIS_MODULE); return ret; } @@ -1454,7 +1447,6 @@ panthor_postclose(struct drm_device *ddev, struct drm_file *file) panthor_vm_pool_destroy(pfile); kfree(pfile); - module_put(THIS_MODULE); } static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = { @@ -1555,6 +1547,7 @@ static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file) } static const struct file_operations panthor_drm_driver_fops = { + .owner = THIS_MODULE, .open = 
drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 36f1034839c2..9bf06e55eaee 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -1402,3 +1402,8 @@ err_unplug_fw: } MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch10.10/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin"); diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index cb7a335e07d7..db69449a5be0 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -35,40 +35,9 @@ struct panthor_gpu { /** @reqs_acked: GPU request wait queue. */ wait_queue_head_t reqs_acked; -}; - -/** - * struct panthor_model - GPU model description - */ -struct panthor_model { - /** @name: Model name. */ - const char *name; - - /** @arch_major: Major version number of architecture. */ - u8 arch_major; - /** @product_major: Major version number of product. */ - u8 product_major; -}; - -/** - * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified - * by a combination of the major architecture version and the major product - * version. - * @_name: Name for the GPU model. - * @_arch_major: Architecture major. - * @_product_major: Product major. - */ -#define GPU_MODEL(_name, _arch_major, _product_major) \ -{\ - .name = __stringify(_name), \ - .arch_major = _arch_major, \ - .product_major = _product_major, \ -} - -static const struct panthor_model gpu_models[] = { - GPU_MODEL(g610, 10, 7), - {}, + /** @cache_flush_lock: Lock to serialize cache flushes */ + struct mutex cache_flush_lock; }; #define GPU_INTERRUPTS_MASK \ @@ -83,66 +52,6 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev) ptdev->coherent ? 
GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE); } -static void panthor_gpu_init_info(struct panthor_device *ptdev) -{ - const struct panthor_model *model; - u32 arch_major, product_major; - u32 major, minor, status; - unsigned int i; - - ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); - ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID); - ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID); - ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES); - ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES); - ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES); - ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES); - ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES); - ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES); - ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS); - ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE); - ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE); - ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES); - for (i = 0; i < 4; i++) - ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i)); - - ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT); - - ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); - ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); - ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); - - arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id); - product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id); - major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id); - minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id); - status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id); - - for (model = gpu_models; model->name; model++) { - if (model->arch_major == arch_major && - model->product_major == product_major) - break; - } - - drm_info(&ptdev->base, - "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x", - model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16, - major, minor, status); - - drm_info(&ptdev->base, - "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x", - ptdev->gpu_info.l2_features, - ptdev->gpu_info.tiler_features, - ptdev->gpu_info.mem_features, - ptdev->gpu_info.mmu_features, - ptdev->gpu_info.as_present); - - drm_info(&ptdev->base, - "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx", - ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present, - ptdev->gpu_info.tiler_present); -} - static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status) { gpu_write(ptdev, GPU_INT_CLEAR, status); @@ -204,8 +113,8 @@ int panthor_gpu_init(struct panthor_device *ptdev) spin_lock_init(&gpu->reqs_lock); init_waitqueue_head(&gpu->reqs_acked); + mutex_init(&gpu->cache_flush_lock); ptdev->gpu = gpu; - panthor_gpu_init_info(ptdev); dma_set_max_seg_size(ptdev->base.dev, UINT_MAX); pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features); @@ -353,6 +262,9 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev, bool timedout = false; unsigned long flags; + /* Serialize cache flush operations. 
*/ + guard(mutex)(&ptdev->gpu->cache_flush_lock); + spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags); if (!drm_WARN_ON(&ptdev->base, ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) { diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c new file mode 100644 index 000000000000..4f2858114e5e --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_hw.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* Copyright 2025 ARM Limited. All rights reserved. */ + +#include "panthor_device.h" +#include "panthor_hw.h" +#include "panthor_regs.h" + +#define GPU_PROD_ID_MAKE(arch_major, prod_major) \ + (((arch_major) << 24) | (prod_major)) + +static char *get_gpu_model_name(struct panthor_device *ptdev) +{ + const u32 gpu_id = ptdev->gpu_info.gpu_id; + const u32 product_id = GPU_PROD_ID_MAKE(GPU_ARCH_MAJOR(gpu_id), + GPU_PROD_MAJOR(gpu_id)); + const bool ray_intersection = !!(ptdev->gpu_info.gpu_features & + GPU_FEATURES_RAY_INTERSECTION); + const u8 shader_core_count = hweight64(ptdev->gpu_info.shader_present); + + switch (product_id) { + case GPU_PROD_ID_MAKE(10, 2): + return "Mali-G710"; + case GPU_PROD_ID_MAKE(10, 3): + return "Mali-G510"; + case GPU_PROD_ID_MAKE(10, 4): + return "Mali-G310"; + case GPU_PROD_ID_MAKE(10, 7): + return "Mali-G610"; + case GPU_PROD_ID_MAKE(11, 2): + if (shader_core_count > 10 && ray_intersection) + return "Mali-G715-Immortalis"; + else if (shader_core_count >= 7) + return "Mali-G715"; + + fallthrough; + case GPU_PROD_ID_MAKE(11, 3): + return "Mali-G615"; + case GPU_PROD_ID_MAKE(12, 0): + if (shader_core_count >= 10 && ray_intersection) + return "Mali-G720-Immortalis"; + else if (shader_core_count >= 6) + return "Mali-G720"; + + fallthrough; + case GPU_PROD_ID_MAKE(12, 1): + return "Mali-G620"; + case GPU_PROD_ID_MAKE(13, 0): + if (shader_core_count >= 10 && ray_intersection) + return "Mali-G925-Immortalis"; + else if (shader_core_count >= 6) + return "Mali-G725"; + + fallthrough; + case GPU_PROD_ID_MAKE(13, 1): + return "Mali-G625"; + } + + return "(Unknown Mali GPU)"; +} + +static void panthor_gpu_info_init(struct panthor_device *ptdev) +{ + unsigned int i; + + ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); + ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID); + ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID); + ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES); + ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES); + ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES); + ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES); + ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES); + ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES); + ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS); + ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE); + ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE); + ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES); + for (i = 0; i < 4; i++) + ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i)); + + ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT); + + ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); + ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); + ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); + + /* Introduced in arch 11.x */ + ptdev->gpu_info.gpu_features = 
gpu_read64(ptdev, GPU_FEATURES); +} + +static void panthor_hw_info_init(struct panthor_device *ptdev) +{ + u32 major, minor, status; + + panthor_gpu_info_init(ptdev); + + major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id); + minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id); + status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id); + + drm_info(&ptdev->base, + "%s id 0x%x major 0x%x minor 0x%x status 0x%x", + get_gpu_model_name(ptdev), ptdev->gpu_info.gpu_id >> 16, + major, minor, status); + + drm_info(&ptdev->base, + "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x", + ptdev->gpu_info.l2_features, + ptdev->gpu_info.tiler_features, + ptdev->gpu_info.mem_features, + ptdev->gpu_info.mmu_features, + ptdev->gpu_info.as_present); + + drm_info(&ptdev->base, + "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx", + ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present, + ptdev->gpu_info.tiler_present); +} + +int panthor_hw_init(struct panthor_device *ptdev) +{ + panthor_hw_info_init(ptdev); + + return 0; +} diff --git a/drivers/gpu/drm/panthor/panthor_hw.h b/drivers/gpu/drm/panthor/panthor_hw.h new file mode 100644 index 000000000000..0af6acc6aa6a --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_hw.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ +/* Copyright 2025 ARM Limited. All rights reserved. */ + +#ifndef __PANTHOR_HW_H__ +#define __PANTHOR_HW_H__ + +struct panthor_device; + +int panthor_hw_init(struct panthor_device *ptdev); + +#endif /* __PANTHOR_HW_H__ */ diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 4140f697ba5a..2003b91a8409 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -29,6 +29,7 @@ #include "panthor_device.h" #include "panthor_gem.h" +#include "panthor_gpu.h" #include "panthor_heap.h" #include "panthor_mmu.h" #include "panthor_regs.h" @@ -568,6 +569,35 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr, write_cmd(ptdev, as_nr, AS_COMMAND_LOCK); } +static int mmu_hw_do_flush_on_gpu_ctrl(struct panthor_device *ptdev, int as_nr, + u32 op) +{ + const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV; + u32 lsc_flush_op = 0; + int ret; + + if (op == AS_COMMAND_FLUSH_MEM) + lsc_flush_op = CACHE_CLEAN | CACHE_INV; + + ret = wait_ready(ptdev, as_nr); + if (ret) + return ret; + + ret = panthor_gpu_flush_caches(ptdev, l2_flush_op, lsc_flush_op, 0); + if (ret) + return ret; + + /* + * Explicitly unlock the region as the AS is not unlocked automatically + * at the end of the GPU_CONTROL cache flush command, unlike + * AS_COMMAND_FLUSH_MEM or AS_COMMAND_FLUSH_PT. 
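+ *
+ * Illustrative sequence for an AS_COMMAND_FLUSH_MEM request routed
+ * through GPU_CONTROL -- a sketch of what the caller plus this helper
+ * end up doing, using only functions visible in this file:
+ *
+ *   lock_region(ptdev, as_nr, iova, size);      // AS_COMMAND_LOCK
+ *   panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV,
+ *                            CACHE_CLEAN | CACHE_INV, 0);
+ *   write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK); // explicit unlock
+ *   wait_ready(ptdev, as_nr);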
+ */ + write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK); + + /* Wait for the unlock command to complete */ + return wait_ready(ptdev, as_nr); +} + static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, u64 iova, u64 size, u32 op) { @@ -585,6 +615,9 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, if (op != AS_COMMAND_UNLOCK) lock_region(ptdev, as_nr, iova, size); + if (op == AS_COMMAND_FLUSH_MEM || op == AS_COMMAND_FLUSH_PT) + return mmu_hw_do_flush_on_gpu_ctrl(ptdev, as_nr, op); + /* Run the MMU operation */ write_cmd(ptdev, as_nr, op); @@ -2169,15 +2202,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op, mutex_lock(&vm->op_lock); vm->op_ctx = op; switch (op_type) { - case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: + case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: { + const struct drm_gpuvm_map_req map_req = { + .map.va.addr = op->va.addr, + .map.va.range = op->va.range, + .map.gem.obj = op->map.vm_bo->obj, + .map.gem.offset = op->map.bo_offset, + }; + if (vm->unusable) { ret = -EINVAL; break; } - ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range, - op->map.vm_bo->obj, op->map.bo_offset); + ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req); break; + } case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range); diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h index 48bbfd40138c..8bee76d01bf8 100644 --- a/drivers/gpu/drm/panthor/panthor_regs.h +++ b/drivers/gpu/drm/panthor/panthor_regs.h @@ -70,6 +70,9 @@ #define GPU_PWR_OVERRIDE0 0x54 #define GPU_PWR_OVERRIDE1 0x58 +#define GPU_FEATURES 0x60 +#define GPU_FEATURES_RAY_INTERSECTION BIT(2) + #define GPU_TIMESTAMP_OFFSET 0x88 #define GPU_CYCLE_COUNT 0x90 #define GPU_TIMESTAMP 0x98 diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 8f17394cc82a..ba5dc3e443d9 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -641,6 +641,15 @@ struct panthor_group { size_t kbo_sizes; } fdinfo; + /** @task_info: Info of current->group_leader that created the group. */ + struct { + /** @task_info.pid: pid of current->group_leader */ + pid_t pid; + + /** @task_info.comm: comm of current->group_leader */ + char comm[TASK_COMM_LEN]; + } task_info; + /** @state: Group state. 
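 *
 * (Aside on @task_info above, not part of the patch: the pid/comm pair
 * is captured once at group creation and replayed in the fault logs
 * added further below, e.g. a hypothetical
 *
 *   panthor <dev>: CS_FATAL: pid=1234, comm=glmark2
 *
 * so a dead group can be traced to its creator even after that task
 * has exited.)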
*/ enum panthor_group_state state; @@ -1355,8 +1364,12 @@ cs_slot_process_fatal_event_locked(struct panthor_device *ptdev, fatal = cs_iface->output->fatal; info = cs_iface->output->fatal_info; - if (group) + if (group) { + drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n", + group->task_info.pid, group->task_info.comm); + group->fatal_queues |= BIT(cs_id); + } if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) { /* If this exception is unrecoverable, queue a reset, and make @@ -1416,6 +1429,11 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev, spin_unlock(&queue->fence_ctx.lock); } + if (group) { + drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n", + group->task_info.pid, group->task_info.comm); + } + drm_warn(&ptdev->base, "CSG slot %d CS slot: %d\n" "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n" @@ -1632,11 +1650,15 @@ csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 c lockdep_assert_held(&sched->lock); - drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id); - group = csg_slot->group; - if (!drm_WARN_ON(&ptdev->base, !group)) + if (!drm_WARN_ON(&ptdev->base, !group)) { + drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n", + group->task_info.pid, group->task_info.comm); + group->timedout = true; + } + + drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id); sched_queue_delayed_work(sched, tick, 0); } @@ -3218,7 +3240,8 @@ queue_timedout_job(struct drm_sched_job *sched_job) struct panthor_scheduler *sched = ptdev->scheduler; struct panthor_queue *queue = group->queues[job->queue_idx]; - drm_warn(&ptdev->base, "job timeout\n"); + drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n", + group->task_info.pid, group->task_info.comm, job->done_fence->seqno); drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress)); @@ -3389,6 +3412,14 @@ err_free_queue: return ERR_PTR(ret); } +static void group_init_task_info(struct panthor_group *group) +{ + struct task_struct *task = current->group_leader; + + group->task_info.pid = task->pid; + get_task_comm(group->task_info.comm, task); +} + static void add_group_kbo_sizes(struct panthor_device *ptdev, struct panthor_group *group) { @@ -3540,6 +3571,8 @@ int panthor_group_create(struct panthor_file *pfile, add_group_kbo_sizes(group->ptdev, group); spin_lock_init(&group->fdinfo.lock); + group_init_task_info(group); + return gid; err_put_group: diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b4bf5dfeea2d..4dc77c398617 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = { int radeon_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; fb->obj[0] = obj; - drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs); if (ret) { fb->obj[0] = NULL; @@ -1341,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj); + ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj); if (ret) { kfree(fb); drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index 
e3a481bbee7b..dc81b0c2dbff 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj) } static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, + const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { - const struct drm_format_info *info; struct radeon_device *rdev = fb_helper->dev->dev_private; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; @@ -67,8 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, int height = mode_cmd->height; u32 cpp; - info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format, - mode_cmd->modifier[0]); cpp = info->cpp[0]; /* need to align pitch with crtc limits */ @@ -206,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { struct radeon_device *rdev = fb_helper->dev->dev_private; + const struct drm_format_info *format_info; struct drm_mode_fb_cmd2 mode_cmd = { }; struct fb_info *info; struct drm_gem_object *gobj; @@ -224,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); - ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj); + format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format, + mode_cmd.modifier[0]); + ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; @@ -236,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, ret = -ENOMEM; goto err_radeon_fbdev_destroy_pinned_object; } - ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj); + ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto err_kfree; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 3102f6c2d055..9e34da2cacef 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -40,6 +40,7 @@ struct drm_fb_helper; struct drm_fb_helper_surface_size; +struct drm_format_info; struct edid; struct drm_edid; @@ -890,6 +891,7 @@ extern void radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); int radeon_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *rfb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c index f87337c3cbb5..3b52dfc0ea1e 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c @@ -913,7 +913,7 @@ static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = { * Power Management */ -static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev) +static int rzg2l_mipi_pm_runtime_suspend(struct device *dev) { struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev); @@ -923,7 +923,7 @@ static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev) +static int rzg2l_mipi_pm_runtime_resume(struct device *dev) { 
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev); int ret; @@ -940,7 +940,7 @@ static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev) } static const struct dev_pm_ops rzg2l_mipi_pm_ops = { - SET_RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL) + RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL) }; /* ----------------------------------------------------------------------------- @@ -1072,7 +1072,7 @@ static struct platform_driver rzg2l_mipi_dsi_platform_driver = { .remove = rzg2l_mipi_dsi_remove, .driver = { .name = "rzg2l-mipi-dsi", - .pm = &rzg2l_mipi_pm_ops, + .pm = pm_ptr(&rzg2l_mipi_pm_ops), .of_match_table = rzg2l_mipi_dsi_of_table, }, }; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index e2cda28a1af4..5a550fd76bf0 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -349,37 +349,16 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) } /** - * __drm_sched_run_free_queue - enqueue free-job work + * drm_sched_run_free_queue - enqueue free-job work * @sched: scheduler instance */ -static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched) +static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched) { if (!READ_ONCE(sched->pause_submit)) queue_work(sched->submit_wq, &sched->work_free_job); } /** - * drm_sched_run_free_queue - enqueue free-job work if ready - * @sched: scheduler instance - */ -static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched) -{ - struct drm_sched_job *job; - - job = list_first_entry_or_null(&sched->pending_list, - struct drm_sched_job, list); - if (job && dma_fence_is_signaled(&job->s_fence->finished)) - __drm_sched_run_free_queue(sched); -} - -static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched) -{ - spin_lock(&sched->job_list_lock); - drm_sched_run_free_queue(sched); - spin_unlock(&sched->job_list_lock); -} - -/** * drm_sched_job_done - complete a job * @s_job: pointer to the job which is done * @@ -398,7 +377,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result) dma_fence_get(&s_fence->finished); drm_sched_fence_finished(s_fence, result); dma_fence_put(&s_fence->finished); - __drm_sched_run_free_queue(sched); + drm_sched_run_free_queue(sched); } /** @@ -1134,12 +1113,16 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) * drm_sched_get_finished_job - fetch the next finished job to be destroyed * * @sched: scheduler instance + * @have_more: are there more finished jobs on the list + * + * Informs the caller through @have_more whether there are more finished jobs + * besides the returned one. * * Returns the next finished job from the pending list (if there is one) * ready for it to be destroyed. 
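 *
 * Illustrative caller pattern -- this mirrors the reworked free-job
 * worker below rather than introducing new API:
 *
 *   job = drm_sched_get_finished_job(sched, &have_more);
 *   if (job) {
 *           sched->ops->free_job(job);
 *           if (have_more)
 *                   drm_sched_run_free_queue(sched);
 *   }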
*/ static struct drm_sched_job * -drm_sched_get_finished_job(struct drm_gpu_scheduler *sched) +drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more) { struct drm_sched_job *job, *next; @@ -1147,22 +1130,25 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched) job = list_first_entry_or_null(&sched->pending_list, struct drm_sched_job, list); - if (job && dma_fence_is_signaled(&job->s_fence->finished)) { /* remove job from pending_list */ list_del_init(&job->list); /* cancel this job's TO timer */ cancel_delayed_work(&sched->work_tdr); - /* make the scheduled timestamp more accurate */ + + *have_more = false; next = list_first_entry_or_null(&sched->pending_list, typeof(*next), list); - if (next) { + /* make the scheduled timestamp more accurate */ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &next->s_fence->scheduled.flags)) next->s_fence->scheduled.timestamp = dma_fence_timestamp(&job->s_fence->finished); + + *have_more = dma_fence_is_signaled(&next->s_fence->finished); + /* start TO timer for next job */ drm_sched_start_timeout(sched); } @@ -1221,12 +1207,15 @@ static void drm_sched_free_job_work(struct work_struct *w) struct drm_gpu_scheduler *sched = container_of(w, struct drm_gpu_scheduler, work_free_job); struct drm_sched_job *job; + bool have_more; - job = drm_sched_get_finished_job(sched); - if (job) + job = drm_sched_get_finished_job(sched, &have_more); + if (job) { sched->ops->free_job(job); + if (have_more) + drm_sched_run_free_queue(sched); + } - drm_sched_run_free_queue_unlocked(sched); drm_sched_run_job_queue(sched); } diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c index 65acffc3fea8..8e9ae7d980eb 100644 --- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c +++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c @@ -219,7 +219,7 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job) unsigned long flags; if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) { - job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET; + job->flags |= DRM_MOCK_SCHED_JOB_RESET_SKIPPED; return DRM_GPU_SCHED_STAT_NO_HANG; } diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h index 63d4f2ac7074..5b262126b776 100644 --- a/drivers/gpu/drm/scheduler/tests/sched_tests.h +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h @@ -95,9 +95,10 @@ struct drm_mock_sched_job { struct completion done; -#define DRM_MOCK_SCHED_JOB_DONE 0x1 -#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2 -#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4 +#define DRM_MOCK_SCHED_JOB_DONE 0x1 +#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2 +#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4 +#define DRM_MOCK_SCHED_JOB_RESET_SKIPPED 0x8 unsigned long flags; struct list_head link; diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c index 55eb142bd7c5..82a41a456b0a 100644 --- a/drivers/gpu/drm/scheduler/tests/tests_basic.c +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -317,8 +317,8 @@ static void drm_sched_skip_reset(struct kunit *test) KUNIT_ASSERT_FALSE(test, done); KUNIT_ASSERT_EQ(test, - job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET, - 0); + job->flags & DRM_MOCK_SCHED_JOB_RESET_SKIPPED, + DRM_MOCK_SCHED_JOB_RESET_SKIPPED); i = drm_mock_sched_advance(sched, 1); KUNIT_ASSERT_EQ(test, i, 1); diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c index 8530a3ef8a7a..0358164a623c 100644 --- a/drivers/gpu/drm/sysfb/simpledrm.c +++ 
b/drivers/gpu/drm/sysfb/simpledrm.c @@ -4,7 +4,7 @@ #include <linux/clk.h> #include <linux/of_clk.h> #include <linux/minmax.h> -#include <linux/of_address.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_data/simplefb.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> @@ -179,22 +179,17 @@ simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node) static struct resource * simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node) { - struct device_node *np; - struct resource *res; + struct resource r, *res; int err; - np = of_parse_phandle(of_node, "memory-region", 0); - if (!np) + err = of_reserved_mem_region_to_resource(of_node, 0, &r); + if (err) return NULL; - res = devm_kzalloc(dev->dev, sizeof(*res), GFP_KERNEL); + res = devm_kmemdup(dev->dev, &r, sizeof(r), GFP_KERNEL); if (!res) return ERR_PTR(-ENOMEM); - err = of_address_to_resource(np, 0, res); - if (err) - return ERR_PTR(err); - if (of_property_present(of_node, "reg")) drm_warn(dev, "preferring \"memory-region\" over \"reg\" property\n"); diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index a2f40a5c7703..da89fd01c337 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -91,7 +91,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, struct dispc_device *dispc = tidss->dispc; struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); u32 hw_videoport = tcrtc->hw_videoport; - const struct drm_display_mode *mode; + struct drm_display_mode *mode; enum drm_mode_status ok; dev_dbg(ddev->dev, "%s\n", __func__); @@ -108,6 +108,9 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + if (drm_atomic_crtc_needs_modeset(crtc_state)) + drm_mode_set_crtcinfo(mode, 0); + return dispc_vp_bus_check(dispc, hw_videoport, crtc_state); } @@ -225,7 +228,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc, tidss_runtime_get(tidss); r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport, - mode->clock * 1000); + mode->crtc_clock * 1000); if (r != 0) return; diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index c0277fa36425..3f6cff2ab1b2 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -1215,13 +1215,13 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width); - hfp = mode->hsync_start - mode->hdisplay; - hsw = mode->hsync_end - mode->hsync_start; - hbp = mode->htotal - mode->hsync_end; + hfp = mode->crtc_hsync_start - mode->crtc_hdisplay; + hsw = mode->crtc_hsync_end - mode->crtc_hsync_start; + hbp = mode->crtc_htotal - mode->crtc_hsync_end; - vfp = mode->vsync_start - mode->vdisplay; - vsw = mode->vsync_end - mode->vsync_start; - vbp = mode->vtotal - mode->vsync_end; + vfp = mode->crtc_vsync_start - mode->crtc_vdisplay; + vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; + vbp = mode->crtc_vtotal - mode->crtc_vsync_end; dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H, FLD_VAL(hsw - 1, 7, 0) | @@ -1263,8 +1263,8 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, FLD_VAL(ivs, 12, 12)); dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN, - FLD_VAL(mode->hdisplay - 1, 11, 0) | - FLD_VAL(mode->vdisplay - 1, 27, 16)); + FLD_VAL(mode->crtc_hdisplay - 1, 11, 0) | + FLD_VAL(mode->crtc_vdisplay - 1, 27, 16)); VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0); } diff --git 
a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h index b8614f62186c..60c1b400eb89 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.h +++ b/drivers/gpu/drm/tidss/tidss_dispc.h @@ -7,11 +7,14 @@ #ifndef __TIDSS_DISPC_H__ #define __TIDSS_DISPC_H__ +#include <drm/drm_color_mgmt.h> + #include "tidss_drv.h" struct dispc_device; struct drm_crtc_state; +struct drm_plane_state; enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT }; diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index a1b12e52aca4..27d9a8fd541f 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -8,6 +8,7 @@ #include <linux/of.h> #include <linux/module.h> #include <linux/pm_runtime.h> +#include <linux/aperture.h> #include <drm/clients/drm_client_setup.h> #include <drm/drm_atomic.h> @@ -192,12 +193,20 @@ static int tidss_probe(struct platform_device *pdev) goto err_irq_uninstall; } + /* Remove possible early fb before setting up the fbdev */ + ret = aperture_remove_all_conflicting_devices(tidss_driver.name); + if (ret) + goto err_drm_dev_unreg; + drm_client_setup(ddev, NULL); dev_dbg(dev, "%s done\n", __func__); return 0; +err_drm_dev_unreg: + drm_dev_unregister(ddev); + err_irq_uninstall: tidss_irq_uninstall(ddev); diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h index d14d5d28f0a3..84454a4855d1 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.h +++ b/drivers/gpu/drm/tidss/tidss_drv.h @@ -9,6 +9,8 @@ #include <linux/spinlock.h> +#include <drm/drm_device.h> + #define TIDSS_MAX_PORTS 4 #define TIDSS_MAX_PLANES 4 #define TIDSS_MAX_OLDI_TXES 2 diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c index 8f25159d0666..7688251beba2 100644 --- a/drivers/gpu/drm/tidss/tidss_oldi.c +++ b/drivers/gpu/drm/tidss/tidss_oldi.c @@ -464,7 +464,6 @@ int tidss_oldi_init(struct tidss_device *tidss) * which may still be connected. * Continue to search for that. 
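 *
 * (Reasoning for the removal below, stated here for clarity: ret is
 * reassigned at the top of the next loop iteration before it can be
 * read again, so the "ret = 0;" was dead code.)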
*/ - ret = 0; continue; } goto err_put_node; diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h index aecaf2728406..92c560c3a621 100644 --- a/drivers/gpu/drm/tidss/tidss_plane.h +++ b/drivers/gpu/drm/tidss/tidss_plane.h @@ -7,6 +7,8 @@ #ifndef __TIDSS_PLANE_H__ #define __TIDSS_PLANE_H__ +#include <drm/drm_plane.h> + #define to_tidss_plane(p) container_of((p), struct tidss_plane, plane) struct tidss_device; diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h index 9c560d0fdac0..9824d02d9d1f 100644 --- a/drivers/gpu/drm/tidss/tidss_scale_coefs.h +++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h @@ -9,6 +9,8 @@ #include <linux/types.h> +struct device; + struct tidss_scale_coefs { s16 c2[16]; s16 c1[16]; diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 5c3b51eb0a97..4824f863fdba 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -510,13 +510,12 @@ static void repaper_get_temperature(struct repaper_epd *epd) epd->factored_stage_time = epd->stage_time * factor10x / 10; } -static int repaper_fb_dirty(struct drm_framebuffer *fb, +static int repaper_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap, struct drm_format_conv_state *fmtcnv_state) { - struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0); struct repaper_epd *epd = drm_to_epd(fb->dev); unsigned int dst_pitch = 0; - struct iosys_map dst, vmap; + struct iosys_map dst; struct drm_rect clip; int idx, ret = 0; u8 *buf = NULL; @@ -546,8 +545,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb, goto out_free; iosys_map_set_vaddr(&dst, buf); - iosys_map_set_vaddr(&vmap, dma_obj->vaddr); - drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip, fmtcnv_state); + drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, &clip, fmtcnv_state); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); @@ -832,16 +830,15 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_state) { struct drm_plane_state *state = pipe->plane.state; - struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); struct drm_rect rect; if (!pipe->crtc.state->active) return; if (drm_atomic_helper_damage_merged(old_state, state, &rect)) - repaper_fb_dirty(state->fb, &fmtcnv_state); - - drm_format_conv_state_release(&fmtcnv_state); + repaper_fb_dirty(state->fb, shadow_plane_state->data, + &shadow_plane_state->fmtcnv_state); } static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { @@ -849,6 +846,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { .enable = repaper_pipe_enable, .disable = repaper_pipe_disable, .update = repaper_pipe_update, + DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, }; static int repaper_connector_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c index 03d2850310c4..64272cd0f6e2 100644 --- a/drivers/gpu/drm/tiny/sharp-memory.c +++ b/drivers/gpu/drm/tiny/sharp-memory.c @@ -126,28 +126,28 @@ static inline void sharp_memory_set_tx_buffer_addresses(u8 *buffer, static void sharp_memory_set_tx_buffer_data(u8 *buffer, struct drm_framebuffer *fb, + const struct iosys_map *vmap, struct drm_rect clip, u32 pitch, struct drm_format_conv_state *fmtcnv_state) { int ret; - struct iosys_map dst, vmap; - struct drm_gem_dma_object 
*dma_obj = drm_fb_dma_get_gem_obj(fb, 0); + struct iosys_map dst; ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); if (ret) return; iosys_map_set_vaddr(&dst, buffer); - iosys_map_set_vaddr(&vmap, dma_obj->vaddr); - drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state); + drm_fb_xrgb8888_to_mono(&dst, &pitch, vmap, fb, &clip, fmtcnv_state); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); } static int sharp_memory_update_display(struct sharp_memory_device *smd, struct drm_framebuffer *fb, + const struct iosys_map *vmap, struct drm_rect clip, struct drm_format_conv_state *fmtcnv_state) { @@ -163,7 +163,7 @@ static int sharp_memory_update_display(struct sharp_memory_device *smd, sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom); sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch); - sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state); + sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, vmap, clip, pitch, fmtcnv_state); ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size); @@ -206,7 +206,8 @@ static int sharp_memory_clear_display(struct sharp_memory_device *smd) return ret; } -static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect, +static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap, + struct drm_rect *rect, struct drm_format_conv_state *fmtconv_state) { struct drm_rect clip; @@ -218,7 +219,7 @@ static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *r clip.y1 = rect->y1; clip.y2 = rect->y2; - sharp_memory_update_display(smd, fb, clip, fmtconv_state); + sharp_memory_update_display(smd, fb, vmap, clip, fmtconv_state); } static int sharp_memory_plane_atomic_check(struct drm_plane *plane, @@ -242,7 +243,7 @@ static void sharp_memory_plane_atomic_update(struct drm_plane *plane, { struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct drm_plane_state *plane_state = plane->state; - struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct sharp_memory_device *smd; struct drm_rect rect; @@ -251,15 +252,15 @@ static void sharp_memory_plane_atomic_update(struct drm_plane *plane, return; if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect)) - sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state); - - drm_format_conv_state_release(&fmtcnv_state); + sharp_memory_fb_dirty(plane_state->fb, shadow_plane_state->data, + &rect, &shadow_plane_state->fmtcnv_state); } static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = { .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = sharp_memory_plane_atomic_check, .atomic_update = sharp_memory_plane_atomic_update, + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, }; static bool sharp_memory_format_mod_supported(struct drm_plane *plane, @@ -273,9 +274,7 @@ static const struct drm_plane_funcs sharp_memory_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + DRM_GEM_SHADOW_PLANE_FUNCS, .format_mod_supported = sharp_memory_format_mod_supported, }; diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c 
index 5e997ae8bc9c..2def155ce496 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -46,6 +46,7 @@ MODULE_PARM_DESC(super_pages, "Enable/Disable Super Pages support."); static int v3d_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct v3d_dev *v3d = to_v3d_dev(dev); struct drm_v3d_get_param *args = data; static const u32 reg_map[] = { @@ -107,6 +108,16 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES: args->value = !!v3d->gemfs; return 0; + case DRM_V3D_PARAM_GLOBAL_RESET_COUNTER: + mutex_lock(&v3d->reset_lock); + args->value = v3d->reset_counter; + mutex_unlock(&v3d->reset_lock); + return 0; + case DRM_V3D_PARAM_CONTEXT_RESET_COUNTER: + mutex_lock(&v3d->reset_lock); + args->value = v3d_priv->reset_counter; + mutex_unlock(&v3d->reset_lock); + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 411e47702f8a..82d84a96235f 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -204,6 +204,11 @@ struct v3d_dev { * all jobs. */ struct v3d_perfmon *global_perfmon; + + /* Global reset counter. The counter must be incremented when + * a GPU reset happens. It must be protected by @reset_lock. + */ + unsigned int reset_counter; }; static inline struct v3d_dev * @@ -233,6 +238,12 @@ struct v3d_file_priv { /* Stores the GPU stats for a specific queue for this fd. */ struct v3d_stats stats[V3D_MAX_QUEUES]; + + /* Per-fd reset counter, must be incremented when a job submitted + * by this fd causes a GPU reset. It must be protected by + * &struct v3d_dev->reset_lock. + */ + unsigned int reset_counter; }; struct v3d_bo { diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index cb9df8822472..f9d9a198d718 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -721,6 +721,8 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job) static enum drm_gpu_sched_stat v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) { + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_file_priv *v3d_priv = job->file->driver_priv; enum v3d_queue q; mutex_lock(&v3d->reset_lock); @@ -735,6 +737,9 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) /* get the GPU back into the init state */ v3d_reset(v3d); + v3d->reset_counter++; + v3d_priv->reset_counter++; + for (q = 0; q < V3D_MAX_QUEUES; q++) drm_sched_resubmit_jobs(&v3d->queue[q].sched); diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index e5805ca646c7..c3315935d8bc 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -131,9 +131,8 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, * in the plane update callback, and here we just check * whenever we must force the modeset. 
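 *
 * (Aside on the v3d reset counters added above -- an illustrative
 * userspace read, assuming libdrm's standard ioctl wrapper:
 *
 *   struct drm_v3d_get_param p = {
 *           .param = DRM_V3D_PARAM_GLOBAL_RESET_COUNTER,
 *   };
 *   drmIoctl(fd, DRM_IOCTL_V3D_GET_PARAM, &p);
 *   // p.value increases by one on every GPU reset
 *
 * The per-fd DRM_V3D_PARAM_CONTEXT_RESET_COUNTER is read the same way.)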
*/ - if (drm_atomic_crtc_needs_modeset(crtc_state)) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) output->needs_modeset = true; - } } static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 7dfb2006c561..1c15cbf326b7 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -162,18 +162,18 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev) if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL)) vgdev->has_virgl_3d = true; #endif - if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) { + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) vgdev->has_edid = true; - } - if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) { + + if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) vgdev->has_indirect = true; - } - if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) { + + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) vgdev->has_resource_assign_uuid = true; - } - if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) { + + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) vgdev->has_resource_blob = true; - } + if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region, VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) { if (!devm_request_mem_region(&vgdev->vdev->dev, @@ -193,9 +193,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev) (unsigned long)vgdev->host_visible_region.addr, (unsigned long)vgdev->host_visible_region.len); } - if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) { + + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) vgdev->has_context_init = true; - } DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible", vgdev->has_virgl_3d ? 
'+' : '-', diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 5517cff8715c..e6363c887500 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -47,6 +47,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) *resid = handle + 1; } else { int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); + if (handle < 0) return handle; *resid = handle + 1; @@ -56,9 +57,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) { - if (!virtio_gpu_virglrenderer_workaround) { + if (!virtio_gpu_virglrenderer_workaround) ida_free(&vgdev->resource_ida, id - 1); - } } void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 698ea7adb951..29e4b458ae57 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -120,7 +120,7 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc); if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); + return PTR_ERR(crtc_state); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_NO_SCALING, diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 55a15e247dd1..8181b22b9b46 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -248,6 +248,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) { if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) { struct virtio_gpu_ctrl_hdr *cmd; + cmd = virtio_gpu_vbuf_ctrl_hdr(entry); DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n", le32_to_cpu(resp->type), @@ -468,6 +469,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, if (vbuf->data_size) { if (is_vmalloc_addr(vbuf->data_buf)) { int sg_ents; + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, &sg_ents); if (!sgt) { diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 8d7ca0cdd79f..2ee3749e2b28 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -77,9 +77,22 @@ int vkms_output_init(struct vkms_device *vkmsdev) return ret; } + encoder_cfg->encoder->possible_clones |= + drm_encoder_mask(encoder_cfg->encoder); + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) { encoder_cfg->encoder->possible_crtcs |= drm_crtc_mask(&possible_crtc->crtc->crtc); + + if (vkms_config_crtc_get_writeback(possible_crtc)) { + struct drm_encoder *wb_encoder = + &possible_crtc->crtc->wb_encoder; + + encoder_cfg->encoder->possible_clones |= + drm_encoder_mask(wb_encoder); + wb_encoder->possible_clones |= + drm_encoder_mask(encoder_cfg->encoder); + } } } diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index fe163271d5b5..45d69a3b85f6 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -174,6 +174,8 @@ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev, if (ret) return ret; vkms_output->wb_encoder.possible_crtcs |= drm_crtc_mask(&vkms_output->crtc); + vkms_output->wb_encoder.possible_clones |= + 
drm_encoder_mask(&vkms_output->wb_encoder); drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index c2294abbe753..00be92da5509 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -538,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct dma_fence *f, if (likely(eaction->tv_sec != NULL)) { struct timespec64 ts; - ktime_to_timespec64(f->timestamp); + ts = ktime_to_timespec64(f->timestamp); /* monotonic time, so no y2038 overflow */ *eaction->tv_sec = ts.tv_sec; *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC; diff --git a/drivers/gpu/drm/xe/regs/xe_bars.h b/drivers/gpu/drm/xe/regs/xe_bars.h index ce05b6ae832f..880140d6ccdc 100644 --- a/drivers/gpu/drm/xe/regs/xe_bars.h +++ b/drivers/gpu/drm/xe/regs/xe_bars.h @@ -7,5 +7,6 @@ #define GTTMMADR_BAR 0 /* MMIO + GTT */ #define LMEM_BAR 2 /* VRAM */ +#define VF_LMEM_BAR 9 /* VF VRAM */ #endif diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index f08fc4377d25..c17ed1ae8649 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -332,6 +332,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe int ret = 0; u32 reg_val, max; struct xe_reg rapl_limit; + u64 max_supp_power_limit = 0; mutex_lock(&hwmon->hwmon_lock); @@ -356,6 +357,20 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe goto unlock; } + /* + * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to + * the supported maximum (U12.3 format). + * This is to avoid truncation during reg_val calculation below and ensure the valid + * power limit is sent for pcode which would clamp it to card-supported value. + */ + max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER; + if (value > max_supp_power_limit) { + value = max_supp_power_limit; + drm_info(&hwmon->xe->drm, + "Power limit clamped as selected %s exceeds channel %d limit\n", + PWR_ATTR_TO_STR(attr), channel); + } + /* Computation in 64-bits to avoid overflow. Round to nearest. */ reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER); @@ -739,9 +754,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel, { int ret; u32 uval; + u64 max_crit_power_curr = 0; mutex_lock(&hwmon->hwmon_lock); + /* + * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1 + * max supported value, clamp it to the command's max (U10.6 format). + * This is to avoid truncation during uval calculation below and ensure the valid power + * limit is sent for pcode which would clamp it to card-supported value. 
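+ *
+ * Worked example, assuming the mask spans the full 16 bits implied by
+ * the U10.6 note (10 integer + 6 fractional) with POWER_SETUP_I1_SHIFT
+ * equal to 6:
+ *
+ *   max_crit_power_curr = (0xffff >> 6) * scale_factor
+ *                       = 1023 * scale_factor
+ *
+ * so any sysfs write above 1023 full-scale units is clamped before the
+ * left-shift below can overflow uval.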
+ */ + max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor; + if (value > max_crit_power_curr) { + value = max_crit_power_curr; + drm_info(&hwmon->xe->drm, + "Power limit clamped as selected exceeds channel %d limit\n", + channel); + } uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor); ret = xe_hwmon_pcode_write_i1(hwmon, uval); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index ba1cff2e4cda..7d20ac4bb633 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) || !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) { int buf_offset = 0; + void *bounce; + int err; + + BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES)); + bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL); + if (!bounce) + return -ENOMEM; /* * Less than ideal for large unaligned access but this should be * fairly rare, can fixup if this becomes common. */ do { - u8 bounce[XE_CACHELINE_BYTES]; - void *ptr = (void *)bounce; - int err; int copy_bytes = min_t(int, bytes_left, XE_CACHELINE_BYTES - (offset & XE_CACHELINE_MASK)); @@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, err = xe_migrate_access_memory(m, bo, offset & ~XE_CACHELINE_MASK, - (void *)ptr, - sizeof(bounce), 0); + bounce, + XE_CACHELINE_BYTES, 0); if (err) - return err; + break; if (write) { - memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes); + memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes); err = xe_migrate_access_memory(m, bo, offset & ~XE_CACHELINE_MASK, - (void *)ptr, - sizeof(bounce), write); + bounce, + XE_CACHELINE_BYTES, write); if (err) - return err; + break; } else { - memcpy(buf + buf_offset, ptr + ptr_offset, + memcpy(buf + buf_offset, bounce + ptr_offset, copy_bytes); } @@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, offset += copy_bytes; } while (bytes_left); - return 0; + kfree(bounce); + return err; } dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write); @@ -1882,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, else current_bytes = min_t(int, bytes_left, cursor.size); - if (fence) - dma_fence_put(fence); + if (current_bytes & ~PAGE_MASK) { + int pitch = 4; + + current_bytes = min_t(int, current_bytes, S16_MAX * pitch); + } __fence = xe_migrate_vram(m, current_bytes, (unsigned long)buf & ~PAGE_MASK, @@ -1892,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, XE_MIGRATE_COPY_TO_VRAM : XE_MIGRATE_COPY_TO_SRAM); if (IS_ERR(__fence)) { - if (fence) + if (fence) { dma_fence_wait(fence, false); + dma_fence_put(fence); + } fence = __fence; goto out_err; } + + dma_fence_put(fence); fence = __fence; buf += current_bytes; diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c index 447a7867eecb..af05db07162e 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.c +++ b/drivers/gpu/drm/xe/xe_pci_sriov.c @@ -3,6 +3,10 @@ * Copyright © 2023-2024 Intel Corporation */ +#include <linux/bitops.h> +#include <linux/pci.h> + +#include "regs/xe_bars.h" #include "xe_assert.h" #include "xe_device.h" #include "xe_gt_sriov_pf_config.h" @@ -128,6 +132,18 @@ static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, } } +static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs) +{ + struct 
pci_dev *pdev = to_pci_dev(xe->drm.dev); + u32 sizes; + + sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs); + if (!sizes) + return 0; + + return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes)); +} + static int pf_enable_vfs(struct xe_device *xe, int num_vfs) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -158,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) if (err < 0) goto failed; + if (IS_DGFX(xe)) { + err = resize_vf_vram_bar(xe, num_vfs); + if (err) + xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err); + } + err = pci_enable_sriov(pdev, num_vfs); if (err < 0) goto failed; diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c index 1c3c04d52f55..90244fe59b59 100644 --- a/drivers/gpu/drm/xe/xe_shrinker.c +++ b/drivers/gpu/drm/xe/xe_shrinker.c @@ -54,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea write_unlock(&shrinker->lock); } -static s64 xe_shrinker_walk(struct xe_device *xe, - struct ttm_operation_ctx *ctx, - const struct xe_bo_shrink_flags flags, - unsigned long to_scan, unsigned long *scanned) +static s64 __xe_shrinker_walk(struct xe_device *xe, + struct ttm_operation_ctx *ctx, + const struct xe_bo_shrink_flags flags, + unsigned long to_scan, unsigned long *scanned) { unsigned int mem_type; s64 freed = 0, lret; @@ -93,6 +93,48 @@ static s64 xe_shrinker_walk(struct xe_device *xe, return freed; } +/* + * Try shrinking idle objects without writeback first, then if not sufficient, + * try also non-idle objects and finally if that's not sufficient either, + * add writeback. This avoids stalls and explicit writebacks with light or + * moderate memory pressure. + */ +static s64 xe_shrinker_walk(struct xe_device *xe, + struct ttm_operation_ctx *ctx, + const struct xe_bo_shrink_flags flags, + unsigned long to_scan, unsigned long *scanned) +{ + bool no_wait_gpu = true; + struct xe_bo_shrink_flags save_flags = flags; + s64 lret, freed; + + swap(no_wait_gpu, ctx->no_wait_gpu); + save_flags.writeback = false; + lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); + swap(no_wait_gpu, ctx->no_wait_gpu); + if (lret < 0 || *scanned >= to_scan) + return lret; + + freed = lret; + if (!ctx->no_wait_gpu) { + lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); + if (lret < 0) + return lret; + freed += lret; + if (*scanned >= to_scan) + return freed; + } + + if (flags.writeback) { + lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned); + if (lret < 0) + return lret; + freed += lret; + } + + return freed; +} + static unsigned long xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) { @@ -199,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup); shrink_flags.purge = false; + lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags, nr_to_scan, &nr_scanned); if (lret >= 0) diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 2035604121e6..86842247e9d8 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, switch (operation) { case DRM_XE_VM_BIND_OP_MAP: - case DRM_XE_VM_BIND_OP_MAP_USERPTR: - ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, - obj, bo_offset_or_userptr); + case DRM_XE_VM_BIND_OP_MAP_USERPTR: { + struct drm_gpuvm_map_req map_req = { + .map.va.addr = addr, + 
.map.va.range = range, + .map.gem.obj = obj, + .map.gem.offset = bo_offset_or_userptr, + }; + + ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req); break; + } case DRM_XE_VM_BIND_OP_UNMAP: ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); break; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 73747d20df85..91a7b7e7c0c8 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1679,7 +1679,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { }; static const struct x86_cpu_id intel_mwait_ids[] __initconst = { - X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL), {} }; diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c index 76e490070e9c..c354abdafc1b 100644 --- a/drivers/net/dsa/microchip/ksz8.c +++ b/drivers/net/dsa/microchip/ksz8.c @@ -36,15 +36,14 @@ static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { - regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0); + ksz_rmw8(dev, addr, bits, set ? bits : 0); } static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, bool set) { - regmap_update_bits(ksz_regmap_8(dev), - dev->dev_ops->get_port_addr(port, offset), - bits, set ? bits : 0); + ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits, + set ? bits : 0); } /** @@ -1955,16 +1954,19 @@ int ksz8_setup(struct dsa_switch *ds) ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true); /* Enable aggressive back off algorithm in half duplex mode. */ - regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1, - SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); + ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); + if (ret) + return ret; /* * Make sure unicast VLAN boundary is set as default and * enable no excessive collision drop. 
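 *
 * (Aside, not from the patch: ksz_rmw8() is the driver's 8-bit
 * read-modify-write helper around regmap_update_bits(); the conversions
 * below also check its return value, so register I/O errors now fail
 * the setup path instead of being silently ignored.)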
*/ - regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2, - UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, - UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); + ret = ksz_rmw8(dev, REG_SW_CTRL_2, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); + if (ret) + return ret; ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 7292bfe2f7ca..4cb14288ff0f 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1447,6 +1447,7 @@ static const struct regmap_range ksz8873_valid_regs[] = { regmap_reg_range(0x3f, 0x3f), /* advanced control registers */ + regmap_reg_range(0x43, 0x43), regmap_reg_range(0x60, 0x6f), regmap_reg_range(0x70, 0x75), regmap_reg_range(0x76, 0x78), diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5578ddcb465d..2800a90fba1f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -926,15 +926,21 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, + unsigned int *offset, gfp_t gfp) { netmem_ref netmem; - netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { + netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp); + } else { + netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); + *offset = 0; + } if (!netmem) return 0; - *mapping = page_pool_get_dma_addr_netmem(netmem); + *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset; return netmem; } @@ -1029,7 +1035,7 @@ static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, dma_addr_t mapping; netmem_ref netmem; - netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp); + netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp); if (!netmem) return -ENOMEM; @@ -3819,7 +3825,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, if (BNXT_RX_PAGE_MODE(bp)) pp.pool_size += bp->rx_ring_size / rx_size_fac; pp.nid = numa_node; - pp.napi = &rxr->bnapi->napi; pp.netdev = bp->dev; pp.dev = &bp->pdev->dev; pp.dma_dir = bp->rx_dir; @@ -3851,6 +3856,12 @@ err_destroy_pp: return PTR_ERR(pool); } +static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr) +{ + page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi); + page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi); +} + static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) { u16 mem_size; @@ -3889,6 +3900,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); if (rc) return rc; + bnxt_enable_rx_page_pool(rxr); rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); if (rc < 0) @@ -16031,6 +16043,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) goto err_reset; } + bnxt_enable_rx_page_pool(rxr); napi_enable_locked(&bnapi->napi); bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index 503cfbfb4a8a..83cf75bf7a17 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -53,9 +53,11 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum 
hbg_reset_type type) { int ret; - ASSERT_RTNL(); + if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state)) + return -EBUSY; if (netif_running(priv->netdev)) { + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); dev_warn(&priv->pdev->dev, "failed to reset because port is up\n"); return -EBUSY; @@ -64,7 +66,6 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type) netif_device_detach(priv->netdev); priv->reset_type = type; - set_bit(HBG_NIC_STATE_RESETTING, &priv->state); clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET); if (ret) { @@ -84,29 +85,26 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type) type != priv->reset_type) return 0; - ASSERT_RTNL(); - - clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); ret = hbg_rebuild(priv); if (ret) { priv->stats.reset_fail_cnt++; set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); dev_err(&priv->pdev->dev, "failed to rebuild after reset\n"); return ret; } netif_device_attach(priv->netdev); + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); dev_info(&priv->pdev->dev, "reset done\n"); return ret; } -/* must be protected by rtnl lock */ int hbg_reset(struct hbg_priv *priv) { int ret; - ASSERT_RTNL(); ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION); if (ret) return ret; @@ -171,7 +169,6 @@ static void hbg_pci_err_reset_prepare(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct hbg_priv *priv = netdev_priv(netdev); - rtnl_lock(); hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR); } @@ -181,7 +178,6 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev) struct hbg_priv *priv = netdev_priv(netdev); hbg_reset_done(priv, HBG_RESET_TYPE_FLR); - rtnl_unlock(); } static const struct pci_error_handlers hbg_pci_err_handler = { diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c index 8cca8316ba40..d0aa0661ecd4 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -12,6 +12,8 @@ #define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000) #define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000) +#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000) +#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000) /* little endian or big endian. 
* ctrl means packet description, data means skb packet data */ @@ -228,6 +230,9 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr) void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) { + u32 link_status; + int ret; + hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE); hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR, @@ -239,8 +244,14 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); - if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR, - HBG_REG_AN_NEG_STATE_NP_LINK_OK_B)) + /* wait MAC link up */ + ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR, + link_status, + FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B, + link_status), + HBG_MAC_LINK_WAIT_INTERVAL_US, + HBG_MAC_LINK_WAIT_TIMEOUT_US); + if (ret) hbg_np_link_fail_task_schedule(priv); } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h index 2883a5899ae2..8b6110599e10 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h @@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir) static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring) { - return (ring->ntu + ring->len - ring->ntc) % ring->len; + u32 len = READ_ONCE(ring->len); + + if (!len) + return 0; + + return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len; } netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c index 54f1b83dfe42..d227f4d2a2d1 100644 --- a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c @@ -543,6 +543,7 @@ int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter) attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = adapter->hw.bus.func; + attrs.no_phys_port_name = 1; ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id); devlink_port_attrs_set(devlink_port, &attrs); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 09ae16e026eb..6c363f9b0ce2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -330,15 +330,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); - ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks); + ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks); if (ret < 0) - return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n"); + return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n"); plat_dat->num_clks = ret; - ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks); - if (ret) - return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n"); - plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat, data->stmmac_clk_name); @@ -346,7 +342,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) ret = data->probe(pdev, plat_dat, &stmmac_res); if (ret < 0) { dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n"); - clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); return ret; } @@ -370,15 +365,11 @@ remove: static void dwc_eth_dwmac_remove(struct platform_device *pdev) { const struct 
dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev); - struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev); stmmac_dvr_remove(&pdev->dev); if (data->remove) data->remove(pdev); - - if (plat_dat) - clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); } static const struct of_device_id dwc_eth_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 79b92130a03f..f6687c2f30f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1765,11 +1765,15 @@ err_gmac_powerdown: static void rk_gmac_remove(struct platform_device *pdev) { - struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev); + struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev)); + struct rk_priv_data *bsp_priv = priv->plat->bsp_priv; stmmac_dvr_remove(&pdev->dev); rk_gmac_powerdown(bsp_priv); + + if (priv->plat->phy_node && bsp_priv->integrated_phy) + clk_put(bsp_priv->clk_phy); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c index c72ee759aae5..f2946bea0bc2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c @@ -211,6 +211,7 @@ static int thead_dwmac_probe(struct platform_device *pdev) struct stmmac_resources stmmac_res; struct plat_stmmacenet_data *plat; struct thead_dwmac *dwmac; + struct clk *apb_clk; void __iomem *apb; int ret; @@ -224,6 +225,19 @@ static int thead_dwmac_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(plat), "dt configuration failed\n"); + /* + * The APB clock is essential for accessing glue registers. However, + * old devicetrees don't describe it correctly. We continue to probe + * and emit a warning if it isn't present. 
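A note on the error handling just below: devm_clk_get_enabled() reports a clock that is simply absent from the devicetree as -ENOENT, which is the old-binding case this comment tolerates with a warning; any other error, including -EPROBE_DEFER, still aborts the probe via dev_err_probe(). The silent alternative, devm_clk_get_optional_enabled(), returns NULL for a missing clock, which is presumably why it was not used here: the driver still wants to warn.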
+ */ + apb_clk = devm_clk_get_enabled(&pdev->dev, "apb"); + if (PTR_ERR(apb_clk) == -ENOENT) + dev_warn(&pdev->dev, + "cannot get apb clock, link may break after speed changes\n"); + else if (IS_ERR(apb_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk), + "failed to get apb clock\n"); + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); if (!dwmac) return -ENOMEM; diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 50bfbc2779e4..d8c9fe1d98c4 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -621,7 +621,8 @@ exit: static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on) { - u32 val, cap, ret = 0; + u32 val, cap; + int ret = 0; mutex_lock(&iep->ptp_clk_mutex); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 2b973d6e2341..6c7d776ae4ee 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -50,6 +50,8 @@ /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) +static void emac_adjust_link(struct net_device *ndev); + static int emac_get_tx_ts(struct prueth_emac *emac, struct emac_tx_ts_response *rsp) { @@ -229,6 +231,10 @@ static int prueth_emac_common_start(struct prueth *prueth) ret = icssg_config(prueth, emac, slice); if (ret) goto disable_class; + + mutex_lock(&emac->ndev->phydev->lock); + emac_adjust_link(emac->ndev); + mutex_unlock(&emac->ndev->phydev->lock); } ret = prueth_emac_start(prueth); diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 0e0fe32d2da4..045c5177262e 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -138,7 +138,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev) static inline int dev_is_ethdev(struct net_device *dev) { - return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); + return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev); } /* ------------------------------------------------------------------------ */ diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index cb6f5482d203..7397c693f984 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -1061,6 +1061,7 @@ struct net_device_context { struct net_device __rcu *vf_netdev; struct netvsc_vf_pcpu_stats __percpu *vf_stats; struct delayed_work vf_takeover; + struct delayed_work vfns_work; /* 1: allocated, serial number is valid. 0: not allocated */ u32 vf_alloc; @@ -1075,6 +1076,8 @@ struct net_device_context { struct netvsc_device_info *saved_netvsc_dev_info; }; +void netvsc_vfns_work(struct work_struct *w); + /* Azure hosts don't support non-TCP port numbers in hashing for fragmented * packets. We can use ethtool to change UDP hash level when necessary. 
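On a related note, the netvsc_vfns_work() handler declared above (and defined in the next hunk) uses the common rtnl_trylock()-or-reschedule idiom, so work queued from notifier context never sleeps waiting for the RTNL lock. A stripped-down sketch of the idiom, with hypothetical names (demo_ctx is assumed to embed a struct delayed_work named work):

	static void demo_work(struct work_struct *w)
	{
		struct demo_ctx *ctx = container_of(w, struct demo_ctx, work.work);

		if (!rtnl_trylock()) {
			// RTNL is busy: retry one jiffy later instead of blocking here
			schedule_delayed_work(&ctx->work, 1);
			return;
		}
		// ... perform the RTNL-protected update ...
		rtnl_unlock();
	}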
*/ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f44753756358..39c892e46cb0 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2522,6 +2522,7 @@ static int netvsc_probe(struct hv_device *dev, spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); + INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work); net_device_ctx->vf_stats = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); @@ -2666,6 +2667,8 @@ static void netvsc_remove(struct hv_device *dev) cancel_delayed_work_sync(&ndev_ctx->dwork); rtnl_lock(); + cancel_delayed_work_sync(&ndev_ctx->vfns_work); + nvdev = rtnl_dereference(ndev_ctx->nvdev); if (nvdev) { cancel_work_sync(&nvdev->subchan_work); @@ -2707,6 +2710,7 @@ static int netvsc_suspend(struct hv_device *dev) cancel_delayed_work_sync(&ndev_ctx->dwork); rtnl_lock(); + cancel_delayed_work_sync(&ndev_ctx->vfns_work); nvdev = rtnl_dereference(ndev_ctx->nvdev); if (nvdev == NULL) { @@ -2800,6 +2804,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev) } } +void netvsc_vfns_work(struct work_struct *w) +{ + struct net_device_context *ndev_ctx = + container_of(w, struct net_device_context, vfns_work.work); + struct net_device *ndev; + + if (!rtnl_trylock()) { + schedule_delayed_work(&ndev_ctx->vfns_work, 1); + return; + } + + ndev = hv_get_drvdata(ndev_ctx->device_ctx); + if (!ndev) + goto out; + + netvsc_event_set_vf_ns(ndev); + +out: + rtnl_unlock(); +} + /* * On Hyper-V, every VF interface is matched with a corresponding * synthetic interface. The synthetic interface is presented first @@ -2810,10 +2835,12 @@ static int netvsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + struct net_device_context *ndev_ctx; int ret = 0; if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) { - netvsc_event_set_vf_ns(event_dev); + ndev_ctx = netdev_priv(event_dev); + schedule_delayed_work(&ndev_ctx->vfns_work, 0); return NOTIFY_DONE; } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 39fe28af48b9..0178219f0db5 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -710,9 +710,13 @@ static struct nsim_rq *nsim_queue_alloc(void) static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq) { hrtimer_cancel(&rq->napi_timer); - local_bh_disable(); - dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen); - local_bh_enable(); + + if (rq->skb_queue.qlen) { + local_bh_disable(); + dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen); + local_bh_enable(); + } + skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE); kfree(rq); } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index fda2e27c1810..cad6ed3aa10b 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -91,6 +91,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev) if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) return -EINVAL; + gpiod_put(mdiodev->reset_gpio); reset_control_put(mdiodev->reset_ctrl); mdiodev->bus->mdio_map[mdiodev->addr] = NULL; diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c index 48dc4bf85125..f43973e73ea3 100644 --- a/drivers/net/phy/mdio_bus_provider.c +++ b/drivers/net/phy/mdio_bus_provider.c @@ -443,9 +443,6 @@ void mdiobus_unregister(struct 
mii_bus *bus) if (!mdiodev) continue; - if (mdiodev->reset_gpio) - gpiod_put(mdiodev->reset_gpio); - mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); } diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 4c6d905f0a9f..87adb6508017 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -1965,24 +1965,27 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev) return macsec_ability; } +static bool tja11xx_phy_id_compare(struct phy_device *phydev, + const struct phy_driver *phydrv) +{ + u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] : + phydev->phy_id; + + return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask); +} + static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev, const struct phy_driver *phydrv) { - if (!phy_id_compare(phydev->phy_id, phydrv->phy_id, - phydrv->phy_id_mask)) - return 0; - - return !nxp_c45_macsec_ability(phydev); + return tja11xx_phy_id_compare(phydev, phydrv) && + !nxp_c45_macsec_ability(phydev); } static int tja11xx_macsec_match_phy_device(struct phy_device *phydev, const struct phy_driver *phydrv) { - if (!phy_id_compare(phydev->phy_id, phydrv->phy_id, - phydrv->phy_id_mask)) - return 0; - - return nxp_c45_macsec_ability(phydev); + return tja11xx_phy_id_compare(phydev, phydrv) && + nxp_c45_macsec_ability(phydev); } static const struct nxp_c45_regmap tja1120_regmap = { diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 9b0318fb50b5..d9f5942ccc44 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -676,6 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev) priv->mdio->read = &asix_mdio_bus_read; priv->mdio->write = &asix_mdio_bus_write; priv->mdio->name = "Asix MDIO Bus"; + priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR)); /* mii bus name is usb-<usb bus number>-<usb device number> */ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f5647ee0adde..e56901bb6ebc 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1361,6 +1361,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)}, /* Telit FN990A w/audio */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */ diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 995a7207bdf8..f357a7ac70ac 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -81,7 +81,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev) static __inline__ int dev_is_ethdev(struct net_device *dev) { - return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); + return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev); } /* ------------------------------------------------------------------------ */ diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index a6aad743c282..b352df4cd3f9 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -24,6 +24,11 @@ #define PTP_DEFAULT_MAX_VCLOCKS 20 #define PTP_MAX_CHANNELS 2048 +enum { + 
PTP_LOCK_PHYSICAL = 0, + PTP_LOCK_VIRTUAL, +}; + struct timestamp_event_queue { struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; int head; diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index 2fdeedd60e21..64c950456517 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp) return PTP_VCLOCK_REFRESH_INTERVAL; } +static void ptp_vclock_set_subclass(struct ptp_clock *ptp) +{ + lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL); +} + static const struct ptp_clock_info ptp_vclock_info = { .owner = THIS_MODULE, .name = "ptp virtual clock", @@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) return NULL; } + ptp_vclock_set_subclass(vclock->clock); + timecounter_init(&vclock->tc, &vclock->cc, 0); ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 2db8d9529b8f..7c4d7bb3a56f 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -6280,7 +6280,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } phba->nvmeio_trc_on = 1; phba->nvmeio_trc_output_idx = 0; - phba->nvmeio_trc = NULL; } else { nvmeio_off: phba->nvmeio_trc_size = 0; diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 2797aa75a689..aff6c9d5e7c2 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -666,7 +666,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) * Take early refcount for outstanding I/O requests we schedule during * delete processing for unreg_vpi. Always keep this before * scsi_remove_host() as we can no longer obtain a reference through - * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL. + * scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL. 
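Put differently: once scsi_remove_host() moves the host to SHOST_DEL, scsi_host_get() starts returning NULL, so the reference that keeps shost alive for the unreg_vpi I/O scheduled during delete processing must be taken up front and dropped again later in the teardown path. As the code below shows, the whole vport delete is failed with VPORT_INVAL if even this early reference cannot be obtained.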
*/ if (!scsi_host_get(shost)) return VPORT_INVAL; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 0847767d4d43..353cb60e1abe 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2674,8 +2674,10 @@ static int resp_rsup_tmfs(struct scsi_cmnd *scp, static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) { /* Read-Write Error Recovery page for mode_sense */ - unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, - 5, 0, 0xff, 0xff}; + static const unsigned char err_recov_pg[] = { + 0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, + 5, 0, 0xff, 0xff + }; memcpy(p, err_recov_pg, sizeof(err_recov_pg)); if (1 == pcontrol) @@ -2685,8 +2687,10 @@ static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) { /* Disconnect-Reconnect page for mode_sense */ - unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; + static const unsigned char disconnect_pg[] = { + 0x2, 0xe, 128, 128, 0, 10, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 + }; memcpy(p, disconnect_pg, sizeof(disconnect_pg)); if (1 == pcontrol) @@ -2696,9 +2700,11 @@ static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) static int resp_format_pg(unsigned char *p, int pcontrol, int target) { /* Format device page for mode_sense */ - unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0x40, 0, 0, 0}; + static const unsigned char format_pg[] = { + 0x3, 0x16, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0x40, 0, 0, 0 + }; memcpy(p, format_pg, sizeof(format_pg)); put_unaligned_be16(sdebug_sectors_per, p + 10); @@ -2716,10 +2722,14 @@ static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, static int resp_caching_pg(unsigned char *p, int pcontrol, int target) { /* Caching page for mode_sense */ - unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, - 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; + static const unsigned char ch_caching_pg[] = { + /* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + static const unsigned char d_caching_pg[] = { + 0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, + 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0 + }; if (SDEBUG_OPT_N_WCE & sdebug_opts) caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ @@ -2738,8 +2748,10 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target) { /* Control mode page for mode_sense */ unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, - 0, 0, 0x2, 0x4b}; + static const unsigned char d_ctrl_m_pg[] = { + 0xa, 10, 2, 0, 0, 0, 0, 0, + 0, 0, 0x2, 0x4b + }; if (sdebug_dsense) ctrl_m_pg[2] |= 0x4; @@ -2794,10 +2806,14 @@ static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target) static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target) { /* Informational Exceptions control mode page for mode_sense */ - unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, - 0, 0, 0x0, 0x0}; - unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, - 0, 0, 0x0, 0x0}; + static const unsigned char ch_iec_m_pg[] = { + /* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, + 0, 0, 0x0, 0x0 + }; + static const unsigned char d_iec_m_pg[] = { + 0x1c, 0xa, 0x08, 0, 0, 
0, 0, 0, + 0, 0, 0x0, 0x0 + }; memcpy(p, iec_m_pg, sizeof(iec_m_pg)); if (1 == pcontrol) @@ -2809,8 +2825,9 @@ static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target) static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target) { /* SAS SSP mode page - short format for mode_sense */ - unsigned char sas_sf_m_pg[] = {0x19, 0x6, - 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0}; + static const unsigned char sas_sf_m_pg[] = { + 0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0 + }; memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg)); if (1 == pcontrol) @@ -2854,9 +2871,10 @@ static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target, static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol) { /* SAS SSP shared protocol specific port mode subpage */ - unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - }; + static const unsigned char sas_sha_m_pg[] = { + 0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }; memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg)); if (1 == pcontrol) @@ -2923,8 +2941,10 @@ static int process_medium_part_m_pg(struct sdebug_dev_info *devip, static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target, unsigned char dce) { /* Compression page for mode_sense (tape) */ - unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 00, 00}; + static const unsigned char compression_pg[] = { + 0x0f, 14, 0x40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }; memcpy(p, compression_pg, sizeof(compression_pg)); if (dce) @@ -3282,9 +3302,10 @@ bad_pcode: static int resp_temp_l_pg(unsigned char *arr) { - unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38, - 0x0, 0x1, 0x3, 0x2, 0x0, 65, - }; + static const unsigned char temp_l_pg[] = { + 0x0, 0x0, 0x3, 0x2, 0x0, 38, + 0x0, 0x1, 0x3, 0x2, 0x0, 65, + }; memcpy(arr, temp_l_pg, sizeof(temp_l_pg)); return sizeof(temp_l_pg); @@ -3292,8 +3313,9 @@ static int resp_temp_l_pg(unsigned char *arr) static int resp_ie_l_pg(unsigned char *arr) { - unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, - }; + static const unsigned char ie_l_pg[] = { + 0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, + }; memcpy(arr, ie_l_pg, sizeof(ie_l_pg)); if (iec_m_pg[2] & 0x4) { /* TEST bit set */ @@ -3305,11 +3327,12 @@ static int resp_ie_l_pg(unsigned char *arr) static int resp_env_rep_l_spg(unsigned char *arr) { - unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8, - 0x0, 40, 72, 0xff, 45, 18, 0, 0, - 0x1, 0x0, 0x23, 0x8, - 0x0, 55, 72, 35, 55, 45, 0, 0, - }; + static const unsigned char env_rep_l_spg[] = { + 0x0, 0x0, 0x23, 0x8, + 0x0, 40, 72, 0xff, 45, 18, 0, 0, + 0x1, 0x0, 0x23, 0x8, + 0x0, 55, 72, 35, 55, 45, 0, 0, + }; memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg)); return sizeof(env_rep_l_spg); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 169af7d47ce7..15ba493d2138 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -265,7 +265,7 @@ show_shost_supported_mode(struct device *dev, struct device_attribute *attr, return show_shost_mode(supported_mode, buf); } -static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); +static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL); static ssize_t show_shost_active_mode(struct device *dev, @@ -279,7 +279,7 @@ show_shost_active_mode(struct device *dev, return show_shost_mode(shost->active_mode, buf); } -static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); +static DEVICE_ATTR(active_mode, S_IRUGO, 
show_shost_active_mode, NULL); static int check_reset_type(const char *str) { diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 2a5f24ee858c..034a2a535a1e 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1232,7 +1232,7 @@ err: } static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, - struct device_node *np, bool off) + struct device_node *np) { struct device *dev = pg->pmc->dev; int err; @@ -1247,22 +1247,6 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, err = reset_control_acquire(pg->reset); if (err < 0) { pr_err("failed to acquire resets: %d\n", err); - goto out; - } - - if (off) { - err = reset_control_assert(pg->reset); - } else { - err = reset_control_deassert(pg->reset); - if (err < 0) - goto out; - - reset_control_release(pg->reset); - } - -out: - if (err) { - reset_control_release(pg->reset); reset_control_put(pg->reset); } @@ -1308,20 +1292,43 @@ static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) goto set_available; } - err = tegra_powergate_of_get_resets(pg, np, off); + err = tegra_powergate_of_get_resets(pg, np); if (err < 0) { dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err); goto remove_clks; } - if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { - if (off) - WARN_ON(tegra_powergate_power_up(pg, true)); + /* + * If the power-domain is off, then ensure the resets are asserted. + * If the power-domain is on, then power down to ensure that when it + * is turned on, the power-domain, clocks and resets are all in the + * expected state. + */ + if (off) { + err = reset_control_assert(pg->reset); + if (err) { + pr_err("failed to assert resets: %d\n", err); + goto remove_resets; + } + } else { + err = tegra_powergate_power_down(pg); + if (err) { + dev_err(dev, "failed to turn off PM domain %s: %d\n", + pg->genpd.name, err); + goto remove_resets; + } + } + /* + * If PM_GENERIC_DOMAINS is not enabled, power on + * the domain and skip the genpd registration.
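This also explains why pm_genpd_init() below now passes a constant true for its is_off argument: at this point the domain has either been left off with its resets asserted or has been explicitly powered down, so genpd's initial "off" bookkeeping matches the hardware state.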
+ */ + if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { + WARN_ON(tegra_powergate_power_up(pg, true)); goto remove_resets; } - err = pm_genpd_init(&pg->genpd, NULL, off); + err = pm_genpd_init(&pg->genpd, NULL, true); if (err < 0) { dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np, err); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 96ad57c3144b..efd7a811a002 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -7138,14 +7138,19 @@ static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba) static irqreturn_t ufshcd_intr(int irq, void *__hba) { struct ufs_hba *hba = __hba; + u32 intr_status, enabled_intr_status; /* Move interrupt handling to thread when MCQ & ESI are not enabled */ if (!hba->mcq_enabled || !hba->mcq_esi_enabled) return IRQ_WAKE_THREAD; + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); + enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + + ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); + /* Directly handle interrupts since MCQ ESI handlers do the hard job */ - return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) & - ufshcd_readl(hba, REG_INTERRUPT_ENABLE)); + return ufshcd_sl_intr(hba, enabled_intr_status); } static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) @@ -10516,8 +10521,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) err = devm_add_action_or_reset(dev, ufshcd_devres_release, host); if (err) - return dev_err_probe(dev, err, - "failed to add ufshcd dealloc action\n"); + return err; host->nr_maps = HCTX_TYPE_POLL + 1; hba = shost_priv(host); diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 86ae73b89d4d..f902ce08c95a 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -818,7 +818,7 @@ static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu) unsigned int q_index; q_index = map->mq_map[cpu]; - if (q_index > nr) { + if (q_index >= nr) { dev_err(hba->dev, "hwq index %d exceed %d\n", q_index, nr); return MTK_MCQ_INVALID_IRQ; diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index d2b3ae7113ab..b01ec99106cd 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -116,13 +116,11 @@ e_free: static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { + struct snp_derived_key_resp *derived_key_resp __free(kfree) = NULL; struct snp_derived_key_req *derived_key_req __free(kfree) = NULL; - struct snp_derived_key_resp derived_key_resp = {0}; struct snp_msg_desc *mdesc = snp_dev->msg_desc; struct snp_guest_req req = {}; int rc, resp_len; - /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ - u8 buf[64 + 16]; if (!arg->req_data || !arg->resp_data) return -EINVAL; @@ -132,8 +130,9 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque * response payload. Make sure that it has enough space to cover the * authtag.
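Concretely, with the numbers from the on-stack buffer this patch removes (a 64-byte response payload plus a 16-byte GCM authtag), resp_len works out to sizeof(derived_key_resp->data) + mdesc->ctx->authsize = 64 + 16 = 80 bytes; the difference below is that the buffer is now sized and zero-allocated at run time with kzalloc(GFP_KERNEL_ACCOUNT) instead of being a fixed u8 buf[64 + 16] on the stack.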
*/ - resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize; - if (sizeof(buf) < resp_len) + resp_len = sizeof(derived_key_resp->data) + mdesc->ctx->authsize; + derived_key_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); + if (!derived_key_resp) return -ENOMEM; derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT); @@ -149,23 +148,21 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque req.vmpck_id = mdesc->vmpck_id; req.req_buf = derived_key_req; req.req_sz = sizeof(*derived_key_req); - req.resp_buf = buf; + req.resp_buf = derived_key_resp; req.resp_sz = resp_len; req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; rc = snp_send_guest_request(mdesc, &req); arg->exitinfo2 = req.exitinfo2; - if (rc) - return rc; - - memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data)); - if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp, - sizeof(derived_key_resp))) - rc = -EFAULT; + if (!rc) { + if (copy_to_user((void __user *)arg->resp_data, derived_key_resp, + sizeof(derived_key_resp->data))) + rc = -EFAULT; + } /* The response buffer contains the sensitive data, explicitly clear it. */ - memzero_explicit(buf, sizeof(buf)); - memzero_explicit(&derived_key_resp, sizeof(derived_key_resp)); + memzero_explicit(derived_key_resp, sizeof(*derived_key_resp)); + return rc; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 835b0deef9bb..f23d75986947 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4331,15 +4331,18 @@ static int try_release_subpage_extent_buffer(struct folio *folio) unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1; int ret; - xa_lock_irq(&fs_info->buffer_tree); + rcu_read_lock(); xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) { /* * The same as try_release_extent_buffer(), to ensure the eb * won't disappear out from under us. */ spin_lock(&eb->refs_lock); + rcu_read_unlock(); + if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { spin_unlock(&eb->refs_lock); + rcu_read_lock(); continue; } @@ -4358,11 +4361,10 @@ static int try_release_subpage_extent_buffer(struct folio *folio) * check the folio private at the end. And * release_extent_buffer() will release the refs_lock. */ - xa_unlock_irq(&fs_info->buffer_tree); release_extent_buffer(eb); - xa_lock_irq(&fs_info->buffer_tree); + rcu_read_lock(); } - xa_unlock_irq(&fs_info->buffer_tree); + rcu_read_unlock(); /* * Finally to check if we have cleared folio private, as if we have @@ -4375,7 +4377,6 @@ static int try_release_subpage_extent_buffer(struct folio *folio) ret = 0; spin_unlock(&folio->mapping->i_private_lock); return ret; - } int try_release_extent_buffer(struct folio *folio) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b77dd22b8cdb..d740910e071a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -401,10 +401,12 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode, while (index <= end_index) { folio = filemap_get_folio(inode->vfs_inode.i_mapping, index); - index++; - if (IS_ERR(folio)) + if (IS_ERR(folio)) { + index++; continue; + } + index = folio_end(folio) >> PAGE_SHIFT; /* * Here we just clear all Ordered bits for every page in the * range, then btrfs_mark_ordered_io_finished() will handle @@ -2013,7 +2015,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio * cleared by the caller.
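The one-line change below is a units fix: btrfs_cleanup_ordered_extents() takes a byte count as its third argument, while end is an absolute file offset, so passing end cleaned up the wrong range; len is the actual number of bytes covered by this NOCOW attempt.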
*/ if (ret < 0) - btrfs_cleanup_ordered_extents(inode, file_pos, end); + btrfs_cleanup_ordered_extents(inode, file_pos, len); return ret; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 1a5972178b3a..ccaa9a3cf1ce 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1453,7 +1453,6 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root, struct btrfs_qgroup *src, int sign) { struct btrfs_qgroup *qgroup; - struct btrfs_qgroup *cur; LIST_HEAD(qgroup_list); u64 num_bytes = src->excl; int ret = 0; @@ -1463,7 +1462,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root, goto out; qgroup_iterator_add(&qgroup_list, qgroup); - list_for_each_entry(cur, &qgroup_list, iterator) { + list_for_each_entry(qgroup, &qgroup_list, iterator) { struct btrfs_qgroup_list *glist; qgroup->rfer += sign * num_bytes; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index e58151933844..7256f6748c8f 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -602,6 +602,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, if (btrfs_root_id(root) == objectid) { u64 commit_root_gen; + /* + * Relocation will wait for cleaner thread, and any half-dropped + * subvolume will be fully cleaned up at mount time. + * So here we shouldn't hit a subvolume with non-zero drop_progress. + * + * If this isn't the case, error out since it can make us attempt to + * drop references for extents that were already dropped before. + */ + if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) { + struct btrfs_key cpu_key; + + btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress); + btrfs_err(fs_info, + "cannot relocate partially dropped subvolume %llu, drop progress key (%llu %u %llu)", + objectid, cpu_key.objectid, cpu_key.type, cpu_key.offset); + ret = -EUCLEAN; + goto fail; + } + /* called by btrfs_init_reloc_root */ ret = btrfs_copy_root(trans, root, root->commit_root, &eb, BTRFS_TREE_RELOC_OBJECTID); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 2186e87fb61b..69e11557fd13 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2605,14 +2605,14 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, /* * Correctly adjust the reserved bytes occupied by a log tree extent buffer */ -static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) +static int unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) { struct btrfs_block_group *cache; cache = btrfs_lookup_block_group(fs_info, start); if (!cache) { btrfs_err(fs_info, "unable to find block group for %llu", start); - return; + return -ENOENT; } spin_lock(&cache->space_info->lock); @@ -2623,27 +2623,22 @@ static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) spin_unlock(&cache->space_info->lock); btrfs_put_block_group(cache); + + return 0; } static int clean_log_buffer(struct btrfs_trans_handle *trans, struct extent_buffer *eb) { - int ret; - btrfs_tree_lock(eb); btrfs_clear_buffer_dirty(trans, eb); wait_on_extent_buffer_writeback(eb); btrfs_tree_unlock(eb); - if (trans) { - ret = btrfs_pin_reserved_extent(trans, eb); - if (ret) - return ret; - } else { - unaccount_log_buffer(eb->fs_info, eb->start); - } + if (trans) + return btrfs_pin_reserved_extent(trans, eb); - return 0; + return unaccount_log_buffer(eb->fs_info, eb->start); } static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c 
index 245e813ecd78..db11b5b5f0e6 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -2650,7 +2650,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info) spin_lock(&block_group->lock); if (block_group->reserved || block_group->alloc_offset == 0 || - (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) || + !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) || test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) { spin_unlock(&block_group->lock); continue; @@ -1743,6 +1743,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, loff_t done = 0; int ret; + if (WARN_ON_ONCE(iocb->ki_flags & IOCB_ATOMIC)) + return -EIO; + if (!iomi.len) return 0; diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index 7b26efc271ee..d81f3318417d 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -3,8 +3,18 @@ config EROFS_FS tristate "EROFS filesystem support" depends on BLOCK + select CACHEFILES if EROFS_FS_ONDEMAND select CRC32 + select CRYPTO if EROFS_FS_ZIP_ACCEL + select CRYPTO_DEFLATE if EROFS_FS_ZIP_ACCEL select FS_IOMAP + select LZ4_DECOMPRESS if EROFS_FS_ZIP + select NETFS_SUPPORT if EROFS_FS_ONDEMAND + select XXHASH if EROFS_FS_XATTR + select XZ_DEC if EROFS_FS_ZIP_LZMA + select XZ_DEC_MICROLZMA if EROFS_FS_ZIP_LZMA + select ZLIB_INFLATE if EROFS_FS_ZIP_DEFLATE + select ZSTD_DECOMPRESS if EROFS_FS_ZIP_ZSTD help EROFS (Enhanced Read-Only File System) is a lightweight read-only file system with modern designs (e.g. no buffer heads, inline @@ -38,7 +48,6 @@ config EROFS_FS_DEBUG config EROFS_FS_XATTR bool "EROFS extended attributes" depends on EROFS_FS - select XXHASH default y help Extended attributes are name:value pairs associated with inodes by @@ -94,7 +103,6 @@ config EROFS_FS_BACKED_BY_FILE config EROFS_FS_ZIP bool "EROFS Data Compression Support" depends on EROFS_FS - select LZ4_DECOMPRESS default y help Enable transparent compression support for EROFS file systems. @@ -104,8 +112,6 @@ config EROFS_FS_ZIP config EROFS_FS_ZIP_LZMA bool "EROFS LZMA compressed data support" depends on EROFS_FS_ZIP - select XZ_DEC - select XZ_DEC_MICROLZMA help Saying Y here includes support for reading EROFS file systems containing LZMA compressed data, specifically called microLZMA. It @@ -117,7 +123,6 @@ config EROFS_FS_ZIP_LZMA config EROFS_FS_ZIP_DEFLATE bool "EROFS DEFLATE compressed data support" depends on EROFS_FS_ZIP - select ZLIB_INFLATE help Saying Y here includes support for reading EROFS file systems containing DEFLATE compressed data. It gives better compression @@ -132,7 +137,6 @@ config EROFS_FS_ZIP_DEFLATE config EROFS_FS_ZIP_ZSTD bool "EROFS Zstandard compressed data support" depends on EROFS_FS_ZIP - select ZSTD_DECOMPRESS help Saying Y here includes support for reading EROFS file systems containing Zstandard compressed data. It gives better compression @@ -147,8 +151,6 @@ config EROFS_FS_ZIP_ZSTD config EROFS_FS_ZIP_ACCEL bool "EROFS hardware decompression support" depends on EROFS_FS_ZIP - select CRYPTO - select CRYPTO_DEFLATE help Saying Y here includes hardware accelerator support for reading EROFS file systems containing compressed data. 
It gives better @@ -163,9 +165,7 @@ config EROFS_FS_ZIP_ACCEL config EROFS_FS_ONDEMAND bool "EROFS fscache-based on-demand read support (deprecated)" depends on EROFS_FS - select NETFS_SUPPORT select FSCACHE - select CACHEFILES select CACHEFILES_ONDEMAND help This permits EROFS to use fscache-backed data blobs with on-demand diff --git a/fs/erofs/super.c b/fs/erofs/super.c index e1020aa60771..1b529ace4db0 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -174,6 +174,11 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, if (!erofs_is_fileio_mode(sbi)) { dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file), &dif->dax_part_off, NULL, NULL); + if (!dif->dax_dev && test_opt(&sbi->opt, DAX_ALWAYS)) { + erofs_info(sb, "DAX unsupported by %s. Turning off DAX.", + dif->path); + clear_opt(&sbi->opt, DAX_ALWAYS); + } } else if (!S_ISREG(file_inode(file)->i_mode)) { fput(file); return -EINVAL; @@ -210,8 +215,13 @@ static int erofs_scan_devices(struct super_block *sb, ondisk_extradevs, sbi->devs->extra_devices); return -EINVAL; } - if (!ondisk_extradevs) + if (!ondisk_extradevs) { + if (test_opt(&sbi->opt, DAX_ALWAYS) && !sbi->dif0.dax_dev) { + erofs_info(sb, "DAX unsupported by block device. Turning off DAX."); + clear_opt(&sbi->opt, DAX_ALWAYS); + } return 0; + } if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb)) sbi->devs->flatdev = true; @@ -313,8 +323,8 @@ static int erofs_read_superblock(struct super_block *sb) sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact)); if (erofs_sb_has_48bit(sbi) && dsb->rootnid_8b) { sbi->root_nid = le64_to_cpu(dsb->rootnid_8b); - sbi->dif0.blocks = (sbi->dif0.blocks << 32) | - le16_to_cpu(dsb->rb.blocks_hi); + sbi->dif0.blocks = sbi->dif0.blocks | + ((u64)le16_to_cpu(dsb->rb.blocks_hi) << 32); } else { sbi->root_nid = le16_to_cpu(dsb->rb.rootnid_2b); } @@ -338,7 +348,6 @@ static int erofs_read_superblock(struct super_block *sb) if (ret < 0) goto out; - /* handle multiple devices */ ret = erofs_scan_devices(sb, dsb); if (erofs_sb_has_48bit(sbi)) @@ -671,14 +680,9 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) return invalfc(fc, "cannot use fsoffset in fscache mode"); } - if (test_opt(&sbi->opt, DAX_ALWAYS)) { - if (!sbi->dif0.dax_dev) { - errorfc(fc, "DAX unsupported by block device. 
Turning off DAX."); - clear_opt(&sbi->opt, DAX_ALWAYS); - } else if (sbi->blkszbits != PAGE_SHIFT) { - errorfc(fc, "unsupported blocksize for DAX"); - clear_opt(&sbi->opt, DAX_ALWAYS); - } + if (test_opt(&sbi->opt, DAX_ALWAYS) && sbi->blkszbits != PAGE_SHIFT) { + erofs_info(sb, "unsupported blocksize for DAX"); + clear_opt(&sbi->opt, DAX_ALWAYS); } sb->s_time_gran = 1; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 792f20888a8f..2d73297003d2 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1432,6 +1432,16 @@ static void z_erofs_decompressqueue_kthread_work(struct kthread_work *work) } #endif +/* Use (kthread_)work in atomic contexts to minimize scheduling overhead */ +static inline bool z_erofs_in_atomic(void) +{ + if (IS_ENABLED(CONFIG_PREEMPTION) && rcu_preempt_depth()) + return true; + if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) + return true; + return !preemptible(); +} + static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, int bios) { @@ -1446,8 +1456,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, if (atomic_add_return(bios, &io->pending_bios)) return; - /* Use (kthread_)work and sync decompression for atomic contexts only */ - if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) { + if (z_erofs_in_atomic()) { #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD struct kthread_worker *worker; diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c index 4f6468eb2adf..cb237f1b902a 100644 --- a/fs/nfsd/localio.c +++ b/fs/nfsd/localio.c @@ -103,10 +103,11 @@ nfsd_open_local_fh(struct net *net, struct auth_domain *dom, if (nfsd_file_get(new) == NULL) goto again; /* - * Drop the ref we were going to install and the - * one we were going to return. + * Drop the ref we were going to install (both file and + * net) and the one we were going to return (only file). */ nfsd_file_put(localio); + nfsd_net_put(net); nfsd_file_put(localio); localio = new; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 98ab55ba3ced..edf050766e57 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -470,7 +470,15 @@ static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap) if (!iap->ia_valid) return 0; - iap->ia_valid |= ATTR_CTIME; + /* + * If ATTR_DELEG is set, then this is an update from a client that + * holds a delegation. If this is an update for only the atime, the + * ctime should not be changed. If the update contains the mtime + * too, then ATTR_CTIME should already be set. + */ + if (!(iap->ia_valid & ATTR_DELEG)) + iap->ia_valid |= ATTR_CTIME; + return notify_change(&nop_mnt_idmap, dentry, iap, NULL); } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 3d6d8a9f13fc..29cca0e6d0ff 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -340,8 +340,8 @@ static int proc_maps_open(struct inode *inode, struct file *file, priv->inode = inode; priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); - if (IS_ERR_OR_NULL(priv->mm)) { - int err = priv->mm ? 
PTR_ERR(priv->mm) : -ESRCH; + if (IS_ERR(priv->mm)) { + int err = PTR_ERR(priv->mm); seq_release_private(inode, file); return err; @@ -1148,10 +1148,13 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, { struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; - pte_t ptent = huge_ptep_get(walk->mm, addr, pte); struct folio *folio = NULL; bool present = false; + spinlock_t *ptl; + pte_t ptent; + ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte); + ptent = huge_ptep_get(walk->mm, addr, pte); if (pte_present(ptent)) { folio = page_folio(pte_page(ptent)); present = true; @@ -1170,6 +1173,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, else mss->private_hugetlb += huge_page_size(hstate_vma(vma)); } + spin_unlock(ptl); return 0; } #else @@ -2017,12 +2021,14 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, struct pagemapread *pm = walk->private; struct vm_area_struct *vma = walk->vma; u64 flags = 0, frame = 0; + spinlock_t *ptl; int err = 0; pte_t pte; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; + ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep); pte = huge_ptep_get(walk->mm, addr, ptep); if (pte_present(pte)) { struct folio *folio = page_folio(pte_page(pte)); @@ -2050,11 +2056,12 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, err = add_to_pagemap(&pme, pm); if (err) - return err; + break; if (pm->show_pfn && (flags & PM_PRESENT)) frame++; } + spin_unlock(ptl); cond_resched(); return err; @@ -3128,17 +3135,22 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { - pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte); + pte_t huge_pte; struct numa_maps *md; struct page *page; + spinlock_t *ptl; + ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); + huge_pte = huge_ptep_get(walk->mm, addr, pte); if (!pte_present(huge_pte)) - return 0; + goto out; page = pte_page(huge_pte); md = walk->private; gather_stats(page, md, pte_dirty(huge_pte), 1); +out: + spin_unlock(ptl); return 0; } diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c index bc1c1e9b288a..43b86fa4d695 100644 --- a/fs/smb/client/cifs_spnego.c +++ b/fs/smb/client/cifs_spnego.c @@ -124,55 +124,44 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo, dp = description; /* start with version and hostname portion of UNC string */ spnego_key = ERR_PTR(-EINVAL); - sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION, - hostname); - dp = description + strlen(description); + dp += sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION, + hostname); /* add the server address */ if (server->dstaddr.ss_family == AF_INET) - sprintf(dp, "ip4=%pI4", &sa->sin_addr); + dp += sprintf(dp, "ip4=%pI4", &sa->sin_addr); else if (server->dstaddr.ss_family == AF_INET6) - sprintf(dp, "ip6=%pI6", &sa6->sin6_addr); + dp += sprintf(dp, "ip6=%pI6", &sa6->sin6_addr); else goto out; - dp = description + strlen(description); - /* for now, only sec=krb5 and sec=mskrb5 and iakerb are valid */ if (server->sec_kerberos) - sprintf(dp, ";sec=krb5"); + dp += sprintf(dp, ";sec=krb5"); else if (server->sec_mskerberos) - sprintf(dp, ";sec=mskrb5"); + dp += sprintf(dp, ";sec=mskrb5"); else if (server->sec_iakerb) - sprintf(dp, ";sec=iakerb"); + dp += sprintf(dp, ";sec=iakerb"); else { cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n"); - sprintf(dp, ";sec=krb5"); + dp += 
sprintf(dp, ";sec=krb5"); } - dp = description + strlen(description); - sprintf(dp, ";uid=0x%x", - from_kuid_munged(&init_user_ns, sesInfo->linux_uid)); + dp += sprintf(dp, ";uid=0x%x", + from_kuid_munged(&init_user_ns, sesInfo->linux_uid)); - dp = description + strlen(description); - sprintf(dp, ";creduid=0x%x", + dp += sprintf(dp, ";creduid=0x%x", from_kuid_munged(&init_user_ns, sesInfo->cred_uid)); - if (sesInfo->user_name) { - dp = description + strlen(description); - sprintf(dp, ";user=%s", sesInfo->user_name); - } + if (sesInfo->user_name) + dp += sprintf(dp, ";user=%s", sesInfo->user_name); - dp = description + strlen(description); - sprintf(dp, ";pid=0x%x", current->pid); + dp += sprintf(dp, ";pid=0x%x", current->pid); - if (sesInfo->upcall_target == UPTARGET_MOUNT) { - dp = description + strlen(description); - sprintf(dp, ";upcall_target=mount"); - } else { - dp = description + strlen(description); - sprintf(dp, ";upcall_target=app"); - } + if (sesInfo->upcall_target == UPTARGET_MOUNT) + dp += sprintf(dp, ";upcall_target=mount"); + else + dp += sprintf(dp, ";upcall_target=app"); cifs_dbg(FYI, "key description = %s\n", description); saved_cred = override_creds(spnego_cred); diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h index 487f39cff77e..3ce7c614ccc0 100644 --- a/fs/smb/client/cifsfs.h +++ b/fs/smb/client/cifsfs.h @@ -145,6 +145,6 @@ extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ /* when changing internal version - update following two lines at same time */ -#define SMB3_PRODUCT_BUILD 55 -#define CIFS_VERSION "2.55" +#define SMB3_PRODUCT_BUILD 56 +#define CIFS_VERSION "2.56" #endif /* _CIFSFS_H */ diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index e6830ab3a546..1e64a4fb6af0 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -1732,6 +1732,7 @@ struct mid_q_entry { int mid_rc; /* rc for MID_RC */ __le16 command; /* smb command code */ unsigned int optype; /* operation type */ + spinlock_t mid_lock; bool wait_cancelled:1; /* Cancelled while waiting for response */ bool deleted_from_q:1; /* Whether Mid has been dequeued from pending_mid_q */ bool large_buf:1; /* if valid response, is pointer to large buf */ @@ -2036,6 +2037,9 @@ require use of the stronger protocol */ * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo * ->invalidHandle initiate_cifs_search * ->oplock_break_cancelled + * mid_q_entry->mid_lock mid_q_entry->callback alloc_mid + * smb2_mid_entry_alloc + * (Any fields of mid_q_entry that will need protection) ****************************************************************************/ #ifdef DECLARE_GLOBALS_HERE @@ -2375,6 +2379,23 @@ static inline bool cifs_netbios_name(const char *name, size_t namelen) return ret; } +/* + * Execute mid callback atomically - ensures callback runs exactly once + * and prevents sleeping in atomic context.
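For illustration: both the demultiplex thread and a cancelling waiter can race to fire the same mid's callback. With the claim-under-spinlock pattern below, whichever caller takes mid_lock first swaps the pointer out and runs it, and the loser simply sees NULL:

	mid_execute_callback(mid);	// first caller: runs ->callback exactly once
	mid_execute_callback(mid);	// racing caller: ->callback is already NULL, no-op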
+ */ +static inline void mid_execute_callback(struct mid_q_entry *mid) +{ + void (*callback)(struct mid_q_entry *mid); + + spin_lock(&mid->mid_lock); + callback = mid->callback; + mid->callback = NULL; /* Mark as executed */ + spin_unlock(&mid->mid_lock); + + if (callback) + callback(mid); +} + #define CIFS_REPARSE_SUPPORT(tcon) \ ((tcon)->posix_extensions || \ (le32_to_cpu((tcon)->fsAttrInfo.Attributes) & \ diff --git a/fs/smb/client/cifstransport.c b/fs/smb/client/cifstransport.c index 352dafb888dd..e98b95eff8c9 100644 --- a/fs/smb/client/cifstransport.c +++ b/fs/smb/client/cifstransport.c @@ -46,6 +46,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); memset(temp, 0, sizeof(struct mid_q_entry)); kref_init(&temp->refcount); + spin_lock_init(&temp->mid_lock); temp->mid = get_mid(smb_buffer); temp->pid = current->pid; temp->command = cpu_to_le16(smb_buffer->Command); @@ -345,16 +346,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = wait_for_response(server, midQ); if (rc != 0) { send_cancel(server, &rqst, midQ); - spin_lock(&server->mid_queue_lock); - if (midQ->mid_state == MID_REQUEST_SUBMITTED || - midQ->mid_state == MID_RESPONSE_RECEIVED) { + spin_lock(&midQ->mid_lock); + if (midQ->callback) { /* no longer considered to be "in-flight" */ midQ->callback = release_mid; - spin_unlock(&server->mid_queue_lock); + spin_unlock(&midQ->mid_lock); add_credits(server, &credits, 0); return rc; } - spin_unlock(&server->mid_queue_lock); + spin_unlock(&midQ->mid_lock); } rc = cifs_sync_mid_result(midQ, server); @@ -527,15 +527,14 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, rc = wait_for_response(server, midQ); if (rc) { send_cancel(server, &rqst, midQ); - spin_lock(&server->mid_queue_lock); - if (midQ->mid_state == MID_REQUEST_SUBMITTED || - midQ->mid_state == MID_RESPONSE_RECEIVED) { + spin_lock(&midQ->mid_lock); + if (midQ->callback) { /* no longer considered to be "in-flight" */ midQ->callback = release_mid; - spin_unlock(&server->mid_queue_lock); + spin_unlock(&midQ->mid_lock); return rc; } - spin_unlock(&server->mid_queue_lock); + spin_unlock(&midQ->mid_lock); } /* We got the response - restart system call. */ diff --git a/fs/smb/client/compress.c b/fs/smb/client/compress.c index 766b4de13da7..db709f5cd2e1 100644 --- a/fs/smb/client/compress.c +++ b/fs/smb/client/compress.c @@ -155,58 +155,29 @@ static int cmp_bkt(const void *_a, const void *_b) } /* - * TODO: - * Support other iter types, if required. - * Only ITER_XARRAY is supported for now. + * Collect some 2K samples with 2K gaps between. 
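Illustrative arithmetic, assuming 4 KiB pages: each iteration of the loop below copies a 2 KiB sample (copy_from_iter() advances the iterator by the amount copied) and then skips a further 2 KiB, giving a 4 KiB stride, so a 16 KiB payload yields samples at offsets 0, 4 KiB, 8 KiB and 12 KiB. Only about half of the data is ever read, which keeps the compressibility estimate cheap.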
*/ -static int collect_sample(const struct iov_iter *iter, ssize_t max, u8 *sample) +static int collect_sample(const struct iov_iter *source, ssize_t max, u8 *sample) { - struct folio *folios[16], *folio; - unsigned int nr, i, j, npages; - loff_t start = iter->xarray_start + iter->iov_offset; - pgoff_t last, index = start / PAGE_SIZE; - size_t len, off, foff; - void *p; - int s = 0; - - last = (start + max - 1) / PAGE_SIZE; - do { - nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios), - XA_PRESENT); - if (nr == 0) - return -EIO; - - for (i = 0; i < nr; i++) { - folio = folios[i]; - npages = folio_nr_pages(folio); - foff = start - folio_pos(folio); - off = foff % PAGE_SIZE; - - for (j = foff / PAGE_SIZE; j < npages; j++) { - size_t len2; - - len = min_t(size_t, max, PAGE_SIZE - off); - len2 = min_t(size_t, len, SZ_2K); - - p = kmap_local_page(folio_page(folio, j)); - memcpy(&sample[s], p, len2); - kunmap_local(p); - - s += len2; - - if (len2 < SZ_2K || s >= max - SZ_2K) - return s; - - max -= len; - if (max <= 0) - return s; - - start += len; - off = 0; - index++; - } - } - } while (nr == ARRAY_SIZE(folios)); + struct iov_iter iter = *source; + size_t s = 0; + + while (iov_iter_count(&iter) >= SZ_2K) { + size_t part = umin(umin(iov_iter_count(&iter), SZ_2K), max); + size_t n; + + n = copy_from_iter(sample + s, part, &iter); + if (n != part) + return -EFAULT; + + s += n; + max -= n; + + if (iov_iter_count(&iter) < PAGE_SIZE - SZ_2K) + break; + + iov_iter_advance(&iter, SZ_2K); + } return s; } diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 587845a2452d..dd12f3eb61dc 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -335,7 +335,7 @@ cifs_abort_connection(struct TCP_Server_Info *server) cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); list_for_each_entry_safe(mid, nmid, &retry_list, qhead) { list_del_init(&mid->qhead); - mid->callback(mid); + mid_execute_callback(mid); release_mid(mid); } @@ -919,7 +919,7 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type) list_del_init(&mid->qhead); mid->mid_rc = mid_rc; mid->mid_state = MID_RC; - mid->callback(mid); + mid_execute_callback(mid); release_mid(mid); } @@ -1117,7 +1117,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server) mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid); list_del_init(&mid_entry->qhead); - mid_entry->callback(mid_entry); + mid_execute_callback(mid_entry); release_mid(mid_entry); } /* 1/8th of sec is more than enough time for them to exit */ @@ -1394,7 +1394,7 @@ next_pdu: } if (!mids[i]->multiRsp || mids[i]->multiEnd) - mids[i]->callback(mids[i]); + mid_execute_callback(mids[i]); release_mid(mids[i]); } else if (server->ops->is_oplock_break && @@ -4205,7 +4205,6 @@ retry: return 0; } - server->lstrp = jiffies; server->tcpStatus = CifsInNegotiate; server->neg_start = jiffies; spin_unlock(&server->srv_lock); diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index 75be4b46bc6f..fe453a4b3dc8 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -1943,15 +1943,24 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *tcon; + __u32 dosattr = 0, origattr = 0; struct TCP_Server_Info *server; struct iattr *attrs = NULL; - __u32 dosattr = 0, origattr = 0; + bool rehash = false; cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry); if 
(unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; + /* Unhash dentry in advance to prevent any concurrent opens */ + spin_lock(&dentry->d_lock); + if (!d_unhashed(dentry)) { + __d_drop(dentry); + rehash = true; + } + spin_unlock(&dentry->d_lock); + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -2003,7 +2012,8 @@ psx_del_no_retry: cifs_drop_nlink(inode); } } else if (rc == -ENOENT) { - d_drop(dentry); + if (simple_positive(dentry)) + d_delete(dentry); } else if (rc == -EBUSY) { if (server->ops->rename_pending_delete) { rc = server->ops->rename_pending_delete(full_path, @@ -2056,6 +2066,8 @@ unlink_out: kfree(attrs); free_xid(xid); cifs_put_tlink(tlink); + if (rehash) + d_rehash(dentry); return rc; } @@ -2462,6 +2474,7 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; + bool rehash = false; unsigned int xid; int rc, tmprc; int retry_count = 0; @@ -2477,6 +2490,17 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; + /* + * Prevent any concurrent opens on the target by unhashing the dentry. + * VFS already unhashes the target when renaming directories. + */ + if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) { + if (!d_unhashed(target_dentry)) { + d_drop(target_dentry); + rehash = true; + } + } + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -2518,6 +2542,8 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, } } + if (!rc) + rehash = false; /* * No-replace is the natural behavior for CIFS, so skip unlink hacks. */ @@ -2576,12 +2602,16 @@ unlink_target: goto cifs_rename_exit; rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name); + if (!rc) + rehash = false; } /* force revalidate to go get info when needed */ CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; cifs_rename_exit: + if (rehash) + d_rehash(target_dentry); kfree(info_buf_source); free_dentry_path(page2); free_dentry_path(page1); diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index ad8947434b71..3b251de874ec 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -772,6 +772,13 @@ next_iface: bytes_left -= sizeof(*p); break; } + /* Validate that Next doesn't point beyond the buffer */ + if (next > bytes_left) { + cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n", + __func__, next, bytes_left); + rc = -EINVAL; + goto out; + } p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next); bytes_left -= next; } @@ -783,7 +790,9 @@ next_iface: } /* Azure rounds the buffer size up 8, to a 16 byte boundary */ - if ((bytes_left > 8) || p->Next) + if ((bytes_left > 8) || + (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next) + + sizeof(p->Next) && p->Next)) cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); ses->iface_last_update = jiffies; @@ -4805,7 +4814,7 @@ static void smb2_decrypt_offload(struct work_struct *work) dw->server->ops->is_network_name_deleted(dw->buf, dw->server); - mid->callback(mid); + mid_execute_callback(mid); } else { spin_lock(&dw->server->srv_lock); if (dw->server->tcpStatus == CifsNeedReconnect) { @@ -4813,7 +4822,7 @@ static void smb2_decrypt_offload(struct work_struct *work) mid->mid_state = MID_RETRY_NEEDED; spin_unlock(&dw->server->mid_queue_lock); spin_unlock(&dw->server->srv_lock); - mid->callback(mid); + mid_execute_callback(mid); } else { 
spin_lock(&dw->server->mid_queue_lock); mid->mid_state = MID_REQUEST_SUBMITTED; diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c index ff9ef7fcd010..bc0e92eb2b64 100644 --- a/fs/smb/client/smb2transport.c +++ b/fs/smb/client/smb2transport.c @@ -771,6 +771,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *shdr, temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); memset(temp, 0, sizeof(struct mid_q_entry)); kref_init(&temp->refcount); + spin_lock_init(&temp->mid_lock); temp->mid = le64_to_cpu(shdr->MessageId); temp->credits = credits > 0 ? credits : 1; temp->pid = current->pid; diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c index c628e91c328b..02d6db431fd4 100644 --- a/fs/smb/client/smbdirect.c +++ b/fs/smb/client/smbdirect.c @@ -1337,10 +1337,6 @@ void smbd_destroy(struct TCP_Server_Info *server) log_rdma_event(INFO, "cancelling idle timer\n"); cancel_delayed_work_sync(&info->idle_timer_work); - log_rdma_event(INFO, "wait for all send posted to IB to finish\n"); - wait_event(info->wait_send_pending, - atomic_read(&info->send_pending) == 0); - /* It's not possible for upper layer to get to reassembly */ log_rdma_event(INFO, "drain the reassembly queue\n"); do { @@ -1986,7 +1982,11 @@ int smbd_send(struct TCP_Server_Info *server, */ wait_event(info->wait_send_pending, - atomic_read(&info->send_pending) == 0); + atomic_read(&info->send_pending) == 0 || + sc->status != SMBDIRECT_SOCKET_CONNECTED); + + if (sc->status != SMBDIRECT_SOCKET_CONNECTED && rc == 0) + rc = -EAGAIN; return rc; } diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c index 32d528b4dd83..a61ba7f3fb86 100644 --- a/fs/smb/client/transport.c +++ b/fs/smb/client/transport.c @@ -1005,15 +1005,14 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", midQ[i]->mid, le16_to_cpu(midQ[i]->command)); send_cancel(server, &rqst[i], midQ[i]); - spin_lock(&server->mid_queue_lock); + spin_lock(&midQ[i]->mid_lock); midQ[i]->wait_cancelled = true; - if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED || - midQ[i]->mid_state == MID_RESPONSE_RECEIVED) { + if (midQ[i]->callback) { midQ[i]->callback = cifs_cancelled_callback; cancelled_mid[i] = true; credits[i].value = 0; } - spin_unlock(&server->mid_queue_lock); + spin_unlock(&midQ[i]->mid_lock); } } diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 1e6e9c10cea2..a8187281eb96 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -479,7 +479,7 @@ DECLARE_EVENT_CLASS(xchk_dqiter_class, __field(xfs_exntst_t, state) ), TP_fast_assign( - __entry->dev = cursor->sc->ip->i_mount->m_super->s_dev; + __entry->dev = cursor->sc->mp->m_super->s_dev; __entry->dqtype = cursor->dqtype; __entry->ino = cursor->quota_ip->i_ino; __entry->cur_id = cursor->id; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 55a304cb3aef..f96fbf5c54c9 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1101,9 +1101,6 @@ xfs_file_write_iter( if (xfs_is_shutdown(ip->i_mount)) return -EIO; - if (IS_DAX(inode)) - return xfs_file_dax_write(iocb, from); - if (iocb->ki_flags & IOCB_ATOMIC) { if (ocount < xfs_get_atomic_write_min(ip)) return -EINVAL; @@ -1116,6 +1113,9 @@ xfs_file_write_iter( return ret; } + if (IS_DAX(inode)) + return xfs_file_dax_write(iocb, from); + if (iocb->ki_flags & IOCB_DIRECT) { /* * Allow a directio write to fall back to a buffered diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 07fbdcc4cbf5..bd6d33557194 100644 --- a/fs/xfs/xfs_inode.h +++ 
b/fs/xfs/xfs_inode.h @@ -358,9 +358,20 @@ static inline bool xfs_inode_has_bigrtalloc(const struct xfs_inode *ip) static inline bool xfs_inode_can_hw_atomic_write(const struct xfs_inode *ip) { + if (IS_DAX(VFS_IC(ip))) + return false; + return xfs_inode_buftarg(ip)->bt_awu_max > 0; } +static inline bool xfs_inode_can_sw_atomic_write(const struct xfs_inode *ip) +{ + if (IS_DAX(VFS_IC(ip))) + return false; + + return xfs_can_sw_atomic_write(ip->i_mount); +} + /* * In-core inode flags. */ diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index fe1f74a3b6a3..e1051a530a50 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -219,7 +219,7 @@ xfs_bulk_ireq_setup( else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno) return -EINVAL; - breq->flags |= XFS_IBULK_SAME_AG; + breq->iwalk_flags |= XFS_IWALK_SAME_AG; /* Asking for an inode past the end of the AG? We're done! */ if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno) diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 149b5460fbfd..603effabe1ee 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -616,7 +616,8 @@ xfs_get_atomic_write_min( * write of exactly one single fsblock if the bdev will make that * guarantee for us. */ - if (xfs_inode_can_hw_atomic_write(ip) || xfs_can_sw_atomic_write(mp)) + if (xfs_inode_can_hw_atomic_write(ip) || + xfs_inode_can_sw_atomic_write(ip)) return mp->m_sb.sb_blocksize; return 0; @@ -633,7 +634,7 @@ xfs_get_atomic_write_max( * write of exactly one single fsblock if the bdev will make that * guarantee for us. */ - if (!xfs_can_sw_atomic_write(mp)) { + if (!xfs_inode_can_sw_atomic_write(ip)) { if (xfs_inode_can_hw_atomic_write(ip)) return mp->m_sb.sb_blocksize; return 0; diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index c8c9b8d8309f..2aa37a4d2706 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -307,7 +307,6 @@ xfs_bulkstat( .breq = breq, }; struct xfs_trans *tp; - unsigned int iwalk_flags = 0; int error; if (breq->idmap != &nop_mnt_idmap) { @@ -328,10 +327,7 @@ xfs_bulkstat( * locking abilities to detect cycles in the inobt without deadlocking. */ tp = xfs_trans_alloc_empty(breq->mp); - if (breq->flags & XFS_IBULK_SAME_AG) - iwalk_flags |= XFS_IWALK_SAME_AG; - - error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags, + error = xfs_iwalk(breq->mp, tp, breq->startino, breq->iwalk_flags, xfs_bulkstat_iwalk, breq->icount, &bc); xfs_trans_cancel(tp); kfree(bc.buf); @@ -457,7 +453,7 @@ xfs_inumbers( * locking abilities to detect cycles in the inobt without deadlocking. */ tp = xfs_trans_alloc_empty(breq->mp); - error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags, + error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->iwalk_flags, xfs_inumbers_walk, breq->icount, &ic); xfs_trans_cancel(tp); diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h index f10e8f8f2335..2d0612f14d6e 100644 --- a/fs/xfs/xfs_itable.h +++ b/fs/xfs/xfs_itable.h @@ -13,17 +13,15 @@ struct xfs_ibulk { xfs_ino_t startino; /* start with this inode */ unsigned int icount; /* number of elements in ubuffer */ unsigned int ocount; /* number of records returned */ - unsigned int flags; /* see XFS_IBULK_FLAG_* */ + unsigned int flags; /* XFS_IBULK_FLAG_* */ + unsigned int iwalk_flags; /* XFS_IWALK_FLAG_* */ }; -/* Only iterate within the same AG as startino */ -#define XFS_IBULK_SAME_AG (1U << 0) - /* Fill out the bs_extents64 field if set. */ -#define XFS_IBULK_NREXT64 (1U << 1) +#define XFS_IBULK_NREXT64 (1U << 0) /* Signal that we can return metadata directories. 
*/ -#define XFS_IBULK_METADIR (1U << 2) +#define XFS_IBULK_METADIR (1U << 1) /* * Advance the user buffer pointer by one record of the given size. If the diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 2133fbaf1766..dc32c5e34d81 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -779,6 +779,25 @@ xfs_set_max_atomic_write_opt( return -EINVAL; } + if (xfs_has_reflink(mp)) + goto set_limit; + + if (new_max_fsbs == 1) { + if (mp->m_ddev_targp->bt_awu_max || + (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_awu_max)) { + } else { + xfs_warn(mp, + "cannot support atomic writes of size %lluk with no reflink or HW support", + new_max_bytes >> 10); + return -EINVAL; + } + } else { + xfs_warn(mp, + "cannot support atomic writes of size %lluk with no reflink support", + new_max_bytes >> 10); + return -EINVAL; + } + set_limit: error = xfs_calc_atomic_write_reservation(mp, new_max_fsbs); if (error) { diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index e1794e3e3156..ac344e42846c 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -455,6 +455,7 @@ DEFINE_EVENT(xfs_zone_alloc_class, name, \ xfs_extlen_t len), \ TP_ARGS(oz, rgbno, len)) DEFINE_ZONE_ALLOC_EVENT(xfs_zone_record_blocks); +DEFINE_ZONE_ALLOC_EVENT(xfs_zone_skip_blocks); DEFINE_ZONE_ALLOC_EVENT(xfs_zone_alloc_blocks); TRACE_EVENT(xfs_zone_gc_select_victim, diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index ece374d622b3..575e7028f423 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -253,8 +253,8 @@ xfs_trans_alloc( * by doing GFP_KERNEL allocations inside sb_start_intwrite(). */ retry: - WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); tp = __xfs_trans_alloc(mp, flags); + WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); error = xfs_trans_reserve(tp, resp, blocks, rtextents); if (error == -ENOSPC && want_retry) { xfs_trans_cancel(tp); diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c index 33f7eee521a8..f8bd6d741755 100644 --- a/fs/xfs/xfs_zone_alloc.c +++ b/fs/xfs/xfs_zone_alloc.c @@ -166,10 +166,9 @@ xfs_open_zone_mark_full( static void xfs_zone_record_blocks( struct xfs_trans *tp, - xfs_fsblock_t fsbno, - xfs_filblks_t len, struct xfs_open_zone *oz, - bool used) + xfs_fsblock_t fsbno, + xfs_filblks_t len) { struct xfs_mount *mp = tp->t_mountp; struct xfs_rtgroup *rtg = oz->oz_rtg; @@ -179,18 +178,37 @@ xfs_zone_record_blocks( xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP); - if (used) { - rmapip->i_used_blocks += len; - ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg)); - } else { - xfs_add_frextents(mp, len); - } + rmapip->i_used_blocks += len; + ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg)); oz->oz_written += len; if (oz->oz_written == rtg_blocks(rtg)) xfs_open_zone_mark_full(oz); xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE); } +/* + * Called for blocks that have been written to disk, but not actually linked to + * an inode, which can happen when garbage collection races with user data + * writes to a file. 
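+ *
+ * Such blocks still count toward oz_written, so the zone can still be
+ * marked full, but they are handed back to the free rt extent counter
+ * rather than being added to the rmap inode's used-block count.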
+ */ +static void +xfs_zone_skip_blocks( + struct xfs_open_zone *oz, + xfs_filblks_t len) +{ + struct xfs_rtgroup *rtg = oz->oz_rtg; + + trace_xfs_zone_skip_blocks(oz, 0, len); + + xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); + oz->oz_written += len; + if (oz->oz_written == rtg_blocks(rtg)) + xfs_open_zone_mark_full(oz); + xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP); + + xfs_add_frextents(rtg_mount(rtg), len); +} + static int xfs_zoned_map_extent( struct xfs_trans *tp, @@ -250,8 +268,7 @@ xfs_zoned_map_extent( } } - xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz, - true); + xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount); /* Map the new blocks into the data fork. */ xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new); @@ -259,8 +276,7 @@ xfs_zoned_map_extent( skip: trace_xfs_reflink_cow_remap_skip(ip, new); - xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz, - false); + xfs_zone_skip_blocks(oz, new->br_blockcount); return 0; } diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 8ed80cad77ec..8d9d4fd078e7 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -23,6 +23,7 @@ #ifndef __DRM_BRIDGE_H__ #define __DRM_BRIDGE_H__ +#include <linux/cleanup.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/mutex.h> @@ -866,13 +867,61 @@ struct drm_bridge_funcs { struct drm_connector *connector, bool enable, int direction); + /** + * @hdmi_cec_init: + * + * Initialize CEC part of the bridge. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their + * &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ int (*hdmi_cec_init)(struct drm_bridge *bridge, struct drm_connector *connector); + /** + * @hdmi_cec_enable: + * + * Enable or disable the CEC adapter inside the bridge. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their + * &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable); + /** + * @hdmi_cec_log_addr: + * + * Set the logical address of the CEC adapter inside the bridge. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their + * &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ int (*hdmi_cec_log_addr)(struct drm_bridge *bridge, u8 logical_addr); + /** + * @hdmi_cec_transmit: + * + * Transmit the message using the CEC adapter inside the bridge. + * + * This callback is optional, it can be implemented by bridges that + * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their + * &drm_bridge->ops. + * + * Returns: + * 0 on success, a negative error code otherwise + */ int (*hdmi_cec_transmit)(struct drm_bridge *bridge, u8 attempts, u32 signal_free_time, struct cec_msg *msg); @@ -1123,6 +1172,10 @@ struct drm_bridge { */ bool pre_enable_prev_first; /** + * @support_hdcp: Indicate that the bridge supports HDCP. + */ + bool support_hdcp; + /** * @ddc: Associated I2C adapter for DDC access, if any. 
*/ struct i2c_adapter *ddc; @@ -1228,6 +1281,9 @@ drm_priv_to_bridge(struct drm_private_obj *priv) struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge); void drm_bridge_put(struct drm_bridge *bridge); +/* Cleanup action for use with __free() */ +DEFINE_FREE(drm_bridge_put, struct drm_bridge *, if (_T) drm_bridge_put(_T)) + void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, const struct drm_bridge_funcs *funcs); @@ -1317,6 +1373,13 @@ drm_bridge_get_next_bridge(struct drm_bridge *bridge) * drm_bridge_get_prev_bridge() - Get the previous bridge in the chain * @bridge: bridge object * + * The caller is responsible for holding a reference to @bridge via + * drm_bridge_get() or equivalent. This function leaves the refcount of + * @bridge unmodified. + * + * The refcount of the returned bridge is incremented. Use drm_bridge_put() + * when done with it. + * * RETURNS: * the previous bridge in the chain, or NULL if @bridge is the first. */ @@ -1326,13 +1389,16 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge) if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) return NULL; - return list_prev_entry(bridge, chain_node); + return drm_bridge_get(list_prev_entry(bridge, chain_node)); } /** * drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain * @encoder: encoder object * + * The refcount of the returned bridge is incremented. Use drm_bridge_put() + * when done with it. + * * RETURNS: * the first bridge in the chain, or NULL if @encoder has no bridge attached * to it. @@ -1340,8 +1406,8 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge) static inline struct drm_bridge * drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder) { - return list_first_entry_or_null(&encoder->bridge_chain, - struct drm_bridge, chain_node); + return drm_bridge_get(list_first_entry_or_null(&encoder->bridge_chain, + struct drm_bridge, chain_node)); } /** diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h index 4aedc5423aff..142fc2af1716 100644 --- a/include/drm/drm_gpusvm.h +++ b/include/drm/drm_gpusvm.h @@ -282,6 +282,10 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start, unsigned long end); +struct drm_gpusvm_notifier * +drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start, + unsigned long end); + struct drm_gpusvm_range * drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start, unsigned long end); @@ -434,4 +438,70 @@ __drm_gpusvm_range_next(struct drm_gpusvm_range *range) (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ (range__) = __drm_gpusvm_range_next(range__)) +/** + * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier + * @range__: Iterator variable for the ranges + * @next__: Iterator variable for the ranges temporary storage + * @notifier__: Pointer to the GPU SVM notifier + * @start__: Start address of the range + * @end__: End address of the range + * + * This macro is used to iterate over GPU SVM ranges in a notifier while + * removing ranges from it.
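+ *
+ * A typical pattern (for illustration; driver_remove_range() is a
+ * hypothetical driver helper, not part of this API):
+ *
+ *	drm_gpusvm_for_each_range_safe(range, next, notifier, start, end)
+ *		driver_remove_range(gpusvm, range);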
+ */ +#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \ + for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \ + (next__) = __drm_gpusvm_range_next(range__); \ + (range__) && (drm_gpusvm_range_start(range__) < (end__)); \ + (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__)) + +/** + * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list + * @notifier: a pointer to the current drm_gpusvm_notifier + * + * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if + * the current notifier is the last one or if the input notifier is + * NULL. + */ +static inline struct drm_gpusvm_notifier * +__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier) +{ + if (notifier && !list_is_last(&notifier->entry, + &notifier->gpusvm->notifier_list)) + return list_next_entry(notifier, entry); + + return NULL; +} + +/** + * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm + * @notifier__: Iterator variable for the notifiers + * @gpusvm__: Pointer to the GPU SVM structure + * @start__: Start address of the notifier + * @end__: End address of the notifier + * + * This macro is used to iterate over GPU SVM notifiers in a gpusvm. + */ +#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \ + for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \ + (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ + (notifier__) = __drm_gpusvm_notifier_next(notifier__)) + +/** + * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm + * @notifier__: Iterator variable for the notifiers + * @next__: Iterator variable for the notifiers temporary storage + * @gpusvm__: Pointer to the GPU SVM structure + * @start__: Start address of the notifier + * @end__: End address of the notifier + * + * This macro is used to iterate over GPU SVM notifiers in a gpusvm while + * removing notifiers from it.
+ */ +#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \ + for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \ + (next__) = __drm_gpusvm_notifier_next(notifier__); \ + (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \ + (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__)) + #endif /* __DRM_GPUSVM_H__ */ diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index 274532facfd6..4a22b9d848f7 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -160,15 +160,6 @@ struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm, struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start); struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end); -static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset) -{ - va->va.addr = addr; - va->va.range = range; - va->gem.obj = obj; - va->gem.offset = offset; -} - /** * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is * invalidated @@ -1058,10 +1049,23 @@ struct drm_gpuva_ops { */ #define drm_gpuva_next_op(op) list_next_entry(op, entry) +/** + * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]() + */ +struct drm_gpuvm_map_req { + /** + * @map: struct drm_gpuva_op_map + */ + struct drm_gpuva_op_map map; +}; + struct drm_gpuva_ops * drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset); + const struct drm_gpuvm_map_req *req); +struct drm_gpuva_ops * +drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm, + const struct drm_gpuvm_map_req *req); + struct drm_gpuva_ops * drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range); @@ -1079,8 +1083,10 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm, static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, struct drm_gpuva_op_map *op) { - drm_gpuva_init(va, op->va.addr, op->va.range, - op->gem.obj, op->gem.offset); + va->va.addr = op->va.addr; + va->va.range = op->va.range; + va->gem.obj = op->gem.obj; + va->gem.offset = op->gem.offset; } /** @@ -1205,16 +1211,14 @@ struct drm_gpuvm_ops { }; int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, - u64 addr, u64 range, - struct drm_gem_object *obj, u64 offset); + const struct drm_gpuvm_map_req *req); int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv, u64 addr, u64 range); int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, unsigned int num_fences, - u64 req_addr, u64 req_range, - struct drm_gem_object *obj, u64 offset); + struct drm_gpuvm_map_req *req); int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec, u64 req_addr, u64 req_range); diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 57a869a6f6e8..3aba7b380c8d 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -288,10 +288,12 @@ void mipi_dsi_picture_parameter_set_multi(struct mipi_dsi_multi_context *ctx, ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, size_t size); -int mipi_dsi_generic_write_chatty(struct mipi_dsi_device *dsi, - const void *payload, size_t size); void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx, const void *payload, size_t size); +void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx, + struct mipi_dsi_device *dsi1, + struct mipi_dsi_device
*dsi2, + const void *payload, size_t size); ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, size_t num_params, void *data, size_t size); u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format); @@ -332,10 +334,16 @@ int mipi_dsi_dcs_write_buffer_chatty(struct mipi_dsi_device *dsi, const void *data, size_t len); void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx, const void *data, size_t len); +void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx, + struct mipi_dsi_device *dsi1, + struct mipi_dsi_device *dsi2, + const void *data, size_t len); ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd, const void *data, size_t len); ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, size_t len); +void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd, + void *data, size_t len); int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi); int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi); int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode); @@ -383,28 +391,23 @@ void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx, void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx); /** - * mipi_dsi_generic_write_seq - transmit data using a generic write packet - * - * This macro will print errors for you and will RETURN FROM THE CALLING - * FUNCTION (yes this is non-intuitive) upon error. + * mipi_dsi_generic_write_seq_multi - transmit data using a generic write packet * - * Because of the non-intuitive return behavior, THIS MACRO IS DEPRECATED. - * Please replace calls of it with mipi_dsi_generic_write_seq_multi(). + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. * - * @dsi: DSI peripheral device + * @ctx: Context for multiple DSI transactions * @seq: buffer containing the payload */ -#define mipi_dsi_generic_write_seq(dsi, seq...) \ - do { \ - static const u8 d[] = { seq }; \ - int ret; \ - ret = mipi_dsi_generic_write_chatty(dsi, d, ARRAY_SIZE(d)); \ - if (ret < 0) \ - return ret; \ +#define mipi_dsi_generic_write_seq_multi(ctx, seq...) \ + do { \ + static const u8 d[] = { seq }; \ + mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \ } while (0) /** - * mipi_dsi_generic_write_seq_multi - transmit data using a generic write packet + * mipi_dsi_generic_write_var_seq_multi - transmit non-constant data using a + * generic write packet * * This macro will print errors for you and error handling is optimized for * callers that call this multiple times in a row. @@ -412,9 +415,9 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx); * @ctx: Context for multiple DSI transactions * @seq: buffer containing the payload */ -#define mipi_dsi_generic_write_seq_multi(ctx, seq...) \ - do { \ - static const u8 d[] = { seq }; \ +#define mipi_dsi_generic_write_var_seq_multi(ctx, seq...) \ + do { \ + const u8 d[] = { seq }; \ mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \ } while (0) @@ -435,6 +438,110 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx); } while (0) /** + * mipi_dsi_dcs_write_var_seq_multi - transmit a DCS command with non-constant + * payload + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. 
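+ *
+ * For instance (illustration only), a panel driver could send a runtime
+ * backlight value with
+ *	mipi_dsi_dcs_write_var_seq_multi(&ctx, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, level);
+ * which builds the payload on the stack instead of in .rodata.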
+ * + * @ctx: Context for multiple DSI transactions + * @cmd: Command + * @seq: buffer containing data to be transmitted + */ +#define mipi_dsi_dcs_write_var_seq_multi(ctx, cmd, seq...) \ + do { \ + const u8 d[] = { cmd, seq }; \ + mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \ + } while (0) + +/** + * mipi_dsi_dual - send the same MIPI DSI command to two interfaces + * + * This macro will send the specified MIPI DSI command twice, once per each of + * the two interfaces supplied. This is useful for reducing duplication of code + * in panel drivers which use two parallel serial interfaces. + * + * Note that the _func parameter cannot accept a macro such as + * mipi_dsi_generic_write_multi() or mipi_dsi_dcs_write_buffer_multi(). See + * mipi_dsi_dual_generic_write_multi() and + * mipi_dsi_dual_dcs_write_buffer_multi() instead. + * + * WARNING: This macro reuses the _func argument and the optional trailing + * arguments twice each, which may cause unintended side effects. For example, + * adding the postfix increment ++ operator to one of the arguments to be + * passed to _func will cause the variable to be incremented twice instead of + * once and the variable will be its original value + 1 when sent to _dsi2. + * + * @_func: MIPI DSI function to pass context and arguments into + * @_ctx: Context for multiple DSI transactions + * @_dsi1: First DSI interface to act as recipient of the MIPI DSI command + * @_dsi2: Second DSI interface to act as recipient of the MIPI DSI command + * @...: Arguments to pass to MIPI DSI function or macro + */ + +#define mipi_dsi_dual(_func, _ctx, _dsi1, _dsi2, ...) \ + do { \ + struct mipi_dsi_multi_context *_ctxcpy = (_ctx); \ + _ctxcpy->dsi = (_dsi1); \ + (_func)(_ctxcpy, ##__VA_ARGS__); \ + _ctxcpy->dsi = (_dsi2); \ + (_func)(_ctxcpy, ##__VA_ARGS__); \ + } while (0) + +/** + * mipi_dsi_dual_generic_write_seq_multi - transmit data using a generic write + * packet to two dsi interfaces, one after the other + * + * This macro will send the specified generic packet twice, once per each of + * the two interfaces supplied. This is useful for reducing duplication of code + * in panel drivers which use two parallel serial interfaces. + * + * Note that if an error occurs while transmitting the packet to the first DSI + * interface, the packet will not be sent to the second DSI interface. + * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @_ctx: Context for multiple DSI transactions + * @_dsi1: First DSI interface to act as recipient of packet + * @_dsi2: Second DSI interface to act as recipient of packet + * @_seq: buffer containing the payload + */ +#define mipi_dsi_dual_generic_write_seq_multi(_ctx, _dsi1, _dsi2, _seq...) \ + do { \ + static const u8 d[] = { _seq }; \ + mipi_dsi_dual_generic_write_multi(_ctx, _dsi1, _dsi2, d, \ + ARRAY_SIZE(d)); \ + } while (0) + +/** + * mipi_dsi_dual_dcs_write_seq_multi - transmit a DCS command with payload to + * two dsi interfaces, one after the other + * + * This macro will send the specified DCS command with payload twice, once per + * each of the two interfaces supplied. This is useful for reducing duplication + * of code in panel drivers which use two parallel serial interfaces. + * + * Note that if an error occurs while transmitting the payload to the first DSI + * interface, the payload will not be sent to the second DSI interface. 
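+ *
+ * Typical use (illustration only; dsi1/dsi2 are the two panel links and
+ * 0xb0 stands for a vendor-specific command):
+ *	mipi_dsi_dual_dcs_write_seq_multi(&ctx, dsi1, dsi2, 0xb0, 0x01, 0x02);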
+ * + * This macro will print errors for you and error handling is optimized for + * callers that call this multiple times in a row. + * + * @_ctx: Context for multiple DSI transactions + * @_dsi1: First DSI interface to act as recipient of packet + * @_dsi2: Second DSI interface to act as recipient of packet + * @_cmd: Command + * @_seq: buffer containing the payload + */ +#define mipi_dsi_dual_dcs_write_seq_multi(_ctx, _dsi1, _dsi2, _cmd, _seq...) \ + do { \ + static const u8 d[] = { _cmd, _seq }; \ + mipi_dsi_dual_dcs_write_buffer_multi(_ctx, _dsi1, _dsi2, d, \ + ARRAY_SIZE(d)); \ + } while (0) + +/** * struct mipi_dsi_driver - DSI driver * @driver: device driver model driver * @probe: callback for device binding diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h index 2382dec6d6ab..81f0e698acbf 100644 --- a/include/linux/fbcon.h +++ b/include/linux/fbcon.h @@ -1,6 +1,13 @@ #ifndef _LINUX_FBCON_H #define _LINUX_FBCON_H +#include <linux/compiler_types.h> + +struct fb_blit_caps; +struct fb_info; +struct fb_var_screeninfo; +struct fb_videomode; + #ifdef CONFIG_FRAMEBUFFER_CONSOLE void __init fb_console_init(void); void __exit fb_console_exit(void); diff --git a/include/linux/firewire.h b/include/linux/firewire.h index cceb70415ed2..d38c6e538e5c 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -341,7 +341,11 @@ struct fw_address_handler { u64 length; fw_address_callback_t address_callback; void *callback_data; + + // Only for core functions. struct list_head link; + struct kref kref; + struct completion done; }; struct fw_address_region { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5e5de4b0a433..f3a3b761abfb 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2071,6 +2071,8 @@ enum netdev_reg_state { * @max_pacing_offload_horizon: max EDT offload horizon in nsec. * @napi_config: An array of napi_config structures containing per-NAPI * settings. + * @num_napi_configs: number of allocated NAPI config structs, + * always >= max(num_rx_queues, num_tx_queues). * @gro_flush_timeout: timeout for GRO layer in NAPI * @napi_defer_hard_irqs: If not zero, provides a counter that would * allow to avoid NIC hard IRQ, on busy queues. @@ -2482,8 +2484,9 @@ struct net_device { u64 max_pacing_offload_horizon; struct napi_config *napi_config; - unsigned long gro_flush_timeout; + u32 num_napi_configs; u32 napi_defer_hard_irqs; + unsigned long gro_flush_timeout; /** * @up: copy of @state's IFF_UP, but safe to read with just @lock. diff --git a/include/linux/sched.h b/include/linux/sched.h index 2b272382673d..f8188b833350 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2152,6 +2152,8 @@ static inline struct mutex *__get_task_blocked_on(struct task_struct *p) static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m) { + struct mutex *blocked_on = READ_ONCE(p->blocked_on); + WARN_ON_ONCE(!m); /* The task should only be setting itself as blocked */ WARN_ON_ONCE(p != current); @@ -2162,8 +2164,8 @@ static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m) * with a different mutex. Note, setting it to the same * lock repeatedly is ok. 
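* Note: blocked_on is accessed with READ_ONCE()/WRITE_ONCE() below so
* that lockless readers outside mutex::wait_lock see a consistent
* value.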
*/ - WARN_ON_ONCE(p->blocked_on && p->blocked_on != m); - p->blocked_on = m; + WARN_ON_ONCE(blocked_on && blocked_on != m); + WRITE_ONCE(p->blocked_on, m); } static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m) @@ -2174,16 +2176,19 @@ static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m) static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m) { - WARN_ON_ONCE(!m); - /* Currently we serialize blocked_on under the mutex::wait_lock */ - lockdep_assert_held_once(&m->wait_lock); - /* - * There may be cases where we re-clear already cleared - * blocked_on relationships, but make sure we are not - * clearing the relationship with a different lock. - */ - WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m); - p->blocked_on = NULL; + if (m) { + struct mutex *blocked_on = READ_ONCE(p->blocked_on); + + /* Currently we serialize blocked_on under the mutex::wait_lock */ + lockdep_assert_held_once(&m->wait_lock); + /* + * There may be cases where we re-clear already cleared + * blocked_on relationships, but make sure we are not + * clearing the relationship with a different lock. + */ + WARN_ON_ONCE(blocked_on && blocked_on != m); + } + WRITE_ONCE(p->blocked_on, NULL); } static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m) diff --git a/include/net/devlink.h b/include/net/devlink.h index 93640a29427c..b32c9ceeb81d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -78,6 +78,9 @@ struct devlink_port_pci_sf_attrs { * @flavour: flavour of the port * @split: indicates if this is split port * @splittable: indicates if the port can be split. + * @no_phys_port_name: skip automatic phys_port_name generation; for + * compatibility only, newly added driver/port instance + * should never set this. * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink. 
* @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL * @phys: physical port attributes @@ -87,7 +90,8 @@ struct devlink_port_pci_sf_attrs { */ struct devlink_port_attrs { u8 split:1, - splittable:1; + splittable:1, + no_phys_port_name:1; u32 lanes; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index ff406ef4fd4a..29a36709e7f3 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1163,6 +1163,14 @@ static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs) return housekeeping_cpumask(HK_TYPE_KTHREAD); } +static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs) +{ + if (ipvs->est_cpulist_valid) + return ipvs->sysctl_est_cpulist; + else + return NULL; +} + static inline int sysctl_est_nice(struct netns_ipvs *ipvs) { return ipvs->sysctl_est_nice; @@ -1270,6 +1278,11 @@ static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs) return housekeeping_cpumask(HK_TYPE_KTHREAD); } +static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs) +{ + return NULL; +} + static inline int sysctl_est_nice(struct netns_ipvs *ipvs) { return IPVS_EST_NICE; diff --git a/include/net/kcm.h b/include/net/kcm.h index 441e993be634..d9c35e71ecea 100644 --- a/include/net/kcm.h +++ b/include/net/kcm.h @@ -71,7 +71,6 @@ struct kcm_sock { struct list_head wait_psock_list; struct sk_buff *seq_skb; struct mutex tx_mutex; - u32 tx_stopped : 1; /* Don't use bit fields here, these are set under different locks */ bool tx_wait; diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 431b593de709..1509a536cb85 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -265,6 +265,8 @@ struct page_pool *page_pool_create_percpu(const struct page_pool_params *params, struct xdp_mem_info; #ifdef CONFIG_PAGE_POOL +void page_pool_enable_direct_recycling(struct page_pool *pool, + struct napi_struct *napi); void page_pool_disable_direct_recycling(struct page_pool *pool); void page_pool_destroy(struct page_pool *pool); void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h index a706ead39082..ce523e9ccc52 100644 --- a/include/uapi/drm/amdxdna_accel.h +++ b/include/uapi/drm/amdxdna_accel.h @@ -154,6 +154,31 @@ enum amdxdna_bo_type { }; /** + * struct amdxdna_drm_va_entry + * @vaddr: Virtual address. + * @len: Size of entry. + */ +struct amdxdna_drm_va_entry { + __u64 vaddr; + __u64 len; +}; + +/** + * struct amdxdna_drm_va_tbl + * @dmabuf_fd: The fd of dmabuf. + * @num_entries: Number of va entries. + * @va_entries: Array of va entries. + * + * The input can be either a dmabuf fd or a virtual address entry table. + * When dmabuf_fd is used, num_entries must be zero. + */ +struct amdxdna_drm_va_tbl { + __s32 dmabuf_fd; + __u32 num_entries; + struct amdxdna_drm_va_entry va_entries[]; +}; + +/** * struct amdxdna_drm_create_bo - Create a buffer object. * @flags: Buffer flags. MBZ. * @vaddr: User VA of buffer if applied. MBZ. 
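A sketch of how userspace might fill the new VA table (illustration only: buf0 and buf1 stand for already-mapped host buffers, headers and error handling are omitted, and treating a negative dmabuf_fd as "no dmabuf" is an assumption; the comment above only specifies that num_entries must be zero when a dmabuf is used):

	struct amdxdna_drm_va_tbl *tbl;
	size_t sz = sizeof(*tbl) + 2 * sizeof(struct amdxdna_drm_va_entry);

	tbl = calloc(1, sz);
	tbl->dmabuf_fd = -1;			/* assumed: no dmabuf attached */
	tbl->num_entries = 2;
	tbl->va_entries[0].vaddr = (__u64)(uintptr_t)buf0;
	tbl->va_entries[0].len = 4096;
	tbl->va_entries[1].vaddr = (__u64)(uintptr_t)buf1;
	tbl->va_entries[1].len = 4096;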
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index e63a71d3c607..3cd5cf15e3c9 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -597,35 +597,66 @@ struct drm_set_version { int drm_dd_minor; }; -/* DRM_IOCTL_GEM_CLOSE ioctl argument type */ +/** + * struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl. + * @handle: Handle of the object to be closed. + * @pad: Padding. + * + * Releases the handle to an mm object. + */ struct drm_gem_close { - /** Handle of the object to be closed. */ __u32 handle; __u32 pad; }; -/* DRM_IOCTL_GEM_FLINK ioctl argument type */ +/** + * struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl. + * @handle: Handle for the object being named. + * @name: Returned global name. + * + * Create a global name for an object, returning the name. + * + * Note that the name does not hold a reference; when the object + * is freed, the name goes away. + */ struct drm_gem_flink { - /** Handle for the object being named */ __u32 handle; - - /** Returned global name */ __u32 name; }; -/* DRM_IOCTL_GEM_OPEN ioctl argument type */ +/** + * struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl. + * @name: Name of object being opened. + * @handle: Returned handle for the object. + * @size: Returned size of the object + * + * Open an object using the global name, returning a handle and the size. + * + * This handle (of course) holds a reference to the object, so the object + * will not go away until the handle is deleted. + */ struct drm_gem_open { - /** Name of object being opened */ __u32 name; - - /** Returned handle for the object */ __u32 handle; - - /** Returned size of the object */ __u64 size; }; /** + * struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl. + * @handle: The handle of a gem object. + * @new_handle: An available gem handle. + * + * This ioctl changes the handle of a GEM object to the specified one. + * The new handle must be unused. On success the old handle is closed + * and all further IOCTL should refer to the new handle only. + * Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle. + */ +struct drm_gem_change_handle { + __u32 handle; + __u32 new_handle; +}; + +/** * DRM_CAP_DUMB_BUFFER * * If set to 1, the driver supports creating dumb buffers via the @@ -1309,6 +1340,14 @@ extern "C" { */ #define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name) +/** + * DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle + * + * Some applications (notably CRIU) need objects to have specific gem handles. + * This ioctl changes the object at one gem handle to use a new gem handle. + */ +#define DRM_IOCTL_GEM_CHANGE_HANDLE DRM_IOWR(0xD2, struct drm_gem_change_handle) + /* * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f. diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index c082810c08a8..a122bea25593 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -962,6 +962,14 @@ struct hdr_output_metadata { * Request that the kernel sends back a vblank event (see * struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the * page-flip is done. + * + * When used with atomic uAPI, one event will be delivered per CRTC included in + * the atomic commit. A CRTC is included in an atomic commit if one of its + * properties is set, or if a property is set on a connector or plane linked + * via the CRTC_ID property to the CRTC. 
At least one CRTC must be included, + * and all pulled in CRTCs must be either previously or newly powered on (in + * other words, a powered off CRTC which stays off cannot be included in the + * atomic commit). */ #define DRM_MODE_PAGE_FLIP_EVENT 0x01 /** diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h index e1f43deb7eca..467d365ed7ba 100644 --- a/include/uapi/drm/panthor_drm.h +++ b/include/uapi/drm/panthor_drm.h @@ -327,6 +327,9 @@ struct drm_panthor_gpu_info { /** @pad: MBZ. */ __u32 pad; + + /** @gpu_features: Bitmask describing supported GPU-wide features */ + __u64 gpu_features; }; /** diff --git a/include/uapi/drm/rocket_accel.h b/include/uapi/drm/rocket_accel.h new file mode 100644 index 000000000000..14b2e12b7c49 --- /dev/null +++ b/include/uapi/drm/rocket_accel.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Tomeu Vizoso + */ +#ifndef __DRM_UAPI_ROCKET_ACCEL_H__ +#define __DRM_UAPI_ROCKET_ACCEL_H__ + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#define DRM_ROCKET_CREATE_BO 0x00 +#define DRM_ROCKET_SUBMIT 0x01 +#define DRM_ROCKET_PREP_BO 0x02 +#define DRM_ROCKET_FINI_BO 0x03 + +#define DRM_IOCTL_ROCKET_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_ROCKET_CREATE_BO, struct drm_rocket_create_bo) +#define DRM_IOCTL_ROCKET_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_SUBMIT, struct drm_rocket_submit) +#define DRM_IOCTL_ROCKET_PREP_BO DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_PREP_BO, struct drm_rocket_prep_bo) +#define DRM_IOCTL_ROCKET_FINI_BO DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_FINI_BO, struct drm_rocket_fini_bo) + +/** + * struct drm_rocket_create_bo - ioctl argument for creating Rocket BOs. + * + */ +struct drm_rocket_create_bo { + /** Input: Size of the requested BO. */ + __u32 size; + + /** Output: GEM handle for the BO. */ + __u32 handle; + + /** + * Output: DMA address for the BO in the NPU address space. This address + * is private to the DRM fd and is valid for the lifetime of the GEM + * handle. + */ + __u64 dma_address; + + /** Output: Offset into the drm node to use for subsequent mmap call. */ + __u64 offset; +}; + +/** + * struct drm_rocket_prep_bo - ioctl argument for starting CPU ownership of the BO. + * + * Takes care of waiting for any NPU jobs that might still use the NPU and performs cache + * synchronization. + */ +struct drm_rocket_prep_bo { + /** Input: GEM handle of the buffer object. */ + __u32 handle; + + /** Reserved, must be zero. */ + __u32 reserved; + + /** Input: Amount of time to wait for NPU jobs. */ + __s64 timeout_ns; +}; + +/** + * struct drm_rocket_fini_bo - ioctl argument for finishing CPU ownership of the BO. + * + * Synchronize caches for NPU access. + */ +struct drm_rocket_fini_bo { + /** Input: GEM handle of the buffer object. */ + __u32 handle; + + /** Reserved, must be zero. */ + __u32 reserved; +}; + +/** + * struct drm_rocket_task - A task to be run on the NPU + * + * A task is the smallest unit of work that can be run on the NPU. + */ +struct drm_rocket_task { + /** Input: DMA address to NPU mapping of register command buffer */ + __u32 regcmd; + + /** Input: Number of commands in the register command buffer */ + __u32 regcmd_count; +}; + +/** + * struct drm_rocket_job - A job to be run on the NPU + * + * The kernel will schedule the execution of this job taking into account its + * dependencies with other jobs. All tasks in the same job will be executed + * sequentially on the same core, to benefit from memory residency in SRAM. 
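+ *
+ * A minimal submission (sketch, error handling omitted) fills one
+ * struct drm_rocket_task with @regcmd and @regcmd_count, points @tasks
+ * at it with task_count = 1 and task_struct_size = sizeof(struct
+ * drm_rocket_task), lists the BOs it reads and writes, and passes a
+ * struct drm_rocket_submit with job_count = 1 to DRM_IOCTL_ROCKET_SUBMIT.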
+ */ +struct drm_rocket_job { + /** Input: Pointer to an array of struct drm_rocket_task. */ + __u64 tasks; + + /** Input: Pointer to a u32 array of the BOs that are read by the job. */ + __u64 in_bo_handles; + + /** Input: Pointer to a u32 array of the BOs that are written to by the job. */ + __u64 out_bo_handles; + + /** Input: Number of tasks passed in. */ + __u32 task_count; + + /** Input: Size in bytes of the structs in the @tasks field. */ + __u32 task_struct_size; + + /** Input: Number of input BO handles passed in (size is that times 4). */ + __u32 in_bo_handle_count; + + /** Input: Number of output BO handles passed in (size is that times 4). */ + __u32 out_bo_handle_count; +}; + +/** + * struct drm_rocket_submit - ioctl argument for submitting commands to the NPU. + * + * The kernel will schedule the execution of these jobs in dependency order. + */ +struct drm_rocket_submit { + /** Input: Pointer to an array of struct drm_rocket_job. */ + __u64 jobs; + + /** Input: Number of jobs passed in. */ + __u32 job_count; + + /** Input: Size in bytes of the structs in the @jobs field. */ + __u32 job_struct_size; + + /** Reserved, must be zero. */ + __u64 reserved; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* __DRM_UAPI_ROCKET_ACCEL_H__ */ diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index dbbc404d2b3d..d9b01f4c3a04 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -294,6 +294,8 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, DRM_V3D_PARAM_MAX_PERF_COUNTERS, DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES, + DRM_V3D_PARAM_GLOBAL_RESET_COUNTER, + DRM_V3D_PARAM_CONTEXT_RESET_COUNTER, }; struct drm_v3d_get_param { diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index be91edf34f01..17dfaa0395c4 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -357,6 +357,13 @@ static void create_worker_cb(struct callback_head *cb) worker = container_of(cb, struct io_worker, create_work); wq = worker->wq; acct = worker->acct; + + rcu_read_lock(); + do_create = !io_acct_activate_free_worker(acct); + rcu_read_unlock(); + if (!do_create) + goto no_need_create; + raw_spin_lock(&acct->workers_lock); if (acct->nr_workers < acct->max_workers) { @@ -367,6 +374,7 @@ static void create_worker_cb(struct callback_head *cb) if (do_create) { create_io_worker(wq, acct); } else { +no_need_create: atomic_dec(&acct->nr_running); io_worker_ref_put(wq); } diff --git a/io_uring/net.c b/io_uring/net.c index dd96e355982f..d69f2afa4f7a 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -494,6 +494,15 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret) return nbufs; } +static int io_net_kbuf_recyle(struct io_kiocb *req, + struct io_async_msghdr *kmsg, int len) +{ + req->flags |= REQ_F_BL_NO_RECYCLE; + if (req->flags & REQ_F_BUFFERS_COMMIT) + io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len)); + return IOU_RETRY; +} + static inline bool io_send_finish(struct io_kiocb *req, int *ret, struct io_async_msghdr *kmsg, unsigned issue_flags) @@ -562,8 +571,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) kmsg->msg.msg_controllen = 0; kmsg->msg.msg_control = NULL; sr->done_io += ret; - req->flags |= REQ_F_BL_NO_RECYCLE; - return -EAGAIN; + return io_net_kbuf_recyle(req, kmsg, ret); } if (ret == -ERESTARTSYS) ret = -EINTR; @@ -674,8 +682,7 @@ retry_bundle: sr->len -= ret; sr->buf += ret; sr->done_io += ret; - req->flags |= REQ_F_BL_NO_RECYCLE; - return -EAGAIN; + return io_net_kbuf_recyle(req, kmsg, ret); } if (ret == 
-ERESTARTSYS) ret = -EINTR; @@ -1071,8 +1078,7 @@ retry_multishot: } if (ret > 0 && io_net_retry(sock, flags)) { sr->done_io += ret; - req->flags |= REQ_F_BL_NO_RECYCLE; - return IOU_RETRY; + return io_net_kbuf_recyle(req, kmsg, ret); } if (ret == -ERESTARTSYS) ret = -EINTR; @@ -1218,8 +1224,7 @@ retry_multishot: sr->len -= ret; sr->buf += ret; sr->done_io += ret; - req->flags |= REQ_F_BL_NO_RECYCLE; - return -EAGAIN; + return io_net_kbuf_recyle(req, kmsg, ret); } if (ret == -ERESTARTSYS) ret = -EINTR; @@ -1500,8 +1505,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) zc->len -= ret; zc->buf += ret; zc->done_io += ret; - req->flags |= REQ_F_BL_NO_RECYCLE; - return -EAGAIN; + return io_net_kbuf_recyle(req, kmsg, ret); } if (ret == -ERESTARTSYS) ret = -EINTR; @@ -1571,8 +1575,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) if (ret > 0 && io_net_retry(sock, flags)) { sr->done_io += ret; - req->flags |= REQ_F_BL_NO_RECYCLE; - return -EAGAIN; + return io_net_kbuf_recyle(req, kmsg, ret); } if (ret == -ERESTARTSYS) ret = -EINTR; diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h index c74eac572acd..2cd57096c38e 100644 --- a/kernel/futex/futex.h +++ b/kernel/futex/futex.h @@ -319,13 +319,13 @@ static __always_inline int futex_put_value(u32 val, u32 __user *to) { if (can_do_masked_user_access()) to = masked_user_access_begin(to); - else if (!user_read_access_begin(to, sizeof(*to))) + else if (!user_write_access_begin(to, sizeof(*to))) return -EFAULT; unsafe_put_user(val, to, Efault); - user_read_access_end(); + user_write_access_end(); return 0; Efault: - user_read_access_end(); + user_write_access_end(); return -EFAULT; } diff --git a/kernel/kthread.c b/kernel/kthread.c index 0e98b228a8ef..31b072e8d427 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -893,6 +893,7 @@ out: return ret; } +EXPORT_SYMBOL_GPL(kthread_affine_preferred); /* * Re-affine kthreads according to their preferences diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h index 086fd5487ca7..31a785afee6c 100644 --- a/kernel/locking/ww_mutex.h +++ b/kernel/locking/ww_mutex.h @@ -342,8 +342,12 @@ static bool __ww_mutex_wound(struct MUTEX *lock, * When waking up the task to wound, be sure to clear the * blocked_on pointer. Otherwise we can see circular * blocked_on relationships that can't resolve. + * + * NOTE: We pass NULL here instead of lock, because we + * are waking the mutex owner, who may be currently + * blocked on a different mutex. 
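+ * Passing NULL simply skips the "same lock" sanity checks in
+ * __clear_task_blocked_on() while still clearing the field.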
*/ - __clear_task_blocked_on(owner, lock); + __clear_task_blocked_on(owner, NULL); wake_q_add(wake_q, owner); } return true; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 174ee243b349..8eff357b0436 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4262,6 +4262,8 @@ int rcutree_prepare_cpu(unsigned int cpu) rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + + rcu_preempt_deferred_qs_init(rdp); rcu_spawn_rnp_kthreads(rnp); rcu_spawn_cpu_nocb_kthread(cpu); ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index de6ca13a7b5f..b8bbe7960cda 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -488,6 +488,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp); static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); static void rcu_flavor_sched_clock_irq(int user); static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck); +static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static bool rcu_is_callbacks_kthread(struct rcu_data *rdp); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index fc14adf15cbb..4cd170b2d655 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -763,8 +763,6 @@ static void rcu_read_unlock_special(struct task_struct *t) cpu_online(rdp->cpu)) { // Get scheduler to re-evaluate and call hooks. // If !IRQ_WORK, FQS scan will eventually IPI. - rdp->defer_qs_iw = - IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler); rdp->defer_qs_iw_pending = DEFER_QS_PENDING; irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); } @@ -904,6 +902,10 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck) } } +static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) +{ + rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler); +} #else /* #ifdef CONFIG_PREEMPT_RCU */ /* @@ -1103,6 +1105,8 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck) WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); } +static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) { } + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c index a9e6ffcff04b..cce12287708e 100644 --- a/lib/ref_tracker.c +++ b/lib/ref_tracker.c @@ -434,7 +434,7 @@ void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir) if (dentry && !xa_is_err(dentry)) return; - ret = snprintf(name, sizeof(name), "%s@%px", dir->class, dir); + ret = snprintf(name, sizeof(name), "%s@%p", dir->class, dir); name[sizeof(name) - 1] = '\0'; if (ret < sizeof(name)) { diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c index 2aa12dfa427a..e0968acc03aa 100644 --- a/mm/kasan/kasan_test_c.c +++ b/mm/kasan/kasan_test_c.c @@ -47,7 +47,7 @@ static struct { * Some tests use these global variables to store return values from function * calls that could otherwise be eliminated by the compiler as dead code. */ -static volatile void *kasan_ptr_result; +static void *volatile kasan_ptr_result; static volatile int kasan_int_result; /* Probe for console output: obtains test_status lines of interest. 
*/ diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 374a6a5193a7..6b40bdfd224c 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1172,11 +1172,11 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, if (result != SCAN_SUCCEED) goto out_up_write; /* check if the pmd is still valid */ + vma_start_write(vma); result = check_pmd_still_valid(mm, address, pmd); if (result != SCAN_SUCCEED) goto out_up_write; - vma_start_write(vma); anon_vma_lock_write(vma->anon_vma); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address, diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 8d588e685311..84265983f239 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -470,6 +470,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) { unsigned long flags; struct kmemleak_object *object; + bool warn = false; /* try the slab allocator first */ if (object_cache) { @@ -488,8 +489,10 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) else if (mem_pool_free_count) object = &mem_pool[--mem_pool_free_count]; else - pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); + warn = true; raw_spin_unlock_irqrestore(&kmemleak_lock, flags); + if (warn) + pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); return object; } @@ -2181,6 +2184,7 @@ static const struct file_operations kmemleak_fops = { static void __kmemleak_do_cleanup(void) { struct kmemleak_object *object, *tmp; + unsigned int cnt = 0; /* * Kmemleak has already been disabled, no need for RCU list traversal @@ -2189,6 +2193,10 @@ static void __kmemleak_do_cleanup(void) list_for_each_entry_safe(object, tmp, &object_list, object_list) { __remove_object(object); __delete_object(object); + + /* Call cond_resched() once per 64 iterations to avoid soft lockup */ + if (!(++cnt & 0x3f)) + cond_resched(); } } diff --git a/mm/mprotect.c b/mm/mprotect.c index 78bded7acf79..113b48985834 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -120,9 +120,8 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep, static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr, pte_t oldpte, pte_t *pte, int target_node, - struct folio **foliop) + struct folio *folio) { - struct folio *folio = NULL; bool ret = true; bool toptier; int nid; @@ -131,7 +130,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr, if (pte_protnone(oldpte)) goto skip; - folio = vm_normal_folio(vma, addr, oldpte); if (!folio) goto skip; @@ -173,7 +171,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr, folio_xchg_access_time(folio, jiffies_to_msecs(jiffies)); skip: - *foliop = folio; return ret; } @@ -231,10 +228,9 @@ static int page_anon_exclusive_sub_batch(int start_idx, int max_len, * retrieve sub-batches. 
*/ static void commit_anon_folio_batch(struct vm_area_struct *vma, - struct folio *folio, unsigned long addr, pte_t *ptep, + struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb) { - struct page *first_page = folio_page(folio, 0); bool expected_anon_exclusive; int sub_batch_idx = 0; int len; @@ -251,7 +247,7 @@ static void commit_anon_folio_batch(struct vm_area_struct *vma, } static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma, - struct folio *folio, unsigned long addr, pte_t *ptep, + struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb) { bool set_write; @@ -270,7 +266,7 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma, /* idx = */ 0, set_write, tlb); return; } - commit_anon_folio_batch(vma, folio, addr, ptep, oldpte, ptent, nr_ptes, tlb); + commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb); } static long change_pte_range(struct mmu_gather *tlb, @@ -305,15 +301,19 @@ static long change_pte_range(struct mmu_gather *tlb, const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE; int max_nr_ptes = (end - addr) >> PAGE_SHIFT; struct folio *folio = NULL; + struct page *page; pte_t ptent; + page = vm_normal_page(vma, addr, oldpte); + if (page) + folio = page_folio(page); /* * Avoid trapping faults against the zero or KSM * pages. See similar comment in change_huge_pmd. */ if (prot_numa) { int ret = prot_numa_skip(vma, addr, oldpte, pte, - target_node, &folio); + target_node, folio); if (ret) { /* determine batch to skip */ @@ -323,9 +323,6 @@ static long change_pte_range(struct mmu_gather *tlb, } } - if (!folio) - folio = vm_normal_folio(vma, addr, oldpte); - nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags); oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes); @@ -351,7 +348,7 @@ static long change_pte_range(struct mmu_gather *tlb, */ if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pte_write(ptent)) - set_write_prot_commit_flush_ptes(vma, folio, + set_write_prot_commit_flush_ptes(vma, folio, page, addr, pte, oldpte, ptent, nr_ptes, tlb); else prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent, diff --git a/mm/mremap.c b/mm/mremap.c index 677a4d744df9..9afa8cd524f5 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -179,6 +179,10 @@ static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr if (max_nr == 1) return 1; + /* Avoid expensive folio lookup if we stand no chance of benefit. */ + if (pte_batch_hint(ptep, pte) == 1) + return 1; + folio = vm_normal_folio(vma, addr, pte); if (!folio || !folio_test_large(folio)) return 1; diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index cbed91b09640..45e6290e2e8b 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -1821,13 +1821,16 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start, /* Check if we can move the pmd without splitting it. 
*/ if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) || !pmd_none(dst_pmdval)) { - struct folio *folio = pmd_folio(*src_pmd); - - if (!folio || (!is_huge_zero_folio(folio) && - !PageAnonExclusive(&folio->page))) { - spin_unlock(ptl); - err = -EBUSY; - break; + /* Can be a migration entry */ + if (pmd_present(*src_pmd)) { + struct folio *folio = pmd_folio(*src_pmd); + + if (!is_huge_zero_folio(folio) && + !PageAnonExclusive(&folio->page)) { + spin_unlock(ptl); + err = -EBUSY; + break; + } } spin_unlock(ptl); diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig index 60f28e4fb5c0..4fd5a6ea26b4 100644 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig @@ -43,6 +43,7 @@ config NF_CONNTRACK_BRIDGE config BRIDGE_NF_EBTABLES_LEGACY tristate "Legacy EBTABLES support" depends on BRIDGE && NETFILTER_XTABLES_LEGACY + depends on NETFILTER_XTABLES default n help Legacy ebtables packet/frame classifier. diff --git a/net/core/dev.c b/net/core/dev.c index 68dc47d7e700..5a3c0f40a93f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6999,7 +6999,7 @@ int netif_set_threaded(struct net_device *dev, enum netdev_napi_threaded threaded) { struct napi_struct *napi; - int err = 0; + int i, err = 0; netdev_assert_locked_or_invisible(dev); @@ -7021,6 +7021,10 @@ int netif_set_threaded(struct net_device *dev, list_for_each_entry(napi, &dev->napi_list, dev_list) WARN_ON_ONCE(napi_set_threaded(napi, threaded)); + /* Override the config for all NAPIs even if currently not listed */ + for (i = 0; i < dev->num_napi_configs; i++) + dev->napi_config[i].threaded = threaded; + return err; } @@ -7353,8 +7357,9 @@ void netif_napi_add_weight_locked(struct net_device *dev, * Clear dev->threaded if kthread creation failed so that * threaded mode will not be enabled in napi_enable(). */ - if (dev->threaded && napi_kthread_create(napi)) - dev->threaded = NETDEV_NAPI_THREADED_DISABLED; + if (napi_get_threaded_config(dev, napi)) + if (napi_kthread_create(napi)) + dev->threaded = NETDEV_NAPI_THREADED_DISABLED; netif_napi_set_irq_locked(napi, -1); } EXPORT_SYMBOL(netif_napi_add_weight_locked); @@ -11873,6 +11878,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, goto free_all; dev->cfg_pending = dev->cfg; + dev->num_napi_configs = maxqs; napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config)); dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT); if (!dev->napi_config) diff --git a/net/core/dev.h b/net/core/dev.h index ab69edc0c3e3..d6b08d435479 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -323,6 +323,14 @@ static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n) return NETDEV_NAPI_THREADED_DISABLED; } +static inline enum netdev_napi_threaded +napi_get_threaded_config(struct net_device *dev, struct napi_struct *n) +{ + if (n->config) + return n->config->threaded; + return dev->threaded; +} + int napi_set_threaded(struct napi_struct *n, enum netdev_napi_threaded threaded); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 05e2e22a8f7c..343a6cac21e3 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -1201,6 +1201,35 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), pool->xdp_mem_id = mem->id; } +/** + * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI + * @pool: page pool to modify + * @napi: NAPI instance to associate the page pool with + * + * Associate a page pool with a NAPI instance for lockless page recycling. 
+ * This is useful when a new page pool has to be added to a NAPI instance + * without disabling that NAPI instance, to mark the point at which control + * path "hands over" the page pool to the NAPI instance. In most cases driver + * can simply set the @napi field in struct page_pool_params, and does not + * have to call this helper. + * + * The function is idempotent, but does not implement any refcounting. + * Single page_pool_disable_direct_recycling() will disable recycling, + * no matter how many times enable was called. + */ +void page_pool_enable_direct_recycling(struct page_pool *pool, + struct napi_struct *napi) +{ + if (READ_ONCE(pool->p.napi) == napi) + return; + WARN_ON(!napi || pool->p.napi); + + mutex_lock(&page_pools_lock); + WRITE_ONCE(pool->p.napi, napi); + mutex_unlock(&page_pools_lock); +} +EXPORT_SYMBOL(page_pool_enable_direct_recycling); + void page_pool_disable_direct_recycling(struct page_pool *pool) { /* Disable direct recycling based on pool->cpuid. diff --git a/net/devlink/port.c b/net/devlink/port.c index 939081a0e615..cb8d4df61619 100644 --- a/net/devlink/port.c +++ b/net/devlink/port.c @@ -1519,7 +1519,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, struct devlink_port_attrs *attrs = &devlink_port->attrs; int n = 0; - if (!devlink_port->attrs_set) + if (!devlink_port->attrs_set || devlink_port->attrs.no_phys_port_name) return -EOPNOTSUPP; switch (attrs->flavour) { diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 2c438b140e88..7dc9772fe2d8 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -14,6 +14,7 @@ config NF_DEFRAG_IPV4 config IP_NF_IPTABLES_LEGACY tristate "Legacy IP tables support" depends on NETFILTER_XTABLES_LEGACY + depends on NETFILTER_XTABLES default m if NETFILTER_XTABLES_LEGACY help iptables is a legacy packet classifier. @@ -326,6 +327,7 @@ endif # IP_NF_IPTABLES config IP_NF_ARPTABLES tristate "Legacy ARPTABLES support" depends on NETFILTER_XTABLES_LEGACY + depends on NETFILTER_XTABLES default n help arptables is a legacy packet classifier. @@ -343,6 +345,7 @@ config IP_NF_ARPFILTER select IP_NF_ARPTABLES select NETFILTER_FAMILY_ARP depends on NETFILTER_XTABLES_LEGACY + depends on NETFILTER_XTABLES help ARP packet filtering defines a table `filter', which has a series of rules for simple ARP packet filtering at local input and diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 5128e2a5b00a..b1f3fd302e9d 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -217,7 +217,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); skb->remcsum_offload = remcsum; - need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb)); + need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb); /* Try to offload checksum if possible */ offload_csum = !!(need_csum && !need_ipsec && diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 276860f65baa..81daf82ddc2d 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -10,6 +10,7 @@ menu "IPv6: Netfilter Configuration" config IP6_NF_IPTABLES_LEGACY tristate "Legacy IP6 tables support" depends on INET && IPV6 && NETFILTER_XTABLES_LEGACY + depends on NETFILTER_XTABLES default m if NETFILTER_XTABLES_LEGACY help ip6tables is a legacy packet classifier. 
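The page_pool_enable_direct_recycling() kernel-doc added a few hunks above describes a hand-over point rather than a setup-time flag. A minimal usage sketch of that hand-over, assuming a hypothetical driver (my_rxq and my_rxq_swap_pool are invented; page_pool_create() and the new helper are the only real APIs used):

	/* Bring up a replacement page pool for a live RX queue, then hand
	 * it to the still-running NAPI instance for lockless recycling.
	 */
	static int my_rxq_restart(struct my_rxq *rxq)
	{
		struct page_pool_params pp_params = {
			.order		= 0,
			.pool_size	= 256,
			.nid		= NUMA_NO_NODE,
			.dev		= rxq->dev,
			/* .napi left unset: the control path still owns the pool */
		};
		struct page_pool *pool;

		pool = page_pool_create(&pp_params);
		if (IS_ERR(pool))
			return PTR_ERR(pool);

		my_rxq_swap_pool(rxq, pool);	/* hypothetical: publish to datapath */

		/* Hand-over point: frees that run in this NAPI's context can
		 * now recycle pages directly into the pool's lockless cache.
		 */
		page_pool_enable_direct_recycling(pool, &rxq->napi);
		return 0;
	}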
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 5120a763da0d..0a0eeaed0591 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -334,7 +334,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net) struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); unsigned int i; - xfrm_state_flush(net, IPSEC_PROTO_ANY, false); + xfrm_state_flush(net, 0, false); xfrm_flush_gc(); for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index a4971e6fa943..b4f01cb07561 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -430,7 +430,7 @@ static void psock_write_space(struct sock *sk) /* Check if the socket is reserved so someone is waiting for sending. */ kcm = psock->tx_kcm; - if (kcm && !unlikely(kcm->tx_stopped)) + if (kcm) queue_work(kcm_wq, &kcm->tx_work); spin_unlock_bh(&mux->lock); @@ -1693,12 +1693,6 @@ static int kcm_release(struct socket *sock) */ __skb_queue_purge(&sk->sk_write_queue); - /* Set tx_stopped. This is checked when psock is bound to a kcm and we - * get a writespace callback. This prevents further work being queued - * from the callback (unbinding the psock occurs after canceling work. - */ - kcm->tx_stopped = 1; - release_sock(sk); spin_lock_bh(&mux->lock); @@ -1714,7 +1708,7 @@ static int kcm_release(struct socket *sock) /* Cancel work. After this point there should be no outside references * to the kcm socket. */ - cancel_work_sync(&kcm->tx_work); + disable_work_sync(&kcm->tx_work); lock_sock(sk); psock = kcm->tx_psock; diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c index fb6b46a952cb..69a3ccfc6310 100644 --- a/net/mctp/test/route-test.c +++ b/net/mctp/test/route-test.c @@ -1586,7 +1586,6 @@ static void mctp_test_bind_lookup(struct kunit *test) cleanup: kfree_skb(skb_sock); - kfree_skb(skb_pkt); /* Drop all binds */ for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++) diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index f821ad2e19b3..15049b826732 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c @@ -265,7 +265,8 @@ int ip_vs_est_kthread_start(struct netns_ipvs *ipvs, } set_user_nice(kd->task, sysctl_est_nice(ipvs)); - set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs)); + if (sysctl_est_preferred_cpulist(ipvs)) + kthread_affine_preferred(kd->task, sysctl_est_preferred_cpulist(ipvs)); pr_info("starting estimator thread %d...\n", kd->id); wake_up_process(kd->task); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 486d52b45fe5..50fd6809380f 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -884,8 +884,6 @@ errout: static int ctnetlink_done(struct netlink_callback *cb) { - if (cb->args[1]) - nf_ct_put((struct nf_conn *)cb->args[1]); kfree(cb->data); return 0; } @@ -1208,19 +1206,26 @@ ignore_entry: return 0; } +static unsigned long ctnetlink_get_id(const struct nf_conn *ct) +{ + unsigned long id = nf_ct_get_id(ct); + + return id ? id : 1; +} + static int ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { unsigned int flags = cb->data ? 
NLM_F_DUMP_FILTERED : 0; struct net *net = sock_net(skb->sk); - struct nf_conn *ct, *last; + unsigned long last_id = cb->args[1]; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; struct nf_conn *nf_ct_evict[8]; + struct nf_conn *ct; int res, i; spinlock_t *lockp; - last = (struct nf_conn *)cb->args[1]; i = 0; local_bh_disable(); @@ -1257,7 +1262,7 @@ restart: continue; if (cb->args[1]) { - if (ct != last) + if (ctnetlink_get_id(ct) != last_id) continue; cb->args[1] = 0; } @@ -1270,8 +1275,7 @@ restart: NFNL_MSG_TYPE(cb->nlh->nlmsg_type), ct, true, flags); if (res < 0) { - nf_conntrack_get(&ct->ct_general); - cb->args[1] = (unsigned long)ct; + cb->args[1] = ctnetlink_get_id(ct); spin_unlock(lockp); goto out; } @@ -1284,12 +1288,10 @@ restart: } out: local_bh_enable(); - if (last) { + if (last_id) { /* nf ct hash resize happened, now clear the leftover. */ - if ((struct nf_conn *)cb->args[1] == last) + if (cb->args[1] == last_id) cb->args[1] = 0; - - nf_ct_put(last); } while (i) { @@ -3168,23 +3170,27 @@ errout: return 0; } #endif -static int ctnetlink_exp_done(struct netlink_callback *cb) + +static unsigned long ctnetlink_exp_id(const struct nf_conntrack_expect *exp) { - if (cb->args[1]) - nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]); - return 0; + unsigned long id = (unsigned long)exp; + + id += nf_ct_get_id(exp->master); + id += exp->class; + + return id ? id : 1; } static int ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); - struct nf_conntrack_expect *exp, *last; struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); u_int8_t l3proto = nfmsg->nfgen_family; + unsigned long last_id = cb->args[1]; + struct nf_conntrack_expect *exp; rcu_read_lock(); - last = (struct nf_conntrack_expect *)cb->args[1]; for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { restart: hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]], @@ -3196,7 +3202,7 @@ restart: continue; if (cb->args[1]) { - if (exp != last) + if (ctnetlink_exp_id(exp) != last_id) continue; cb->args[1] = 0; } @@ -3205,9 +3211,7 @@ restart: cb->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp) < 0) { - if (!refcount_inc_not_zero(&exp->use)) - continue; - cb->args[1] = (unsigned long)exp; + cb->args[1] = ctnetlink_exp_id(exp); goto out; } } @@ -3218,32 +3222,30 @@ restart: } out: rcu_read_unlock(); - if (last) - nf_ct_expect_put(last); - return skb->len; } static int ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { - struct nf_conntrack_expect *exp, *last; struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); struct nf_conn *ct = cb->data; struct nf_conn_help *help = nfct_help(ct); u_int8_t l3proto = nfmsg->nfgen_family; + unsigned long last_id = cb->args[1]; + struct nf_conntrack_expect *exp; if (cb->args[0]) return 0; rcu_read_lock(); - last = (struct nf_conntrack_expect *)cb->args[1]; + restart: hlist_for_each_entry_rcu(exp, &help->expectations, lnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; if (cb->args[1]) { - if (exp != last) + if (ctnetlink_exp_id(exp) != last_id) continue; cb->args[1] = 0; } @@ -3251,9 +3253,7 @@ restart: cb->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp) < 0) { - if (!refcount_inc_not_zero(&exp->use)) - continue; - cb->args[1] = (unsigned long)exp; + cb->args[1] = ctnetlink_exp_id(exp); goto out; } } @@ -3264,9 +3264,6 @@ restart: cb->args[0] = 1; out: rcu_read_unlock(); - if (last) - nf_ct_expect_put(last); - return skb->len; } @@ -3285,7 +3282,6 @@ static int ctnetlink_dump_exp_ct(struct 
net *net, struct sock *ctnl, struct nf_conntrack_zone zone; struct netlink_dump_control c = { .dump = ctnetlink_exp_ct_dump_table, - .done = ctnetlink_exp_done, }; err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, @@ -3335,7 +3331,6 @@ static int ctnetlink_get_expect(struct sk_buff *skb, else { struct netlink_dump_control c = { .dump = ctnetlink_exp_dump_table, - .done = ctnetlink_exp_done, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 9b8b10a85233..1f14ef0436c6 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -567,16 +567,16 @@ nf_conntrack_log_invalid_sysctl(const struct ctl_table *table, int write, return ret; if (*(u8 *)table->data == 0) - return ret; + return 0; /* Load nf_log_syslog only if no logger is currently registered */ for (i = 0; i < NFPROTO_NUMPROTO; i++) { if (nf_log_is_registered(i)) - return ret; + return 0; } request_module("%s", "nf_log_syslog"); - return ret; + return 0; } static struct ctl_table_header *nf_ct_netfilter_header; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 13d0ed9d1895..58c5425d61c2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2803,6 +2803,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, struct nft_chain *chain = ctx->chain; struct nft_chain_hook hook = {}; struct nft_stats __percpu *stats = NULL; + struct nftables_pernet *nft_net; struct nft_hook *h, *next; struct nf_hook_ops *ops; struct nft_trans *trans; @@ -2845,6 +2846,20 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, if (nft_hook_list_find(&basechain->hook_list, h)) { list_del(&h->list); nft_netdev_hook_free(h); + continue; + } + + nft_net = nft_pernet(ctx->net); + list_for_each_entry(trans, &nft_net->commit_list, list) { + if (trans->msg_type != NFT_MSG_NEWCHAIN || + trans->table != ctx->table || + !nft_trans_chain_update(trans)) + continue; + + if (nft_hook_list_find(&nft_trans_chain_hooks(trans), h)) { + nft_chain_release_hook(&hook); + return -EEXIST; + } } } } else { @@ -9060,6 +9075,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, { const struct nlattr * const *nla = ctx->nla; struct nft_flowtable_hook flowtable_hook; + struct nftables_pernet *nft_net; struct nft_hook *hook, *next; struct nf_hook_ops *ops; struct nft_trans *trans; @@ -9076,6 +9092,20 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, if (nft_hook_list_find(&flowtable->hook_list, hook)) { list_del(&hook->list); nft_netdev_hook_free(hook); + continue; + } + + nft_net = nft_pernet(ctx->net); + list_for_each_entry(trans, &nft_net->commit_list, list) { + if (trans->msg_type != NFT_MSG_NEWFLOWTABLE || + trans->table != ctx->table || + !nft_trans_flowtable_update(trans)) + continue; + + if (nft_hook_list_find(&nft_trans_flowtable_hooks(trans), hook)) { + err = -EEXIST; + goto err_flowtable_update_hook; + } } } diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 1a19649c2851..9a10251228fd 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -426,10 +426,9 @@ static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m, local_bh_disable(); - if (unlikely(!raw_cpu_ptr(m->scratch))) - goto out; - scratch = *raw_cpu_ptr(m->scratch); + if (unlikely(!scratch)) + goto out; map_index = 
scratch->map_index; diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c index db5d367e43c4..2f090e253caf 100644 --- a/net/netfilter/nft_set_pipapo_avx2.c +++ b/net/netfilter/nft_set_pipapo_avx2.c @@ -1150,12 +1150,12 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, const u32 *key) { struct nft_pipapo *priv = nft_set_priv(set); + const struct nft_set_ext *ext = NULL; struct nft_pipapo_scratch *scratch; u8 genmask = nft_genmask_cur(net); const struct nft_pipapo_match *m; const struct nft_pipapo_field *f; const u8 *rp = (const u8 *)key; - const struct nft_set_ext *ext; unsigned long *res, *fill; bool map_index; int i; @@ -1246,13 +1246,13 @@ next_match: goto out; if (last) { - ext = &f->mt[ret].e->ext; - if (unlikely(nft_set_elem_expired(ext) || - !nft_set_elem_active(ext, genmask))) { - ext = NULL; + const struct nft_set_ext *e = &f->mt[ret].e->ext; + + if (unlikely(nft_set_elem_expired(e) || + !nft_set_elem_active(e, genmask))) goto next_match; - } + ext = e; goto out; } diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index 35d0409b0095..36affbb697c2 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -217,7 +217,7 @@ static int nft_socket_init(const struct nft_ctx *ctx, level += err; /* Implies a giant cgroup tree */ - if (WARN_ON_ONCE(level > 255)) + if (level > 255) return -EOPNOTSUPP; priv->level = level; diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 037f764822b9..82635dd2cfa5 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -651,6 +651,12 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch); + for (i = nbands; i < oldbands; i++) { + if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) + list_del_init(&q->classes[i].alist); + qdisc_purge_queue(q->classes[i].qdisc); + } + WRITE_ONCE(q->nbands, nbands); for (i = nstrict; i < q->nstrict; i++) { if (q->classes[i].qdisc->q.qlen) { @@ -658,11 +664,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, q->classes[i].deficit = quanta[i]; } } - for (i = q->nbands; i < oldbands; i++) { - if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) - list_del_init(&q->classes[i].alist); - qdisc_purge_queue(q->classes[i].qdisc); - } WRITE_ONCE(q->nstrict, nstrict); memcpy(q->prio2band, priomap, sizeof(priomap)); diff --git a/net/sctp/input.c b/net/sctp/input.c index 2dc2666988fb..7e99894778d4 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb) * it's better to just linearize it otherwise crc computing * takes longer. 
*/ - if ((!is_gso && skb_linearize(skb)) || + if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) || !pskb_may_pull(skb, sizeof(struct sctphdr))) goto discard_it; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 46c156b121db..e2c5e0e626f9 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -257,20 +257,47 @@ svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg, } static int -svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg) +svc_tcp_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags) { union { struct cmsghdr cmsg; u8 buf[CMSG_SPACE(sizeof(u8))]; } u; - struct socket *sock = svsk->sk_sock; + u8 alert[2]; + struct kvec alert_kvec = { + .iov_base = alert, + .iov_len = sizeof(alert), + }; + struct msghdr msg = { + .msg_flags = *msg_flags, + .msg_control = &u, + .msg_controllen = sizeof(u), + }; + int ret; + + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1, + alert_kvec.iov_len); + ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT); + if (ret > 0 && + tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) { + iov_iter_revert(&msg.msg_iter, ret); + ret = svc_tcp_sock_process_cmsg(sock, &msg, &u.cmsg, -EAGAIN); + } + return ret; +} + +static int +svc_tcp_sock_recvmsg(struct svc_sock *svsk, struct msghdr *msg) +{ int ret; + struct socket *sock = svsk->sk_sock; - msg->msg_control = &u; - msg->msg_controllen = sizeof(u); ret = sock_recvmsg(sock, msg, MSG_DONTWAIT); - if (unlikely(msg->msg_controllen != sizeof(u))) - ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret); + if (msg->msg_flags & MSG_CTRUNC) { + msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR); + if (ret == 0 || ret == -EIO) + ret = svc_tcp_sock_recv_cmsg(sock, &msg->msg_flags); + } return ret; } @@ -321,7 +348,7 @@ static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen, iov_iter_advance(&msg.msg_iter, seek); buflen -= seek; } - len = svc_tcp_sock_recv_cmsg(svsk, &msg); + len = svc_tcp_sock_recvmsg(svsk, &msg); if (len > 0) svc_flush_bvec(bvec, len, seek); @@ -1018,7 +1045,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk, iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen; iov.iov_len = want; iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want); - len = svc_tcp_sock_recv_cmsg(svsk, &msg); + len = svc_tcp_sock_recvmsg(svsk, &msg); if (len < 0) return len; svsk->sk_tcplen += len; diff --git a/net/tls/tls.h b/net/tls/tls.h index 774859b63f0d..4e077068e6d9 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -196,7 +196,7 @@ void tls_strp_msg_done(struct tls_strparser *strp); int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb); void tls_rx_msg_ready(struct tls_strparser *strp); -void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); +bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); int tls_strp_msg_cow(struct tls_sw_context_rx *ctx); struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx); int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst); diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 095cf31bae0b..d71643b494a1 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -475,7 +475,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len) strp->stm.offset = offset; } -void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) +bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) { struct strp_msg *rxm; struct tls_msg *tlm; @@ -484,8 +484,11 @@ void 
tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len); if (!strp->copy_mode && force_refresh) { - if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len)) - return; + if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) { + WRITE_ONCE(strp->msg_ready, 0); + memset(&strp->stm, 0, sizeof(strp->stm)); + return false; + } tls_strp_load_anchor_with_queue(strp, strp->stm.full_len); } @@ -495,6 +498,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) rxm->offset = strp->stm.offset; tlm = tls_msg(strp->anchor); tlm->control = strp->mark; + + return true; } /* Called with lock held on lower socket */ diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 549d1ea01a72..51c98a007dda 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1384,7 +1384,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, return sock_intr_errno(timeo); } - tls_strp_msg_load(&ctx->strp, released); + if (unlikely(!tls_strp_msg_load(&ctx->strp, released))) + return tls_rx_rec_wait(sk, psock, nonblock, false); return 1; } diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index ead6a3c14b87..bebb355f3ffe 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -689,7 +689,8 @@ static int __vsock_bind_connectible(struct vsock_sock *vsk, unsigned int i; for (i = 0; i < MAX_PORT_RETRIES; i++) { - if (port <= LAST_RESERVED_PORT) + if (port == VMADDR_PORT_ANY || + port <= LAST_RESERVED_PORT) port = LAST_RESERVED_PORT + 1; new_addr.svm_port = port++; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index d2819baea414..c7a1f080d2de 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -155,7 +155,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } - if (skb_is_gso(skb) && unlikely(xmit_xfrm_check_overflow(skb))) { + if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) || + unlikely(xmit_xfrm_check_overflow(skb)))) { struct sk_buff *segs; /* Packet got rerouted, fixup features and segment it. 
*/ @@ -415,10 +416,12 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) struct net_device *dev = x->xso.dev; bool check_tunnel_size; - if (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED) + if (!x->type_offload || + (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap)) return false; - if ((dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) { + if ((!dev || dev == xfrm_dst_path(dst)->dev) && + !xdst->child->xfrm) { mtu = xfrm_state_mtu(x, xdst->child_mtu_cached); if (skb->len <= mtu) goto ok; @@ -430,6 +433,9 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) return false; ok: + if (!dev) + return true; + check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->props.mode == XFRM_MODE_TUNNEL; switch (x->props.family) { diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 77db3b5fe4ac..78fcbb89cf32 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -3297,7 +3297,7 @@ void xfrm_state_fini(struct net *net) unsigned int sz; flush_work(&net->xfrm.state_hash_work); - xfrm_state_flush(net, IPSEC_PROTO_ANY, false); + xfrm_state_flush(net, 0, false); flush_work(&xfrm_state_gc_work); WARN_ON(!list_empty(&net->xfrm.state_all)); diff --git a/rust/Makefile b/rust/Makefile index 4263462b8470..bfa915b0e588 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -65,6 +65,10 @@ core-cfgs = \ core-edition := $(if $(call rustc-min-version,108700),2024,2021) +# `rustdoc` did not save the target modifiers, thus workaround for +# the time being (https://github.com/rust-lang/rust/issues/144521). +rustdoc_modifiers_workaround := $(if $(call rustc-min-version,108800),-Cunsafe-allow-abi-mismatch=fixed-x18) + # `rustc` recognizes `--remap-path-prefix` since 1.26.0, but `rustdoc` only # since Rust 1.81.0. Moreover, `rustdoc` ICEs on out-of-tree builds since Rust # 1.82.0 (https://github.com/rust-lang/rust/issues/138520). Thus workaround both @@ -77,6 +81,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $< -Zunstable-options --generate-link-to-definition \ --output $(rustdoc_output) \ --crate-name $(subst rustdoc-,,$@) \ + $(rustdoc_modifiers_workaround) \ $(if $(rustdoc_host),,--sysroot=/dev/null) \ @$(objtree)/include/generated/rustc_cfg $< @@ -106,14 +111,14 @@ rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \ rustdoc-macros: private rustdoc_host = yes rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \ --extern proc_macro -rustdoc-macros: $(src)/macros/lib.rs FORCE +rustdoc-macros: $(src)/macros/lib.rs rustdoc-clean FORCE +$(call if_changed,rustdoc) # Starting with Rust 1.82.0, skipping `-Wrustdoc::unescaped_backticks` should # not be needed -- see https://github.com/rust-lang/rust/pull/128307. 
rustdoc-core: private skip_flags = --edition=2021 -Wrustdoc::unescaped_backticks rustdoc-core: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs) -rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE +rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs rustdoc-clean FORCE +$(call if_changed,rustdoc) rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE @@ -125,7 +130,8 @@ rustdoc-ffi: $(src)/ffi.rs rustdoc-core FORCE rustdoc-pin_init_internal: private rustdoc_host = yes rustdoc-pin_init_internal: private rustc_target_flags = --cfg kernel \ --extern proc_macro --crate-type proc-macro -rustdoc-pin_init_internal: $(src)/pin-init/internal/src/lib.rs FORCE +rustdoc-pin_init_internal: $(src)/pin-init/internal/src/lib.rs \ + rustdoc-clean FORCE +$(call if_changed,rustdoc) rustdoc-pin_init: private rustdoc_host = yes @@ -143,6 +149,9 @@ rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-ffi rustdoc-macros \ $(obj)/bindings.o FORCE +$(call if_changed,rustdoc) +rustdoc-clean: FORCE + $(Q)rm -rf $(rustdoc_output) + quiet_cmd_rustc_test_library = $(RUSTC_OR_CLIPPY_QUIET) TL $< cmd_rustc_test_library = \ OBJTREE=$(abspath $(objtree)) \ @@ -215,6 +224,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $< --extern bindings --extern uapi \ --no-run --crate-name kernel -Zunstable-options \ --sysroot=/dev/null \ + $(rustdoc_modifiers_workaround) \ --test-builder $(objtree)/scripts/rustdoc_test_builder \ $< $(rustdoc_test_kernel_quiet); \ $(objtree)/scripts/rustdoc_test_gen diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs index 3bb7c83966cf..4a62f9fd88b7 100644 --- a/rust/kernel/drm/device.rs +++ b/rust/kernel/drm/device.rs @@ -10,7 +10,8 @@ use crate::{ error::from_err_ptr, error::Result, prelude::*, - types::{ARef, AlwaysRefCounted, Opaque}, + sync::aref::{ARef, AlwaysRefCounted}, + types::Opaque, }; use core::{mem, ops::Deref, ptr, ptr::NonNull}; diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs index dae0f4d1bbe3..91e13b6ca26a 100644 --- a/rust/kernel/drm/driver.rs +++ b/rust/kernel/drm/driver.rs @@ -8,7 +8,7 @@ use crate::{ bindings, device, devres, drm, error::{to_result, Result}, prelude::*, - types::ARef, + sync::aref::ARef, }; use macros::vtable; diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs index fd872de3b669..6ccbb25628a1 100644 --- a/rust/kernel/drm/gem/mod.rs +++ b/rust/kernel/drm/gem/mod.rs @@ -10,7 +10,8 @@ use crate::{ drm::driver::{AllocImpl, AllocOps}, error::{to_result, Result}, prelude::*, - types::{ARef, AlwaysRefCounted, Opaque}, + sync::aref::{ARef, AlwaysRefCounted}, + types::Opaque, }; use core::{ops::Deref, ptr::NonNull}; diff --git a/rust/kernel/drm/ioctl.rs b/rust/kernel/drm/ioctl.rs index fdec01c37168..af1bb29cf06d 100644 --- a/rust/kernel/drm/ioctl.rs +++ b/rust/kernel/drm/ioctl.rs @@ -83,7 +83,7 @@ pub mod internal { /// /// ```ignore /// fn foo(device: &kernel::drm::Device<Self>, -/// data: &Opaque<uapi::argument_type>, +/// data: &mut uapi::argument_type, /// file: &kernel::drm::File<Self::File>, /// ) -> Result<u32> /// ``` @@ -138,9 +138,12 @@ macro_rules! declare_drm_ioctls { // SAFETY: The ioctl argument has size `_IOC_SIZE(cmd)`, which we // asserted above matches the size of this type, and all bit patterns of // UAPI structs must be valid. 
- let data = unsafe { - &*(raw_data as *const $crate::types::Opaque<$crate::uapi::$struct>) - }; + // The `ioctl` argument is exclusively owned by the handler + // and guaranteed by the C implementation (`drm_ioctl()`) to remain + // valid for the entire lifetime of the reference taken here. + // There is no concurrent access or aliasing; no other references + // to this object exist during this call. + let data = unsafe { &mut *(raw_data.cast::<$crate::uapi::$struct>()) }; // SAFETY: This is just the DRM file structure let file = unsafe { $crate::drm::File::from_raw(raw_file) }; diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c index e27a36e4e92a..e90c4047ea62 100644 --- a/sound/hda/codecs/realtek/alc269.c +++ b/sound/hda/codecs/realtek/alc269.c @@ -7140,6 +7140,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x1ee7, 0x2078, "HONOR BRB-X M1010", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -7158,6 +7159,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0xf111, 0x000b, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), #if 0 diff --git a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c index 45ac5e41bd4f..06c7bc2b9e9d 100644 --- a/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c +++ b/sound/hda/codecs/side-codecs/tas2781_hda_i2c.c @@ -265,7 +265,7 @@ static const struct snd_kcontrol_new tas2770_snd_controls[] = { }; static const struct snd_kcontrol_new tas2781_snd_controls[] = { - ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain", TAS2781_AMP_LEVEL, + ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Volume", TAS2781_AMP_LEVEL, 1, 0, 20, 0, tas2781_amp_getvol, tas2781_amp_putvol, amp_vol_tlv), ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load", 0, diff --git a/sound/hda/controllers/intel.c b/sound/hda/controllers/intel.c index fcf67e97a546..1bb3ff55b115 100644 --- a/sound/hda/controllers/intel.c +++ b/sound/hda/controllers/intel.c @@ -2077,7 +2077,6 @@ static const struct pci_device_id driver_denylist[] = { { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */ { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */ { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */ - { PCI_DEVICE_SUB(0x1022, 0x15e3, 0x1022, 0xd601) }, /* ASRock X670E Taichi */ {} }; diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c index 4418b9ae33e6..b33344f65b8c 100644 --- a/sound/pci/azt3328.c +++ b/sound/pci/azt3328.c @@ -412,25 +412,25 @@ snd_azf3328_ctrl_outl(const struct snd_azf3328 *chip, unsigned reg, u32 value) outl(value, chip->ctrl_io + reg); } -static 
inline void +static inline void __maybe_unused snd_azf3328_game_outb(const struct snd_azf3328 *chip, unsigned reg, u8 value) { outb(value, chip->game_io + reg); } -static inline void +static inline void __maybe_unused snd_azf3328_game_outw(const struct snd_azf3328 *chip, unsigned reg, u16 value) { outw(value, chip->game_io + reg); } -static inline u8 +static inline u8 __maybe_unused snd_azf3328_game_inb(const struct snd_azf3328 *chip, unsigned reg) { return inb(chip->game_io + reg); } -static inline u16 +static inline u16 __maybe_unused snd_azf3328_game_inw(const struct snd_azf3328 *chip, unsigned reg) { return inw(chip->game_io + reg); diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index bf362bfca456..ce74818bd715 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -111,7 +111,6 @@ source "sound/soc/bcm/Kconfig" source "sound/soc/cirrus/Kconfig" source "sound/soc/dwc/Kconfig" source "sound/soc/fsl/Kconfig" -source "sound/soc/generic/Kconfig" source "sound/soc/google/Kconfig" source "sound/soc/hisilicon/Kconfig" source "sound/soc/jz4740/Kconfig" @@ -149,5 +148,8 @@ source "sound/soc/codecs/Kconfig" source "sound/soc/sdw_utils/Kconfig" +# generic frame-work +source "sound/soc/generic/Kconfig" + endif # SND_SOC diff --git a/sound/soc/codecs/aw87390.c b/sound/soc/codecs/aw87390.c index 110009616966..ef6f64856988 100644 --- a/sound/soc/codecs/aw87390.c +++ b/sound/soc/codecs/aw87390.c @@ -177,7 +177,7 @@ static int aw87390_profile_info(struct snd_kcontrol *kcontrol, { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw87390 *aw87390 = snd_soc_component_get_drvdata(codec); - char *prof_name, *name; + char *prof_name; int count, ret; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; @@ -194,17 +194,15 @@ static int aw87390_profile_info(struct snd_kcontrol *kcontrol, if (uinfo->value.enumerated.item >= count) uinfo->value.enumerated.item = count - 1; - name = uinfo->value.enumerated.name; count = uinfo->value.enumerated.item; ret = aw87390_dev_get_prof_name(aw87390->aw_pa, count, &prof_name); if (ret) { - strscpy(uinfo->value.enumerated.name, "null", - strlen("null") + 1); + strscpy(uinfo->value.enumerated.name, "null"); return 0; } - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); + strscpy(uinfo->value.enumerated.name, prof_name); return 0; } diff --git a/sound/soc/codecs/aw88081.c b/sound/soc/codecs/aw88081.c index 3dd8428f08cc..d61a7b8c5470 100644 --- a/sound/soc/codecs/aw88081.c +++ b/sound/soc/codecs/aw88081.c @@ -914,12 +914,11 @@ static int aw88081_profile_info(struct snd_kcontrol *kcontrol, ret = aw88081_dev_get_prof_name(aw88081->aw_pa, count, &prof_name); if (ret) { - strscpy(uinfo->value.enumerated.name, "null", - sizeof(uinfo->value.enumerated.name)); + strscpy(uinfo->value.enumerated.name, "null"); return 0; } - strscpy(uinfo->value.enumerated.name, prof_name, sizeof(uinfo->value.enumerated.name)); + strscpy(uinfo->value.enumerated.name, prof_name); return 0; } diff --git a/sound/soc/codecs/aw88166.c b/sound/soc/codecs/aw88166.c index 4f76ebe11cc7..28f62b991ef2 100644 --- a/sound/soc/codecs/aw88166.c +++ b/sound/soc/codecs/aw88166.c @@ -1478,7 +1478,7 @@ static int aw88166_profile_info(struct snd_kcontrol *kcontrol, { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88166 *aw88166 = snd_soc_component_get_drvdata(codec); - char *prof_name, *name; + char *prof_name; int count, ret; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; @@ -1495,17 +1495,15 @@ static int aw88166_profile_info(struct snd_kcontrol 
*kcontrol, if (uinfo->value.enumerated.item >= count) uinfo->value.enumerated.item = count - 1; - name = uinfo->value.enumerated.name; count = uinfo->value.enumerated.item; ret = aw88166_dev_get_prof_name(aw88166->aw_pa, count, &prof_name); if (ret) { - strscpy(uinfo->value.enumerated.name, "null", - strlen("null") + 1); + strscpy(uinfo->value.enumerated.name, "null"); return 0; } - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); + strscpy(uinfo->value.enumerated.name, prof_name); return 0; } diff --git a/sound/soc/codecs/aw88261.c b/sound/soc/codecs/aw88261.c index fb99871578c5..de11ae8dd9d9 100644 --- a/sound/soc/codecs/aw88261.c +++ b/sound/soc/codecs/aw88261.c @@ -819,7 +819,7 @@ static int aw88261_profile_info(struct snd_kcontrol *kcontrol, { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88261 *aw88261 = snd_soc_component_get_drvdata(codec); - char *prof_name, *name; + char *prof_name; int count, ret; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; @@ -836,17 +836,15 @@ static int aw88261_profile_info(struct snd_kcontrol *kcontrol, if (uinfo->value.enumerated.item >= count) uinfo->value.enumerated.item = count - 1; - name = uinfo->value.enumerated.name; count = uinfo->value.enumerated.item; ret = aw88261_dev_get_prof_name(aw88261->aw_pa, count, &prof_name); if (ret) { - strscpy(uinfo->value.enumerated.name, "null", - strlen("null") + 1); + strscpy(uinfo->value.enumerated.name, "null"); return 0; } - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); + strscpy(uinfo->value.enumerated.name, prof_name); return 0; } diff --git a/sound/soc/codecs/aw88395/aw88395.c b/sound/soc/codecs/aw88395/aw88395.c index aea44a199b98..fb563b4c6971 100644 --- a/sound/soc/codecs/aw88395/aw88395.c +++ b/sound/soc/codecs/aw88395/aw88395.c @@ -175,7 +175,7 @@ static int aw88395_profile_info(struct snd_kcontrol *kcontrol, { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88395 *aw88395 = snd_soc_component_get_drvdata(codec); - char *prof_name, *name; + char *prof_name; int count, ret; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; @@ -192,17 +192,15 @@ static int aw88395_profile_info(struct snd_kcontrol *kcontrol, if (uinfo->value.enumerated.item >= count) uinfo->value.enumerated.item = count - 1; - name = uinfo->value.enumerated.name; count = uinfo->value.enumerated.item; ret = aw88395_dev_get_prof_name(aw88395->aw_pa, count, &prof_name); if (ret) { - strscpy(uinfo->value.enumerated.name, "null", - strlen("null") + 1); + strscpy(uinfo->value.enumerated.name, "null"); return 0; } - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); + strscpy(uinfo->value.enumerated.name, prof_name); return 0; } diff --git a/sound/soc/codecs/aw88399.c b/sound/soc/codecs/aw88399.c index c23e70d64d0c..58846feb013d 100644 --- a/sound/soc/codecs/aw88399.c +++ b/sound/soc/codecs/aw88399.c @@ -1831,7 +1831,7 @@ static int aw88399_profile_info(struct snd_kcontrol *kcontrol, { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct aw88399 *aw88399 = snd_soc_component_get_drvdata(codec); - char *prof_name, *name; + char *prof_name; int count, ret; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; @@ -1848,17 +1848,15 @@ static int aw88399_profile_info(struct snd_kcontrol *kcontrol, if (uinfo->value.enumerated.item >= count) uinfo->value.enumerated.item = count - 1; - name = uinfo->value.enumerated.name; count = uinfo->value.enumerated.item; ret = aw88399_dev_get_prof_name(aw88399->aw_pa, count, &prof_name); if 
(ret) { - strscpy(uinfo->value.enumerated.name, "null", - strlen("null") + 1); + strscpy(uinfo->value.enumerated.name, "null"); return 0; } - strscpy(name, prof_name, sizeof(uinfo->value.enumerated.name)); + strscpy(uinfo->value.enumerated.name, prof_name); return 0; } diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c index 40d79bee4584..1da34cb3505f 100644 --- a/sound/soc/codecs/lpass-tx-macro.c +++ b/sound/soc/codecs/lpass-tx-macro.c @@ -2229,7 +2229,7 @@ static int tx_macro_register_mclk_output(struct tx_macro *tx) } static const struct snd_soc_component_driver tx_macro_component_drv = { - .name = "RX-MACRO", + .name = "TX-MACRO", .probe = tx_macro_component_probe, .controls = tx_macro_snd_controls, .num_controls = ARRAY_SIZE(tx_macro_snd_controls), diff --git a/sound/soc/codecs/rt1320-sdw.c b/sound/soc/codecs/rt1320-sdw.c index b13d7a99bf63..dcddc28e8856 100644 --- a/sound/soc/codecs/rt1320-sdw.c +++ b/sound/soc/codecs/rt1320-sdw.c @@ -109,6 +109,7 @@ static const struct reg_sequence rt1320_blind_write[] = { { 0x0000d540, 0x01 }, { 0xd172, 0x2a }, { 0xc5d6, 0x01 }, + { 0xd478, 0xff }, }; static const struct reg_sequence rt1320_vc_blind_write[] = { @@ -159,7 +160,7 @@ static const struct reg_sequence rt1320_vc_blind_write[] = { { 0xd471, 0x3a }, { 0xd474, 0x11 }, { 0xd475, 0x32 }, - { 0xd478, 0x64 }, + { 0xd478, 0xff }, { 0xd479, 0x20 }, { 0xd47a, 0x10 }, { 0xd47c, 0xff }, diff --git a/sound/soc/codecs/rt721-sdca.c b/sound/soc/codecs/rt721-sdca.c index f6f7c2ffde1c..a4bd29d7220b 100644 --- a/sound/soc/codecs/rt721-sdca.c +++ b/sound/soc/codecs/rt721-sdca.c @@ -278,6 +278,8 @@ static void rt721_sdca_jack_preset(struct rt721_sdca_priv *rt721) RT721_ENT_FLOAT_CTL1, 0x4040); rt_sdca_index_write(rt721->mbq_regmap, RT721_HDA_SDCA_FLOAT, RT721_ENT_FLOAT_CTL4, 0x1201); + rt_sdca_index_write(rt721->mbq_regmap, RT721_BOOST_CTRL, + RT721_BST_4CH_TOP_GATING_CTRL1, 0x002a); regmap_write(rt721->regmap, 0x2f58, 0x07); } diff --git a/sound/soc/codecs/rt721-sdca.h b/sound/soc/codecs/rt721-sdca.h index 0a82c107b19a..71fac9cd8739 100644 --- a/sound/soc/codecs/rt721-sdca.h +++ b/sound/soc/codecs/rt721-sdca.h @@ -56,6 +56,7 @@ struct rt721_sdca_dmic_kctrl_priv { #define RT721_CBJ_CTRL 0x0a #define RT721_CAP_PORT_CTRL 0x0c #define RT721_CLASD_AMP_CTRL 0x0d +#define RT721_BOOST_CTRL 0x0f #define RT721_VENDOR_REG 0x20 #define RT721_RC_CALIB_CTRL 0x40 #define RT721_VENDOR_EQ_L 0x53 @@ -93,6 +94,9 @@ struct rt721_sdca_dmic_kctrl_priv { /* Index (NID:0dh) */ #define RT721_CLASD_AMP_2CH_CAL 0x14 +/* Index (NID:0fh) */ +#define RT721_BST_4CH_TOP_GATING_CTRL1 0x05 + /* Index (NID:20h) */ #define RT721_JD_PRODUCT_NUM 0x00 #define RT721_ANALOG_BIAS_CTL3 0x04 diff --git a/sound/soc/codecs/sma1307.c b/sound/soc/codecs/sma1307.c index b3d401ada176..6a601e7134ea 100644 --- a/sound/soc/codecs/sma1307.c +++ b/sound/soc/codecs/sma1307.c @@ -1749,7 +1749,7 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil sma1307->set.header_size * sizeof(int)); if ((sma1307->set.checksum >> 8) != SMA1307_SETTING_CHECKSUM) { - dev_err(sma1307->dev, "%s: failed by dismatch \"%s\"\n", + dev_err(sma1307->dev, "%s: checksum failed \"%s\"\n", __func__, setting_file); sma1307->set.status = false; return; diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c index 9f4d965a1335..676130f4cf3e 100644 --- a/sound/soc/codecs/tas2781-i2c.c +++ b/sound/soc/codecs/tas2781-i2c.c @@ -908,10 +908,10 @@ static const struct snd_kcontrol_new tasdevice_cali_controls[] = { }; 
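The aw87390/aw88081/aw88166/aw88261/aw88395/aw88399 hunks above all convert strscpy() calls to the two-argument form. A short sketch of why that form is preferable, assuming a plain character-array destination (names invented):

	char name[44];	/* e.g. snd_ctl_elem_info value.enumerated.name */

	/* Fragile: the bound is derived from the source string, so a
	 * prof_name longer than name[] overruns the destination.
	 */
	strscpy(name, prof_name, strlen(prof_name) + 1);

	/* Two-argument form: the bound is sizeof(name), taken from the
	 * destination array at compile time, so it cannot drift from the
	 * destination's real size.
	 */
	strscpy(name, prof_name);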
static const struct snd_kcontrol_new tas2781_snd_controls[] = { - SOC_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain", TAS2781_AMP_LEVEL, + SOC_SINGLE_RANGE_EXT_TLV("Speaker Analog Volume", TAS2781_AMP_LEVEL, 1, 0, 20, 0, tas2781_amp_getvol, tas2781_amp_putvol, amp_vol_tlv), - SOC_SINGLE_RANGE_EXT_TLV("Speaker Digital Gain", TAS2781_DVC_LVL, + SOC_SINGLE_RANGE_EXT_TLV("Speaker Digital Volume", TAS2781_DVC_LVL, 0, 0, 200, 1, tas2781_digital_getvol, tas2781_digital_putvol, dvc_tlv), }; @@ -1480,7 +1480,7 @@ static ssize_t acoustic_ctl_write(struct file *file, return PTR_ERR(src); if (src[0] > max_pkg_len && src[0] != count) { - dev_err(priv->dev, "pkg(%u), max(%u), count(%u) dismatch.\n", + dev_err(priv->dev, "pkg(%u), max(%u), count(%u) mismatch.\n", src[0], max_pkg_len, (unsigned int)count); ret = 0; goto exit; diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index c313b654236c..d0367b21f775 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -809,9 +809,9 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir) * are running concurrently. */ /* Software Reset */ - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); /* Clear SR bit to finish the reset */ - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0); + regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR, 0); } static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, @@ -930,11 +930,11 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai) unsigned int ofs = sai->soc_data->reg_offset; /* Software Reset for both Tx and Rx */ - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR); - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); /* Clear SR bit to finish the reset */ - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0); - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0); + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0); regmap_update_bits(sai->regmap, FSL_SAI_TCR1(ofs), FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth), @@ -1824,11 +1824,11 @@ static int fsl_sai_runtime_resume(struct device *dev) regcache_cache_only(sai->regmap, false); regcache_mark_dirty(sai->regmap); - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR); - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, FSL_SAI_CSR_SR); usleep_range(1000, 2000); - regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0); - regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0); + regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs), FSL_SAI_CSR_SR, 0); + regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs), FSL_SAI_CSR_SR, 0); ret = regcache_sync(sai->regmap); if (ret) diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c index 0e489097d9c1..6ca21780f21d 100644 --- a/sound/soc/stm/stm32_i2s.c +++ b/sound/soc/stm/stm32_i2s.c @@ -469,11 +469,8 @@ static int stm32_i2smclk_determine_rate(struct clk_hw *hw, int ret; ret = stm32_i2s_calc_clk_div(i2s, req->best_parent_rate, req->rate); - if (ret) { - req->rate = ret; - - return 0; - } + if (ret) + return ret; 
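	/*
	 * A sketch of the clk contract restored by the stm32_i2s fix above:
	 * a .determine_rate callback must return 0 or a negative errno, and
	 * on error propagate that errno to the caller rather than store it
	 * in req->rate and report success. Illustrative only;
	 * compute_divider() is a hypothetical helper.
	 *
	 *	static int my_determine_rate(struct clk_hw *hw,
	 *				     struct clk_rate_request *req)
	 *	{
	 *		int div = compute_divider(req->best_parent_rate,
	 *					  req->rate);
	 *
	 *		if (div < 0)
	 *			return div;
	 *		req->rate = req->best_parent_rate / div;
	 *		return 0;
	 *	}
	 */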
mclk->freq = req->best_parent_rate / i2s->divider; diff --git a/sound/usb/stream.c b/sound/usb/stream.c index ad6ced780634..acf3dc2d79e0 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -341,20 +341,28 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor len = le16_to_cpu(cluster->wLength); c = 0; - p += sizeof(struct uac3_cluster_header_descriptor); + p += sizeof(*cluster); + len -= sizeof(*cluster); - while (((p - (void *)cluster) < len) && (c < channels)) { + while (len > 0 && (c < channels)) { struct uac3_cluster_segment_descriptor *cs_desc = p; u16 cs_len; u8 cs_type; + if (len < sizeof(*p)) + break; cs_len = le16_to_cpu(cs_desc->wLength); + if (len < cs_len) + break; cs_type = cs_desc->bSegmentType; if (cs_type == UAC3_CHANNEL_INFORMATION) { struct uac3_cluster_information_segment_descriptor *is = p; unsigned char map; + if (cs_len < sizeof(*is)) + break; + /* * TODO: this conversion is not complete, update it * after adding UAC3 values to asound.h @@ -456,6 +464,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor chmap->map[c++] = map; } p += cs_len; + len -= cs_len; } if (channels < c) @@ -881,7 +890,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, u64 badd_formats = 0; unsigned int num_channels; struct audioformat *fp; - u16 cluster_id, wLength; + u16 cluster_id, wLength, cluster_wLength; int clock = 0; int err; @@ -1011,6 +1020,16 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip, return ERR_PTR(-EIO); } + cluster_wLength = le16_to_cpu(cluster->wLength); + if (cluster_wLength < sizeof(*cluster) || + cluster_wLength > wLength) { + dev_err(&dev->dev, + "%u:%d : invalid Cluster Descriptor size\n", + iface_no, altno); + kfree(cluster); + return ERR_PTR(-EIO); + } + num_channels = cluster->bNrChannels; chmap = convert_chmap_v3(cluster); kfree(cluster); diff --git a/sound/usb/validate.c b/sound/usb/validate.c index 6fe206f6e911..4f4e8e87a14c 100644 --- a/sound/usb/validate.c +++ b/sound/usb/validate.c @@ -221,6 +221,17 @@ static bool validate_uac3_feature_unit(const void *p, return d->bLength >= sizeof(*d) + 4 + 2; } +static bool validate_uac3_power_domain_unit(const void *p, + const struct usb_desc_validator *v) +{ + const struct uac3_power_domain_descriptor *d = p; + + if (d->bLength < sizeof(*d)) + return false; + /* baEntities[] + wPDomainDescrStr */ + return d->bLength >= sizeof(*d) + d->bNrEntities + 2; +} + static bool validate_midi_out_jack(const void *p, const struct usb_desc_validator *v) { @@ -285,6 +296,7 @@ static const struct usb_desc_validator audio_validators[] = { struct uac3_clock_multiplier_descriptor), /* UAC_VERSION_3, UAC3_SAMPLE_RATE_CONVERTER: not implemented yet */ /* UAC_VERSION_3, UAC3_CONNECTORS: not implemented yet */ + FUNC(UAC_VERSION_3, UAC3_POWER_DOMAIN, validate_uac3_power_domain_unit), { } /* terminator */ }; diff --git a/tools/testing/selftests/drivers/net/napi_threaded.py b/tools/testing/selftests/drivers/net/napi_threaded.py index b2698db39817..9699a100a87d 100755 --- a/tools/testing/selftests/drivers/net/napi_threaded.py +++ b/tools/testing/selftests/drivers/net/napi_threaded.py @@ -35,6 +35,8 @@ def _setup_deferred_cleanup(cfg) -> None: threaded = cmd(f"cat /sys/class/net/{cfg.ifname}/threaded").stdout defer(_set_threaded_state, cfg, threaded) + return combined + def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None: """ @@ -49,7 +51,7 @@ def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None: napi0_id = napis[0]['id'] napi1_id = napis[1]['id'] 
- _setup_deferred_cleanup(cfg) + qcnt = _setup_deferred_cleanup(cfg) # set threaded _set_threaded_state(cfg, 1) @@ -62,7 +64,7 @@ def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None: nl.napi_set({'id': napi1_id, 'threaded': 'disabled'}) cmd(f"ethtool -L {cfg.ifname} combined 1") - cmd(f"ethtool -L {cfg.ifname} combined 2") + cmd(f"ethtool -L {cfg.ifname} combined {qcnt}") _assert_napi_threaded_enabled(nl, napi0_id) _assert_napi_threaded_disabled(nl, napi1_id) @@ -80,7 +82,7 @@ def change_num_queues(cfg, nl) -> None: napi0_id = napis[0]['id'] napi1_id = napis[1]['id'] - _setup_deferred_cleanup(cfg) + qcnt = _setup_deferred_cleanup(cfg) # set threaded _set_threaded_state(cfg, 1) @@ -90,7 +92,7 @@ def change_num_queues(cfg, nl) -> None: _assert_napi_threaded_enabled(nl, napi1_id) cmd(f"ethtool -L {cfg.ifname} combined 1") - cmd(f"ethtool -L {cfg.ifname} combined 2") + cmd(f"ethtool -L {cfg.ifname} combined {qcnt}") # check napi threaded is set for both napis _assert_napi_threaded_enabled(nl, napi0_id) diff --git a/tools/testing/selftests/net/forwarding/sch_ets.sh b/tools/testing/selftests/net/forwarding/sch_ets.sh index 1f6f53e284b5..6269d5e23487 100755 --- a/tools/testing/selftests/net/forwarding/sch_ets.sh +++ b/tools/testing/selftests/net/forwarding/sch_ets.sh @@ -11,6 +11,7 @@ ALL_TESTS=" ets_test_strict ets_test_mixed ets_test_dwrr + ets_test_plug classifier_mode ets_test_strict ets_test_mixed diff --git a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh index 08240d3e3c87..79d837a2868a 100644 --- a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh +++ b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh @@ -224,3 +224,11 @@ ets_test_dwrr() ets_set_dwrr_two_bands xfail_on_slow ets_dwrr_test_01 } + +ets_test_plug() +{ + ets_change_qdisc $put 2 "3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3" "1514 1514" + tc qdisc add dev $put handle 20: parent 10:4 plug + start_traffic_pktsize 100 $h1.10 192.0.2.1 192.0.2.2 00:c1:a0:c1:a0:00 "-c 1" + ets_qdisc_setup $put 2 +} diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 5ded3b3a7538..d8cfcf9bb825 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -2708,6 +2708,69 @@ TEST(prequeue) { close(cfd); } +TEST(data_steal) { + struct tls_crypto_info_keys tls; + char buf[20000], buf2[20000]; + struct sockaddr_in addr; + int sfd, cfd, ret, fd; + int pid, status; + socklen_t len; + + len = sizeof(addr); + memrnd(buf, sizeof(buf)); + + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_256, &tls, 0); + + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl(INADDR_ANY); + addr.sin_port = 0; + + fd = socket(AF_INET, SOCK_STREAM, 0); + sfd = socket(AF_INET, SOCK_STREAM, 0); + + ASSERT_EQ(bind(sfd, &addr, sizeof(addr)), 0); + ASSERT_EQ(listen(sfd, 10), 0); + ASSERT_EQ(getsockname(sfd, &addr, &len), 0); + ASSERT_EQ(connect(fd, &addr, sizeof(addr)), 0); + ASSERT_GE(cfd = accept(sfd, &addr, &len), 0); + close(sfd); + + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + if (ret) { + ASSERT_EQ(errno, ENOENT); + SKIP(return, "no TLS support"); + } + ASSERT_EQ(setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")), 0); + + /* Spawn a child and get it into the read wait path of the underlying + * TCP socket. 
+ */ + pid = fork(); + ASSERT_GE(pid, 0); + if (!pid) { + EXPECT_EQ(recv(cfd, buf, sizeof(buf), MSG_WAITALL), + sizeof(buf)); + exit(!__test_passed(_metadata)); + } + + usleep(2000); + ASSERT_EQ(setsockopt(fd, SOL_TLS, TLS_TX, &tls, tls.len), 0); + ASSERT_EQ(setsockopt(cfd, SOL_TLS, TLS_RX, &tls, tls.len), 0); + + EXPECT_EQ(send(fd, buf, sizeof(buf), 0), sizeof(buf)); + usleep(2000); + EXPECT_EQ(recv(cfd, buf2, sizeof(buf2), MSG_DONTWAIT), -1); + /* Don't check errno, the error will be different depending + * on what random bytes TLS interpreted as the record length. + */ + + close(fd); + close(cfd); + + EXPECT_EQ(wait(&status), pid); + EXPECT_EQ(status, 0); +} + static void __attribute__((constructor)) fips_check(void) { int res; FILE *f; diff --git a/tools/testing/selftests/proc/proc-maps-race.c b/tools/testing/selftests/proc/proc-maps-race.c index 66773685a047..94bba4553130 100644 --- a/tools/testing/selftests/proc/proc-maps-race.c +++ b/tools/testing/selftests/proc/proc-maps-race.c @@ -202,11 +202,11 @@ static void print_first_lines(char *text, int nr) int offs = end - text; text[offs] = '\0'; - printf(text); + printf("%s", text); text[offs] = '\n'; printf("\n"); } else { - printf(text); + printf("%s", text); } } @@ -221,7 +221,7 @@ static void print_last_lines(char *text, int nr) nr--; start--; } - printf(start); + printf("%s", start); } static void print_boundaries(const char *title, FIXTURE_DATA(proc_maps_race) *self)
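The sound/usb hunks above all apply one defensive pattern: before trusting a length field read out of a device-supplied descriptor, verify that the claimed size covers the descriptor's own header and fits inside the bytes actually remaining, and only then advance by that size. The following self-contained sketch illustrates the bounded-walk idea under simplified assumptions: the 3-byte segment header (a little-endian u16 length plus a one-byte type) and the walk_segments() helper are illustrative inventions, not the kernel's uac3 parsing code.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: a segment is a 3-byte header (LE u16 length that covers the
 * whole segment, then a u8 type) followed by length - 3 payload bytes. */
#define SEG_HDR_LEN 3

static int walk_segments(const uint8_t *p, size_t len)
{
	int count = 0;

	while (len >= SEG_HDR_LEN) {
		/* assemble the length byte-wise so host endianness is moot */
		uint16_t seg_len = (uint16_t)(p[0] | (p[1] << 8));
		uint8_t seg_type = p[2];

		/* reject a length that does not cover the header (this also
		 * guarantees forward progress) or overruns the buffer */
		if (seg_len < SEG_HDR_LEN || seg_len > len)
			break;

		printf("segment type %d, %d bytes\n", seg_type, seg_len);

		p += seg_len;	/* advance by the validated size only */
		len -= seg_len;
		count++;
	}
	return count;
}

int main(void)
{
	/* two well-formed segments, then one whose claimed length overruns
	 * the buffer and is rejected instead of being read out of bounds */
	static const uint8_t buf[] = {
		4, 0, 1, 0xaa,	/* length = 4, type 1 */
		3, 0, 2,	/* length = 3, type 2 */
		200, 0, 3,	/* length = 200: loop stops here */
	};

	printf("parsed %d segments\n", walk_segments(buf, sizeof(buf)));
	return 0;
}

The same reasoning appears twice more in the patches above: snd_usb_get_audioformat_uac3() now rejects a cluster whose wLength is smaller than the header or larger than the transfer that fetched it, and validate_uac3_power_domain_unit() checks bLength against sizeof(*d) before dereferencing bNrEntities, since that field lies inside the region the first check guards.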