| author | Dave Airlie <airlied@redhat.com> | 2025-12-02 18:09:01 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2025-12-02 18:09:08 +1000 |
| commit | b3239df349c2c2c94686674489c9629c89ca49a1 (patch) | |
| tree | 5e6652ebbc602866122c26058dfb523a6f446169 /drivers/gpu | |
| parent | 62433efe0b06042d8016ba0713d801165a939229 (diff) | |
| parent | db2bad93fe206c95808b7a164a29424791728752 (diff) | |
Merge tag 'drm-misc-next-2025-12-01-1' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
Extra drm-misc-next for v6.19-rc1:
UAPI Changes:
- Add support for drm colorop pipeline.
- Add COLOR PIPELINE plane property.
- Add DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE (see the userspace sketch below).
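
As context for the new UAPI, here is a minimal userspace sketch of opting into the cap and finding the plane property it unlocks. It is not part of this pull: it assumes libdrm and a uapi header that already defines DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE; the "COLOR_PIPELINE" property name and bypass-as-zero semantics are taken from the patches below.

```c
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void dump_color_pipeline(int fd, uint32_t plane_id)
{
	drmModeObjectProperties *props;
	uint32_t i;

	/* Without this client cap the kernel keeps the property hidden. */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE, 1))
		return;

	props = drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	if (!props)
		return;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "COLOR_PIPELINE"))
			/* Per this series, the value is the object ID of the first
			 * colorop of the selected pipeline, or 0 for bypass. */
			printf("plane %u: COLOR_PIPELINE -> colorop %" PRIu64 "\n",
			       plane_id, props->prop_values[i]);

		drmModeFreeProperty(prop);
	}

	drmModeFreeObjectProperties(props);
}
```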
Cross-subsystem Changes:
- Attempt to use higher order mappings in system heap allocator.
- Always taint kernel with sw-sync.
Core Changes:
- Small fixes to drm/gem.
- Add emergency restore support to drm-client.
- Allocate and release fb_info in single place.
- Rework ttm pipelined eviction fence handling.
Driver Changes:
- Support the drm color pipeline in vkms, amdgpu.
- Add NVJPG driver for tegra.
- Assorted small fixes and updates to rockchip, bridge/dw-hdmi-qp,
panthor.
- Add ASL CS5263 DP-to-HDMI simple bridge.
- Add and improve support for LG LD070WX3-SL01 MIPI DSI, Samsung LTL106AL0,
  Samsung LTL106AL01, Raystar RFF500F-AWH-DNN, Winstar WF70A8SYJHLNGA,
  Wanchanglong w552946aaa, Samsung SOFEF00, and the Lenovo X13s panel.
- Add it66122 support to the it66121 bridge driver.
- Support the Mali-G1 GPU in panthor.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patch.msgid.link/aa5cbd50-7676-4a59-bbed-e8428af86804@linux.intel.com
Diffstat (limited to 'drivers/gpu')
104 files changed, 6454 insertions, 728 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 7789f42027ff..4b3f3ad5058a 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -41,6 +41,7 @@ drm-y := \ drm_bridge.o \ drm_cache.o \ drm_color_mgmt.o \ + drm_colorop.o \ drm_connector.o \ drm_crtc.o \ drm_displayid.o \ @@ -76,7 +77,8 @@ drm-y := \ drm-$(CONFIG_DRM_CLIENT) += \ drm_client.o \ drm_client_event.o \ - drm_client_modeset.o + drm_client_modeset.o \ + drm_client_sysrq.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 838a51b6098b..421c2bbe2b6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2185,8 +2185,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) } else { drm_sched_entity_destroy(&adev->mman.high_pr); drm_sched_entity_destroy(&adev->mman.low_pr); - dma_fence_put(man->move); - man->move = NULL; + /* Drop all the old fences since re-creating the scheduler entities + * will allocate new contexts. + */ + ttm_resource_manager_cleanup(man); } /* this just adjusts TTM size idea, which sets lpfn to the correct value */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 7329b8cc2576..8e949fe77312 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -39,7 +39,8 @@ AMDGPUDM = \ amdgpu_dm_psr.o \ amdgpu_dm_replay.o \ amdgpu_dm_quirks.o \ - amdgpu_dm_wb.o + amdgpu_dm_wb.o \ + amdgpu_dm_colorop.o ifdef CONFIG_DRM_AMD_DC_FP AMDGPUDM += dc_fpu.o diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e6728fd12eeb..020eb1651219 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5888,6 +5888,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, *color_space = COLOR_SPACE_SRGB; + /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */ + if (plane_state->state && plane_state->state->plane_color_pipeline) + return 0; + /* DRM color properties only affect non-RGB formats. */ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a4ac6d442278..1dcc79b35225 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -26,12 +26,39 @@ #include "amdgpu.h" #include "amdgpu_mode.h" #include "amdgpu_dm.h" +#include "amdgpu_dm_colorop.h" #include "dc.h" #include "modules/color/color_gamma.h" /** * DOC: overview * + * We have three types of color management in the AMD display driver. + * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties + * 2. AMD driver private color management on &drm_plane and &drm_crtc + * 3. AMD plane color pipeline + * + * The CRTC properties are the original color management. When they were + * implemented per-plane color management was not a thing yet. Because + * of that we could get away with plumbing the DEGAMMA and CTM + * properties to pre-blending HW functions. This is incompatible with + * per-plane color management, such as via the AMD private properties or + * the new drm_plane color pipeline. 
The only compatible CRTC property + * with per-plane color management is the GAMMA property as it is + * applied post-blending. + * + * The AMD driver private color management properties are only exposed + * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They + * are temporary building blocks on the path to full-fledged &drm_plane + * and &drm_crtc color pipelines and lay the driver's groundwork for the + * color pipelines. + * + * The AMD plane color pipeline describes AMD's &drm_colorops via the + * &drm_plane's COLOR_PIPELINE property. + * + * drm_crtc Properties + * ------------------- + * * The DC interface to HW gives us the following color management blocks * per pipe (surface): * @@ -42,36 +69,93 @@ * - Surface regamma LUT (normalized) * - Output CSC (normalized) * - * But these aren't a direct mapping to DRM color properties. The current DRM - * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware - * is essentially giving: + * But these aren't a direct mapping to DRM color properties. The + * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma + * while our hardware is essentially giving: * * Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM * - * The input gamma LUT block isn't really applicable here since it operates - * on the actual input data itself rather than the HW fp representation. The - * input and output CSC blocks are technically available to use as part of - * the DC interface but are typically used internally by DC for conversions - * between color spaces. These could be blended together with user - * adjustments in the future but for now these should remain untouched. + * The input gamma LUT block isn't really applicable here since it + * operates on the actual input data itself rather than the HW fp + * representation. The input and output CSC blocks are technically + * available to use as part of the DC interface but are typically used + * internally by DC for conversions between color spaces. These could be + * blended together with user adjustments in the future but for now + * these should remain untouched. + * + * The pipe blending also happens after these blocks so we don't + * actually support any CRTC props with correct blending with multiple + * planes - but we can still support CRTC color management properties in + * DM in most single plane cases correctly with clever management of the + * DC interface in DM. + * + * As per DRM documentation, blocks should be in hardware bypass when + * their respective property is set to NULL. A linear DGM/RGM LUT should + * also considered as putting the respective block into bypass mode. + * + * This means that the following configuration is assumed to be the + * default: + * + * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC + * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass + * + * AMD Private Color Management on drm_plane + * ----------------------------------------- + * + * The AMD private color management properties on a &drm_plane are: * - * The pipe blending also happens after these blocks so we don't actually - * support any CRTC props with correct blending with multiple planes - but we - * can still support CRTC color management properties in DM in most single - * plane cases correctly with clever management of the DC interface in DM. 
+ * - AMD_PLANE_DEGAMMA_LUT + * - AMD_PLANE_DEGAMMA_LUT_SIZE + * - AMD_PLANE_DEGAMMA_TF + * - AMD_PLANE_HDR_MULT + * - AMD_PLANE_CTM + * - AMD_PLANE_SHAPER_LUT + * - AMD_PLANE_SHAPER_LUT_SIZE + * - AMD_PLANE_SHAPER_TF + * - AMD_PLANE_LUT3D + * - AMD_PLANE_LUT3D_SIZE + * - AMD_PLANE_BLEND_LUT + * - AMD_PLANE_BLEND_LUT_SIZE + * - AMD_PLANE_BLEND_TF * - * As per DRM documentation, blocks should be in hardware bypass when their - * respective property is set to NULL. A linear DGM/RGM LUT should also - * considered as putting the respective block into bypass mode. + * The AMD private color management property on a &drm_crtc is: * - * This means that the following - * configuration is assumed to be the default: + * - AMD_CRTC_REGAMMA_TF + * + * Use of these properties is discouraged. + * + * AMD plane color pipeline + * ------------------------ + * + * The AMD &drm_plane color pipeline is advertised for DCN generations + * 3.0 and newer. It exposes these elements in this order: + * + * 1. 1D curve colorop + * 2. Multiplier + * 3. 3x4 CTM + * 4. 1D curve colorop + * 5. 1D LUT + * 6. 3D LUT + * 7. 1D curve colorop + * 8. 1D LUT + * + * The multiplier (#2) is a simple multiplier that is applied to all + * channels. + * + * The 3x4 CTM (#3) is a simple 3x4 matrix. + * + * #1, and #7 are non-linear to linear curves. #4 is a linear to + * non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or + * their inverse. + * + * The 1D LUTs (#5 and #8) are plain 4096 entry LUTs. + * + * The 3DLUT (#6) is a tetrahedrally interpolated 17 cube LUT. * - * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... - * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass */ #define MAX_DRM_LUT_VALUE 0xFFFF +#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF #define SDR_WHITE_LEVEL_INIT_VALUE 80 /** @@ -342,6 +426,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size) } /** + * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob. + * @blob: DRM color mgmt property blob + * @size: lut size + * + * Returns: + * DRM LUT or NULL + */ +static const struct drm_color_lut32 * +__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size) +{ + *size = blob ? drm_color_lut32_size(blob) : 0; + return blob ? (struct drm_color_lut32 *)blob->data : NULL; +} + +/** * __is_lut_linear - check if the given lut is a linear mapping of values * @lut: given lut to check values * @size: lut size @@ -415,6 +514,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, } /** + * __drm_lut32_to_dc_gamma - convert the drm_color_lut to dc_gamma. + * @lut: DRM lookup table for color conversion + * @gamma: DC gamma to set entries + * + * The conversion depends on the size of the lut - whether or not it's legacy. + */ +static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma) +{ + int i; + + for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) { + gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE); + gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE); + gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE); + } +} + +/** * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix * @ctm: DRM color transformation matrix * @matrix: DC CSC float matrix @@ -566,6 +683,63 @@ static int __set_output_tf(struct dc_transfer_func *func, return res ? 
0 : -ENOMEM; } +/** + * __set_output_tf_32 - calculates the output transfer function based on expected input space. + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut + * @has_rom: if ROM can be used for hardcoded curve + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ +static int __set_output_tf_32(struct dc_transfer_func *func, + const struct drm_color_lut32 *lut, uint32_t lut_size, + bool has_rom) +{ + struct dc_gamma *gamma = NULL; + struct calculate_buffer cal_buffer = {0}; + bool res; + + cal_buffer.buffer_index = -1; + + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->num_entries = lut_size; + __drm_lut32_to_dc_gamma(lut, gamma); + } + + if (func->tf == TRANSFER_FUNCTION_LINEAR) { + /* + * Color module doesn't like calculating regamma params + * on top of a linear input. But degamma params can be used + * instead to simulate this. + */ + if (gamma) + gamma->type = GAMMA_CUSTOM; + res = mod_color_calculate_degamma_params(NULL, func, + gamma, gamma != NULL); + } else { + /* + * Assume sRGB. The actual mapping will depend on whether the + * input was legacy or not. + */ + if (gamma) + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, + has_rom, NULL, &cal_buffer); + } + + if (gamma) + dc_gamma_release(&gamma); + + return res ? 0 : -ENOMEM; +} + + static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf, const struct drm_color_lut *regamma_lut, uint32_t regamma_size, bool has_rom, @@ -638,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f return res ? 0 : -ENOMEM; } +/** + * __set_input_tf_32 - calculates the input transfer function based on expected + * input space. + * @caps: dc color capabilities + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut. + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ +static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func, + const struct drm_color_lut32 *lut, uint32_t lut_size) +{ + struct dc_gamma *gamma = NULL; + bool res; + + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; + + __drm_lut32_to_dc_gamma(lut, gamma); + } + + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); + + if (gamma) + dc_gamma_release(&gamma); + + return res ? 
0 : -ENOMEM; +} + static enum dc_transfer_func_predefined amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) { @@ -667,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) } } +static enum dc_transfer_func_predefined +amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf) +{ + switch (tf) { + case DRM_COLOROP_1D_CURVE_SRGB_EOTF: + case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: + return TRANSFER_FUNCTION_SRGB; + case DRM_COLOROP_1D_CURVE_PQ_125_EOTF: + case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF: + return TRANSFER_FUNCTION_PQ; + case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF: + case DRM_COLOROP_1D_CURVE_BT2020_OETF: + return TRANSFER_FUNCTION_BT709; + case DRM_COLOROP_1D_CURVE_GAMMA22: + case DRM_COLOROP_1D_CURVE_GAMMA22_INV: + return TRANSFER_FUNCTION_GAMMA22; + default: + return TRANSFER_FUNCTION_LINEAR; + } +} + static void __to_dc_lut3d_color(struct dc_rgb *rgb, const struct drm_color_lut lut, int bit_precision) @@ -720,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut, __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); } +static void __to_dc_lut3d_32_color(struct dc_rgb *rgb, + const struct drm_color_lut32 lut, + int bit_precision) +{ + rgb->red = drm_color_lut32_extract(lut.red, bit_precision); + rgb->green = drm_color_lut32_extract(lut.green, bit_precision); + rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision); +} + +static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut, + uint32_t lut3d_size, + struct tetrahedral_params *params, + bool use_tetrahedral_9, + int bit_depth) +{ + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_i, i; + + + if (use_tetrahedral_9) { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + } else { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + } + + for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) { + /* + * We should consider the 3D LUT RGB values are distributed + * along four arrays lut0-3 where the first sizes 1229 and the + * other 1228. The bit depth supported for 3dlut channel is + * 12-bit, but DC also supports 10-bit. + * + * TODO: improve color pipeline API to enable the userspace set + * bit depth and 3D LUT size/stride, as specified by VA-API. 
+ */ + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); + __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth); + __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth); + __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth); + } + /* lut0 has 1229 points (lut_size/4 + 1) */ + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); +} + /* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream * @drm_lut3d: user 3D LUT * @drm_lut3d_size: size of 3D LUT @@ -1178,6 +1462,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state, } static int +__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state, + struct drm_colorop_state *colorop_state) +{ + struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func; + struct drm_colorop *colorop = colorop_state->colorop; + struct drm_device *drm = colorop->dev; + + if (colorop->type != DRM_COLOROP_1D_CURVE) + return -EINVAL; + + if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) + return -EINVAL; + + if (colorop_state->bypass) { + tf->type = TF_TYPE_BYPASS; + tf->tf = TRANSFER_FUNCTION_LINEAR; + return 0; + } + + drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id); + + tf->type = TF_TYPE_PREDEFINED; + tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + + return 0; +} + +static int +__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + int i = 0; + + old_colorop = colorop; + + /* 1st op: 1d curve - degamma */ + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (!colorop_state) + return -EINVAL; + + return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state); +} + +static int +__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + const struct drm_device *dev = colorop->dev; + const struct drm_property_blob *blob; + struct drm_color_ctm_3x4 *ctm = NULL; + int i = 0; + + /* 3x4 matrix */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) { + drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id); + blob = colorop_state->data; + if (blob->length == sizeof(struct drm_color_ctm_3x4)) { + ctm = (struct drm_color_ctm_3x4 *) blob->data; + __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix); + dc_plane_state->gamut_remap_matrix.enable_remap = true; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } else { + drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n", + blob->length, sizeof(struct drm_color_ctm_3x4)); + return -EINVAL; + } + } + + return 0; +} + +static int 
+__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + const struct drm_device *dev = colorop->dev; + int i = 0; + + /* Multiplier */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) { + drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id); + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier); + } + + return 0; +} + +static int +__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; + const struct drm_color_lut32 *shaper_lut; + struct drm_device *dev = colorop->dev; + bool enabled = false; + u32 shaper_size; + int i = 0, ret = 0; + + /* 1D Curve - SHAPER TF */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) { + drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + ret = __set_output_tf(tf, 0, 0, false); + if (ret) + return ret; + enabled = true; + } + + /* 1D LUT - SHAPER LUT */ + colorop = old_colorop->next; + if (!colorop) { + drm_dbg(dev, "no Shaper LUT colorop found\n"); + return -EINVAL; + } + + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) { + drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size); + shaper_size = shaper_lut != NULL ? 
shaper_size : 0; + + /* Custom LUT size must be the same as supported size */ + if (shaper_size == colorop->size) { + ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false); + if (ret) + return ret; + enabled = true; + } + } + + if (!enabled) + tf->type = TF_TYPE_BYPASS; + + return 0; +} + +/* __set_colorop_3dlut - set DRM 3D LUT to DC stream + * @drm_lut3d: user 3D LUT + * @drm_lut3d_size: size of 3D LUT + * @lut3d: DC 3D LUT + * + * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it + * on DCN accordingly. + * + * Returns: + * 0 on success. -EINVAL if drm_lut3d_size is zero. + */ +static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d, + uint32_t drm_lut3d_size, + struct dc_3dlut *lut) +{ + if (!drm_lut3d_size) { + lut->state.bits.initialized = 0; + return -EINVAL; + } + + /* Only supports 17x17x17 3D LUT (12-bit) now */ + lut->lut_3d.use_12bits = true; + lut->lut_3d.use_tetrahedral_9 = false; + + lut->state.bits.initialized = 1; + __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d, + lut->lut_3d.use_tetrahedral_9, 12); + + return 0; +} + +static int +__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; + struct drm_atomic_state *state = plane_state->state; + const struct amdgpu_device *adev = drm_to_adev(colorop->dev); + const struct drm_device *dev = colorop->dev; + const struct drm_color_lut32 *lut3d; + uint32_t lut3d_size; + int i = 0, ret = 0; + + /* 3D LUT */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) { + if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) { + drm_dbg(dev, "3D LUT is not supported by hardware\n"); + return -EINVAL; + } + + drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id); + lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size); + lut3d_size = lut3d != NULL ? lut3d_size : 0; + ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func); + if (ret) { + drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n", + colorop->base.id, lut3d_size); + return ret; + } + + /* 3D LUT requires shaper. 
If shaper colorop is bypassed, enable shaper curve + * with TRANSFER_FUNCTION_LINEAR + */ + if (tf->type == TF_TYPE_BYPASS) { + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = TRANSFER_FUNCTION_LINEAR; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + ret = __set_output_tf_32(tf, NULL, 0, false); + } + } + + return ret; +} + +static int +__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct drm_colorop *colorop) +{ + struct drm_colorop *old_colorop; + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; + struct drm_atomic_state *state = plane_state->state; + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; + struct dc_transfer_func *tf = &dc_plane_state->blend_tf; + const struct drm_color_lut32 *blend_lut = NULL; + struct drm_device *dev = colorop->dev; + uint32_t blend_size = 0; + int i = 0; + + /* 1D Curve - BLND TF */ + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE && + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + __set_input_tf_32(NULL, tf, blend_lut, blend_size); + } + + /* 1D Curve - BLND LUT */ + colorop = old_colorop->next; + if (!colorop) { + drm_dbg(dev, "no Blend LUT colorop found\n"); + return -EINVAL; + } + + old_colorop = colorop; + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { + if (new_colorop_state->colorop == old_colorop && + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { + colorop_state = new_colorop_state; + break; + } + } + + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT && + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { + drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id); + tf->type = TF_TYPE_DISTRIBUTED_POINTS; + tf->tf = default_tf; + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size); + blend_size = blend_lut != NULL ? 
blend_size : 0; + + /* Custom LUT size must be the same as supported size */ + if (blend_size == colorop->size) + __set_input_tf_32(NULL, tf, blend_lut, blend_size); + } + + return 0; +} + +static int amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, struct dc_plane_state *dc_plane_state) { @@ -1227,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, return 0; } +static int +amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct drm_colorop *colorop = plane_state->color_pipeline; + struct drm_device *dev = plane_state->plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + int ret; + + /* 1D Curve - DEGAM TF */ + if (!colorop) + return -EINVAL; + + ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* Multiplier */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no multiplier colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* 3x4 matrix */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no 3x4 matrix colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { + /* 1D Curve & LUT - SHAPER TF & LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no Shaper TF colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* Shaper LUT colorop is already handled, just skip here */ + colorop = colorop->next; + if (!colorop) + return -EINVAL; + + /* 3D LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no 3D LUT colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + } + + /* 1D Curve & LUT - BLND TF & LUT */ + colorop = colorop->next; + if (!colorop) { + drm_dbg(dev, "no Blend TF colorop found\n"); + return -EINVAL; + } + + ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop); + if (ret) + return ret; + + /* BLND LUT colorop is already handled, just skip here */ + colorop = colorop->next; + if (!colorop) + return -EINVAL; + + return 0; +} + /** * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. * @crtc: amdgpu_dm crtc state @@ -1323,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, dc_plane_state->input_csc_color_matrix.enable_adjustment = false; } + if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state)) + return 0; + return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c new file mode 100644 index 000000000000..d585618b8064 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drm_print.h> +#include <drm/drm_plane.h> +#include <drm/drm_property.h> +#include <drm/drm_colorop.h> + +#include "amdgpu.h" +#include "amdgpu_dm_colorop.h" +#include "dc.h" + +const u64 amdgpu_dm_supported_degam_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); + +const u64 amdgpu_dm_supported_shaper_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22); + +const u64 amdgpu_dm_supported_blnd_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); + +#define MAX_COLOR_PIPELINE_OPS 10 + +#define LUT3D_SIZE 17 + +int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) +{ + struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; + struct drm_device *dev = plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + int ret; + int i = 0; + + memset(ops, 0, sizeof(ops)); + + /* 1D curve - DEGAM TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_degam_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + list->type = ops[i]->base.id; + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); + + i++; + + /* Multiplier */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 3x4 matrix */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { + /* 1D curve - SHAPER TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = 
drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_shaper_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 1D LUT - SHAPER LUT */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + + /* 3D LUT */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE, + DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + + i++; + } + + /* 1D curve - BLND TF */ + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, + amdgpu_dm_supported_blnd_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + i++; + + /* 1D LUT - BLND LUT */ + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); + if (!ops[i]) { + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i-1], ops[i]); + return 0; + +cleanup: + if (ret == -ENOMEM) + drm_err(plane->dev, "KMS: Failed to allocate colorop\n"); + + drm_colorop_pipeline_destroy(dev); + + return ret; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h new file mode 100644 index 000000000000..2e1617ffc8ee --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_COLOROP_H__ +#define __AMDGPU_DM_COLOROP_H__ + +extern const u64 amdgpu_dm_supported_degam_tfs; +extern const u64 amdgpu_dm_supported_shaper_tfs; +extern const u64 amdgpu_dm_supported_blnd_tfs; + +int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list); + +#endif /* __AMDGPU_DM_COLOROP_H__*/ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 38f9ea313dcb..697e232acebf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -736,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, { struct amdgpu_crtc *acrtc = NULL; struct drm_plane *cursor_plane; - bool is_dcn; + bool has_degamma; int res = -ENOMEM; cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); @@ -775,20 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, dm->adev->mode_info.crtcs[crtc_index] = acrtc; - /* Don't enable DRM CRTC degamma property for DCE since it doesn't - * support programmable degamma anywhere. + /* Don't enable DRM CRTC degamma property for + * 1. Degamma is replaced by color pipeline. + * 2. DCE since it doesn't support programmable degamma anywhere. + * 3. DCN401 since pre-blending degamma LUT doesn't apply to cursor. */ - is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; - /* Dont't enable DRM CRTC degamma property for DCN401 since the - * pre-blending degamma LUT doesn't apply to cursor, and therefore - * can't work similar to a post-blending degamma LUT as in other hw - * versions. - * TODO: revisit it once KMS plane color API is merged. - */ - drm_crtc_enable_color_mgmt(&acrtc->base, - (is_dcn && - dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? - MAX_COLOR_LUT_ENTRIES : 0, + if (plane->color_pipeline_property) + has_degamma = false; + else + has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01; + + drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? 
MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index e027798ece03..2e3ee78999d9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -37,6 +37,7 @@ #include "amdgpu_display.h" #include "amdgpu_dm_trace.h" #include "amdgpu_dm_plane.h" +#include "amdgpu_dm_colorop.h" #include "gc/gc_11_0_0_offset.h" #include "gc/gc_11_0_0_sh_mask.h" @@ -1782,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane, return 0; } +#else + +#define MAX_COLOR_PIPELINES 5 + +static int +dm_plane_init_colorops(struct drm_plane *plane) +{ + struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES]; + struct drm_device *dev = plane->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = adev->dm.dc; + int len = 0; + int ret; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return 0; + + /* initialize pipeline */ + if (dc->ctx->dce_version >= DCN_VERSION_3_0) { + ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]); + if (ret) { + drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n", + plane->base.id, ret); + return ret; + } + len++; + + /* Create COLOR_PIPELINE property and attach */ + drm_plane_create_color_pipeline_property(plane, pipelines, len); + } + + return 0; +} #endif static const struct drm_plane_funcs dm_plane_funcs = { @@ -1890,7 +1924,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, #ifdef AMD_PRIVATE_COLOR dm_atomic_plane_attach_color_mgmt_properties(dm, plane); +#else + res = dm_plane_init_colorops(plane); + if (res) + return res; #endif + /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index be703d35f6b7..8bbae94804f8 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -44,10 +44,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = fbh->dev; + struct fb_info *info = fbh->info; struct drm_mode_fb_cmd2 mode; struct armada_framebuffer *dfb; struct armada_gem_object *obj; - struct fb_info *info; int size, ret; void *ptr; @@ -91,12 +91,6 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, if (IS_ERR(dfb)) return PTR_ERR(dfb); - info = drm_fb_helper_alloc_info(fbh); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_fballoc; - } - info->fbops = &armada_fb_ops; info->fix.smem_start = obj->phys_addr; info->fix.smem_len = obj->obj.size; @@ -112,8 +106,4 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, (unsigned long long)obj->phys_addr); return 0; - - err_fballoc: - dfb->fb.funcs->destroy(&dfb->fb); - return ret; } diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index aa7b1dcc5d70..0185f61e6e59 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -287,6 +287,7 @@ enum chip_id { ID_IT6610, ID_IT66121, + ID_IT66122, }; struct it66121_chip_info { @@ -312,7 +313,7 @@ struct it66121_ctx { u8 swl; bool auto_cts; } audio; - const struct it66121_chip_info *info; + enum chip_id id; }; static const struct regmap_range_cfg it66121_regmap_banks[] = { @@ -402,7 +403,7 @@ static int 
it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_EC1, 0); if (ret) @@ -428,7 +429,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_EC1, IT66121_AFE_IP_EC1); @@ -449,7 +450,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx, if (ret) return ret; - if (ctx->info->id == ID_IT6610) { + if (ctx->id == ID_IT6610) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, IT6610_AFE_XP_BYPASS, IT6610_AFE_XP_BYPASS); @@ -599,7 +600,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge, if (ret) return ret; - if (ctx->info->id == ID_IT66121) { + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_RCLK, 0); if (ret) @@ -748,7 +749,7 @@ static int it66121_bridge_check(struct drm_bridge *bridge, { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); - if (ctx->info->id == ID_IT6610) { + if (ctx->id == ID_IT6610) { /* The IT6610 only supports these settings */ bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; @@ -802,7 +803,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge, if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI)) goto unlock; - if (ctx->info->id == ID_IT66121 && + if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) && regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, IT66121_CLK_BANK_PWROFF_TXCLK)) { @@ -815,7 +816,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge, if (it66121_configure_afe(ctx, adjusted_mode)) goto unlock; - if (ctx->info->id == ID_IT66121 && + if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) && regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, 0)) { goto unlock; @@ -1384,8 +1385,6 @@ static int it66121_audio_startup(struct device *dev, void *data) int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mutex_lock(&ctx->lock); ret = it661221_audio_output_enable(ctx, true); if (ret) @@ -1401,8 +1400,6 @@ static void it66121_audio_shutdown(struct device *dev, void *data) int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mutex_lock(&ctx->lock); ret = it661221_audio_output_enable(ctx, false); if (ret) @@ -1479,8 +1476,6 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev) .no_capture_mute = 1, }; - dev_dbg(dev, "%s\n", __func__); - if (!of_property_present(dev->of_node, "#sound-dai-cells")) { dev_info(dev, "No \"#sound-dai-cells\", no audio\n"); return 0; @@ -1504,13 +1499,20 @@ static const char * const it66121_supplies[] = { "vcn33", "vcn18", "vrf12" }; +static const struct it66121_chip_info it66xx_chip_info[] = { + {.id = ID_IT6610, .vid = 0xca00, .pid = 0x0611 }, + {.id = ID_IT66121, .vid = 0x4954, .pid = 0x0612 }, + {.id = ID_IT66122, .vid = 0x4954, .pid = 0x0622 }, +}; + static int it66121_probe(struct i2c_client *client) { u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 }; struct device_node *ep; - int ret; + int ret, i; struct it66121_ctx *ctx; struct device *dev = &client->dev; + 
const struct it66121_chip_info *chip_info; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(dev, "I2C check functionality failed.\n"); @@ -1528,7 +1530,6 @@ static int it66121_probe(struct i2c_client *client) ctx->dev = dev; ctx->client = client; - ctx->info = i2c_get_match_data(client); of_property_read_u32(ep, "bus-width", &ctx->bus_width); of_node_put(ep); @@ -1574,11 +1575,18 @@ static int it66121_probe(struct i2c_client *client) revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]); device_ids[1] &= IT66121_DEVICE_ID1_MASK; - if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid || - (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) { - return -ENODEV; + for (i = 0; i < ARRAY_SIZE(it66xx_chip_info); i++) { + chip_info = &it66xx_chip_info[i]; + if ((vendor_ids[1] << 8 | vendor_ids[0]) == chip_info->vid && + (device_ids[1] << 8 | device_ids[0]) == chip_info->pid) { + ctx->id = chip_info->id; + break; + } } + if (i == ARRAY_SIZE(it66xx_chip_info)) + return -ENODEV; + ctx->bridge.of_node = dev->of_node; ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA; ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; @@ -1612,28 +1620,18 @@ static void it66121_remove(struct i2c_client *client) mutex_destroy(&ctx->lock); } -static const struct it66121_chip_info it66121_chip_info = { - .id = ID_IT66121, - .vid = 0x4954, - .pid = 0x0612, -}; - -static const struct it66121_chip_info it6610_chip_info = { - .id = ID_IT6610, - .vid = 0xca00, - .pid = 0x0611, -}; - static const struct of_device_id it66121_dt_match[] = { - { .compatible = "ite,it66121", &it66121_chip_info }, - { .compatible = "ite,it6610", &it6610_chip_info }, + { .compatible = "ite,it6610" }, + { .compatible = "ite,it66121" }, + { .compatible = "ite,it66122" }, { } }; MODULE_DEVICE_TABLE(of, it66121_dt_match); static const struct i2c_device_id it66121_id[] = { - { "it66121", (kernel_ulong_t) &it66121_chip_info }, - { "it6610", (kernel_ulong_t) &it6610_chip_info }, + { .name = "it6610" }, + { .name = "it66121" }, + { .name = "it66122" }, { } }; MODULE_DEVICE_TABLE(i2c, it66121_id); diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c index e4d0bc2200f8..2cd1847ba776 100644 --- a/drivers/gpu/drm/bridge/simple-bridge.c +++ b/drivers/gpu/drm/bridge/simple-bridge.c @@ -262,6 +262,16 @@ static const struct of_device_id simple_bridge_match[] = { .connector_type = DRM_MODE_CONNECTOR_VGA, }, }, { + .compatible = "asl-tek,cs5263", + .data = &(const struct simple_bridge_info) { + .connector_type = DRM_MODE_CONNECTOR_HDMIA, + }, + }, { + .compatible = "parade,ps185hdm", + .data = &(const struct simple_bridge_info) { + .connector_type = DRM_MODE_CONNECTOR_HDMIA, + }, + }, { .compatible = "radxa,ra620", .data = &(const struct simple_bridge_info) { .connector_type = DRM_MODE_CONNECTOR_HDMIA, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 4ba7b339eff6..fe4c026280f0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -868,8 +868,9 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge, return; if (connector->display_info.is_hdmi) { - dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", - __func__, conn_state->hdmi.tmds_char_rate); + dev_dbg(hdmi->dev, "%s mode=HDMI %s rate=%llu bpc=%u\n", __func__, + drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format), + conn_state->hdmi.tmds_char_rate, conn_state->hdmi.output_bpc); 
op_mode = 0; hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; } else { @@ -1287,6 +1288,12 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->bridge.vendor = "Synopsys"; hdmi->bridge.product = "DW HDMI QP TX"; + if (plat_data->supported_formats) + hdmi->bridge.supported_formats = plat_data->supported_formats; + + if (plat_data->max_bpc) + hdmi->bridge.max_bpc = plat_data->max_bpc; + hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi); if (IS_ERR(hdmi->bridge.ddc)) return ERR_CAST(hdmi->bridge.ddc); diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c index 47e5f27eee58..28951e392482 100644 --- a/drivers/gpu/drm/clients/drm_fbdev_client.c +++ b/drivers/gpu/drm/clients/drm_fbdev_client.c @@ -38,9 +38,11 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client) } } -static int drm_fbdev_client_restore(struct drm_client_dev *client) +static int drm_fbdev_client_restore(struct drm_client_dev *client, bool force) { - drm_fb_helper_lastclose(client->dev); + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); return 0; } diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c index 19e55aa0ed74..4d3005273b27 100644 --- a/drivers/gpu/drm/clients/drm_log.c +++ b/drivers/gpu/drm/clients/drm_log.c @@ -315,6 +315,18 @@ static void drm_log_client_unregister(struct drm_client_dev *client) drm_client_release(client); } +static int drm_log_client_restore(struct drm_client_dev *client, bool force) +{ + int ret; + + if (force) + ret = drm_client_modeset_commit_locked(client); + else + ret = drm_client_modeset_commit(client); + + return ret; +} + static int drm_log_client_hotplug(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); @@ -348,6 +360,7 @@ static const struct drm_client_funcs drm_log_client_funcs = { .owner = THIS_MODULE, .free = drm_log_client_free, .unregister = drm_log_client_unregister, + .restore = drm_log_client_restore, .hotplug = drm_log_client_hotplug, .suspend = drm_log_client_suspend, .resume = drm_log_client_resume, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index e05820b18832..67e095e398a3 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -42,6 +42,7 @@ #include <drm/drm_mode.h> #include <drm/drm_print.h> #include <drm/drm_writeback.h> +#include <drm/drm_colorop.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -107,6 +108,7 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state) kfree(state->connectors); kfree(state->crtcs); kfree(state->planes); + kfree(state->colorops); kfree(state->private_objs); } EXPORT_SYMBOL(drm_atomic_state_default_release); @@ -138,6 +140,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) sizeof(*state->planes), GFP_KERNEL); if (!state->planes) goto fail; + state->colorops = kcalloc(dev->mode_config.num_colorop, + sizeof(*state->colorops), GFP_KERNEL); + if (!state->colorops) + goto fail; /* * Because drm_atomic_state can be committed asynchronously we need our @@ -251,6 +257,20 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->planes[i].new_state = NULL; } + for (i = 0; i < config->num_colorop; i++) { + struct drm_colorop *colorop = state->colorops[i].ptr; + + if (!colorop) + continue; + + drm_colorop_atomic_destroy_state(colorop, + state->colorops[i].state); + state->colorops[i].ptr 
= NULL; + state->colorops[i].state = NULL; + state->colorops[i].old_state = NULL; + state->colorops[i].new_state = NULL; + } + for (i = 0; i < state->num_private_objs; i++) { struct drm_private_obj *obj = state->private_objs[i].ptr; @@ -572,6 +592,55 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_atomic_get_plane_state); +/** + * drm_atomic_get_colorop_state - get colorop state + * @state: global atomic state object + * @colorop: colorop to get state object for + * + * This function returns the colorop state for the given colorop, allocating it + * if needed. It will also grab the relevant plane lock to make sure that the + * state is consistent. + * + * Returns: + * + * Either the allocated state or the error code encoded into the pointer. When + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the + * entire atomic sequence must be restarted. All other errors are fatal. + */ +struct drm_colorop_state * +drm_atomic_get_colorop_state(struct drm_atomic_state *state, + struct drm_colorop *colorop) +{ + int ret, index = drm_colorop_index(colorop); + struct drm_colorop_state *colorop_state; + + WARN_ON(!state->acquire_ctx); + + colorop_state = drm_atomic_get_new_colorop_state(state, colorop); + if (colorop_state) + return colorop_state; + + ret = drm_modeset_lock(&colorop->plane->mutex, state->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + colorop_state = drm_atomic_helper_colorop_duplicate_state(colorop); + if (!colorop_state) + return ERR_PTR(-ENOMEM); + + state->colorops[index].state = colorop_state; + state->colorops[index].ptr = colorop; + state->colorops[index].old_state = colorop->state; + state->colorops[index].new_state = colorop_state; + colorop_state->state = state; + + drm_dbg_atomic(colorop->dev, "Added [COLOROP:%d:%d] %p state to %p\n", + colorop->base.id, colorop->type, colorop_state, state); + + return colorop_state; +} +EXPORT_SYMBOL(drm_atomic_get_colorop_state); + static bool plane_switching_crtc(const struct drm_plane_state *old_plane_state, const struct drm_plane_state *new_plane_state) @@ -711,6 +780,46 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, return 0; } +static void drm_atomic_colorop_print_state(struct drm_printer *p, + const struct drm_colorop_state *state) +{ + struct drm_colorop *colorop = state->colorop; + + drm_printf(p, "colorop[%u]:\n", colorop->base.id); + drm_printf(p, "\ttype=%s\n", drm_get_colorop_type_name(colorop->type)); + if (colorop->bypass_property) + drm_printf(p, "\tbypass=%u\n", state->bypass); + + switch (colorop->type) { + case DRM_COLOROP_1D_CURVE: + drm_printf(p, "\tcurve_1d_type=%s\n", + drm_get_colorop_curve_1d_type_name(state->curve_1d_type)); + break; + case DRM_COLOROP_1D_LUT: + drm_printf(p, "\tsize=%d\n", colorop->size); + drm_printf(p, "\tinterpolation=%s\n", + drm_get_colorop_lut1d_interpolation_name(colorop->lut1d_interpolation)); + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); + break; + case DRM_COLOROP_CTM_3X4: + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); + break; + case DRM_COLOROP_MULTIPLIER: + drm_printf(p, "\tmultiplier=%llu\n", state->multiplier); + break; + case DRM_COLOROP_3D_LUT: + drm_printf(p, "\tsize=%d\n", colorop->size); + drm_printf(p, "\tinterpolation=%s\n", + drm_get_colorop_lut3d_interpolation_name(colorop->lut3d_interpolation)); + drm_printf(p, "\tdata blob id=%d\n", state->data ? 
state->data->base.id : 0); + break; + default: + break; + } + + drm_printf(p, "\tnext=%d\n", colorop->next ? colorop->next->base.id : 0); +} + static void drm_atomic_plane_print_state(struct drm_printer *p, const struct drm_plane_state *state) { @@ -732,7 +841,8 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "\tcolor-range=%s\n", drm_get_color_range_name(state->color_range)); drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); - + drm_printf(p, "\tcolor-pipeline=%d\n", + state->color_pipeline ? state->color_pipeline->base.id : 0); if (plane->funcs->atomic_print_state) plane->funcs->atomic_print_state(p, state); } @@ -1446,6 +1556,52 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_add_affected_planes); /** + * drm_atomic_add_affected_colorops - add colorops for plane + * @state: atomic state + * @plane: DRM plane + * + * This function walks the current configuration and adds all colorops + * currently used by @plane to the atomic configuration @state. This is useful + * when an atomic commit also needs to check all currently enabled colorop on + * @plane, e.g. when changing the mode. It's also useful when re-enabling a plane + * to avoid special code to force-enable all colorops. + * + * Since acquiring a colorop state will always also acquire the w/w mutex of the + * current plane for that colorop (if there is any) adding all the colorop states for + * a plane will not reduce parallelism of atomic updates. + * + * Returns: + * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK + * then the w/w mutex code has detected a deadlock and the entire atomic + * sequence must be restarted. All other errors are fatal. + */ +int +drm_atomic_add_affected_colorops(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + struct drm_colorop *colorop; + struct drm_colorop_state *colorop_state; + + WARN_ON(!drm_atomic_get_new_plane_state(state, plane)); + + drm_dbg_atomic(plane->dev, + "Adding all current colorops for [PLANE:%d:%s] to %p\n", + plane->base.id, plane->name, state); + + drm_for_each_colorop(colorop, plane->dev) { + if (colorop->plane != plane) + continue; + + colorop_state = drm_atomic_get_colorop_state(state, colorop); + if (IS_ERR(colorop_state)) + return PTR_ERR(colorop_state); + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_add_affected_colorops); + +/** * drm_atomic_check_only - check whether a given config would work * @state: atomic configuration to check * @@ -1843,6 +1999,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, bool take_locks) { struct drm_mode_config *config = &dev->mode_config; + struct drm_colorop *colorop; struct drm_plane *plane; struct drm_crtc *crtc; struct drm_connector *connector; @@ -1852,6 +2009,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, if (!drm_drv_uses_atomic_modeset(dev)) return; + list_for_each_entry(colorop, &config->colorop_list, head) { + if (take_locks) + drm_modeset_lock(&colorop->plane->mutex, NULL); + drm_atomic_colorop_print_state(p, colorop->state); + if (take_locks) + drm_modeset_unlock(&colorop->plane->mutex); + } + list_for_each_entry(plane, &config->plane_list, head) { if (take_locks) drm_modeset_lock(&plane->mutex, NULL); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index e641fcf8c568..10adac9397cf 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3184,6 +3184,8 
@@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_colorop *colorop; + struct drm_colorop_state *old_colorop_state, *new_colorop_state; struct drm_crtc_commit *commit; struct drm_private_obj *obj; struct drm_private_state *old_obj_state, *new_obj_state; @@ -3261,6 +3263,16 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, } } + for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) { + WARN_ON(colorop->state != old_colorop_state); + + old_colorop_state->state = state; + new_colorop_state->state = NULL; + + state->colorops[i].state = old_colorop_state; + colorop->state = new_colorop_state; + } + drm_panic_lock(state->dev, flags); for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { WARN_ON(plane->state != old_plane_state); diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 7142e163e618..cee6d8fc44ad 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -268,6 +268,11 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->color_range = val; } + if (plane->color_pipeline_property) { + /* default is always NULL, i.e., bypass */ + plane_state->color_pipeline = NULL; + } + if (plane->zpos_property) { if (!drm_object_property_get_default_value(&plane->base, plane->zpos_property, diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index b2cb5ae5a139..7320db4b8489 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -35,6 +35,7 @@ #include <drm/drm_drv.h> #include <drm/drm_writeback.h> #include <drm/drm_vblank.h> +#include <drm/drm_colorop.h> #include <linux/export.h> #include <linux/dma-fence.h> @@ -258,6 +259,34 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); /** + * drm_atomic_set_colorop_for_plane - set colorop for plane + * @plane_state: atomic state object for the plane + * @colorop: colorop to use for the plane + * + * Helper function to select the color pipeline on a plane by setting + * it to the first drm_colorop element of the pipeline. 
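As an aside on how the pipeline selected through this helper is typically consumed later, here is a minimal, hypothetical sketch of a driver walking the chain during atomic check. The foo_* name is invented and the body is not code from this series; only plane_state->color_pipeline, colorop->next and drm_atomic_get_colorop_state() are the interfaces actually added here.

/* Illustrative only: walk the colorop chain attached to a plane and skip
 * bypassed elements. Real drivers (amdgpu, vkms in this merge) translate
 * each colorop into their hardware-specific color blocks at this point.
 */
static int foo_plane_check_color_pipeline(struct drm_atomic_state *state,
					  struct drm_plane_state *plane_state)
{
	struct drm_colorop *colorop = plane_state->color_pipeline;
	struct drm_colorop_state *colorop_state;

	for (; colorop; colorop = colorop->next) {
		colorop_state = drm_atomic_get_colorop_state(state, colorop);
		if (IS_ERR(colorop_state))
			return PTR_ERR(colorop_state);

		if (colorop_state->bypass)
			continue;

		switch (colorop->type) {
		case DRM_COLOROP_1D_CURVE:
			/* program HW from colorop_state->curve_1d_type */
			break;
		case DRM_COLOROP_CTM_3X4:
			/* program the 3x4 matrix from colorop_state->data */
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}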
+ */ +void +drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state, + struct drm_colorop *colorop) +{ + struct drm_plane *plane = plane_state->plane; + + if (colorop) + drm_dbg_atomic(plane->dev, + "Set [COLOROP:%d] for [PLANE:%d:%s] state %p\n", + colorop->base.id, plane->base.id, plane->name, + plane_state); + else + drm_dbg_atomic(plane->dev, + "Set [NOCOLOROP] for [PLANE:%d:%s] state %p\n", + plane->base.id, plane->name, plane_state); + + plane_state->color_pipeline = colorop; +} +EXPORT_SYMBOL(drm_atomic_set_colorop_for_plane); + +/** * drm_atomic_set_crtc_for_connector - set CRTC for connector * @conn_state: atomic state object for the connector * @crtc: CRTC to use for the connector @@ -544,6 +573,16 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->color_encoding = val; } else if (property == plane->color_range_property) { state->color_range = val; + } else if (property == plane->color_pipeline_property) { + /* find DRM colorop object */ + struct drm_colorop *colorop = NULL; + + colorop = drm_colorop_find(dev, file_priv, val); + + if (val && !colorop) + return -EACCES; + + drm_atomic_set_colorop_for_plane(state, colorop); } else if (property == config->prop_fb_damage_clips) { ret = drm_property_replace_blob_from_id(dev, &state->fb_damage_clips, @@ -626,6 +665,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->color_encoding; } else if (property == plane->color_range_property) { *val = state->color_range; + } else if (property == plane->color_pipeline_property) { + *val = (state->color_pipeline) ? state->color_pipeline->base.id : 0; } else if (property == config->prop_fb_damage_clips) { *val = (state->fb_damage_clips) ? state->fb_damage_clips->base.id : 0; @@ -648,6 +689,96 @@ drm_atomic_plane_get_property(struct drm_plane *plane, return 0; } +static int drm_atomic_color_set_data_property(struct drm_colorop *colorop, + struct drm_colorop_state *state, + struct drm_property *property, + uint64_t val) +{ + ssize_t elem_size = -1; + ssize_t size = -1; + bool replaced = false; + + switch (colorop->type) { + case DRM_COLOROP_1D_LUT: + size = colorop->size * sizeof(struct drm_color_lut32); + break; + case DRM_COLOROP_CTM_3X4: + size = sizeof(struct drm_color_ctm_3x4); + break; + case DRM_COLOROP_3D_LUT: + size = colorop->size * colorop->size * colorop->size * + sizeof(struct drm_color_lut32); + break; + default: + /* should never get here */ + return -EINVAL; + } + + return drm_property_replace_blob_from_id(colorop->dev, + &state->data, + val, + size, + elem_size, + &replaced); +} + +static int drm_atomic_colorop_set_property(struct drm_colorop *colorop, + struct drm_colorop_state *state, + struct drm_file *file_priv, + struct drm_property *property, + uint64_t val) +{ + if (property == colorop->bypass_property) { + state->bypass = val; + } else if (property == colorop->lut1d_interpolation_property) { + colorop->lut1d_interpolation = val; + } else if (property == colorop->curve_1d_type_property) { + state->curve_1d_type = val; + } else if (property == colorop->multiplier_property) { + state->multiplier = val; + } else if (property == colorop->lut3d_interpolation_property) { + colorop->lut3d_interpolation = val; + } else if (property == colorop->data_property) { + return drm_atomic_color_set_data_property(colorop, state, + property, val); + } else { + drm_dbg_atomic(colorop->dev, + "[COLOROP:%d:%d] unknown property [PROP:%d:%s]\n", + colorop->base.id, colorop->type, + property->base.id, property->name); + return -EINVAL; + } + 
+ return 0; +} + +static int +drm_atomic_colorop_get_property(struct drm_colorop *colorop, + const struct drm_colorop_state *state, + struct drm_property *property, uint64_t *val) +{ + if (property == colorop->type_property) + *val = colorop->type; + else if (property == colorop->bypass_property) + *val = state->bypass; + else if (property == colorop->lut1d_interpolation_property) + *val = colorop->lut1d_interpolation; + else if (property == colorop->curve_1d_type_property) + *val = state->curve_1d_type; + else if (property == colorop->multiplier_property) + *val = state->multiplier; + else if (property == colorop->size_property) + *val = colorop->size; + else if (property == colorop->lut3d_interpolation_property) + *val = colorop->lut3d_interpolation; + else if (property == colorop->data_property) + *val = (state->data) ? state->data->base.id : 0; + else + return -EINVAL; + + return 0; +} + static int drm_atomic_set_writeback_fb_for_connector( struct drm_connector_state *conn_state, struct drm_framebuffer *fb) @@ -914,6 +1045,15 @@ int drm_atomic_get_property(struct drm_mode_object *obj, plane->state, property, val); break; } + case DRM_MODE_OBJECT_COLOROP: { + struct drm_colorop *colorop = obj_to_colorop(obj); + + if (colorop->plane) + WARN_ON(!drm_modeset_is_locked(&colorop->plane->mutex)); + + ret = drm_atomic_colorop_get_property(colorop, colorop->state, property, val); + break; + } default: drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id); ret = -EINVAL; @@ -1111,6 +1251,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state, ret = drm_atomic_plane_set_property(plane, plane_state, file_priv, prop, prop_value); + + break; + } + case DRM_MODE_OBJECT_COLOROP: { + struct drm_colorop *colorop = obj_to_colorop(obj); + struct drm_colorop_state *colorop_state; + + colorop_state = drm_atomic_get_colorop_state(state, colorop); + if (IS_ERR(colorop_state)) { + ret = PTR_ERR(colorop_state); + break; + } + + ret = drm_atomic_colorop_set_property(colorop, colorop_state, + file_priv, prop, prop_value); break; } default: @@ -1450,6 +1605,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); state->acquire_ctx = &ctx; state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); + state->plane_color_pipeline = file_priv->plane_color_pipeline; retry: copied_objs = 0; diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 504ec5bdfa2c..a82d741e6630 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -11,6 +11,7 @@ #include <linux/slab.h> #include <drm/drm_client.h> +#include <drm/drm_client_event.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c index d25dc5250983..7b3e362f7926 100644 --- a/drivers/gpu/drm/drm_client_event.c +++ b/drivers/gpu/drm/drm_client_event.c @@ -102,7 +102,7 @@ void drm_client_dev_hotplug(struct drm_device *dev) } EXPORT_SYMBOL(drm_client_dev_hotplug); -void drm_client_dev_restore(struct drm_device *dev) +void drm_client_dev_restore(struct drm_device *dev, bool force) { struct drm_client_dev *client; int ret; @@ -115,7 +115,7 @@ void drm_client_dev_restore(struct drm_device *dev) if (!client->funcs || !client->funcs->restore) continue; - ret = client->funcs->restore(client); + ret = client->funcs->restore(client, force); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); if (!ret) /* The first one to return 
zero gets the privilege to restore */ break; diff --git a/drivers/gpu/drm/drm_client_sysrq.c b/drivers/gpu/drm/drm_client_sysrq.c new file mode 100644 index 000000000000..eea660096f1b --- /dev/null +++ b/drivers/gpu/drm/drm_client_sysrq.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT + +#include <linux/sysrq.h> + +#include <drm/drm_client_event.h> +#include <drm/drm_device.h> +#include <drm/drm_print.h> + +#include "drm_internal.h" + +#ifdef CONFIG_MAGIC_SYSRQ +static LIST_HEAD(drm_client_sysrq_dev_list); +static DEFINE_MUTEX(drm_client_sysrq_dev_lock); + +/* emergency restore, don't bother with error reporting */ +static void drm_client_sysrq_restore_work_fn(struct work_struct *ignored) +{ + struct drm_device *dev; + + guard(mutex)(&drm_client_sysrq_dev_lock); + + list_for_each_entry(dev, &drm_client_sysrq_dev_list, client_sysrq_list) { + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + continue; + + drm_client_dev_restore(dev, true); + } +} + +static DECLARE_WORK(drm_client_sysrq_restore_work, drm_client_sysrq_restore_work_fn); + +static void drm_client_sysrq_restore_handler(u8 ignored) +{ + schedule_work(&drm_client_sysrq_restore_work); +} + +static const struct sysrq_key_op drm_client_sysrq_restore_op = { + .handler = drm_client_sysrq_restore_handler, + .help_msg = "force-fb(v)", + .action_msg = "Restore framebuffer console", +}; + +void drm_client_sysrq_register(struct drm_device *dev) +{ + guard(mutex)(&drm_client_sysrq_dev_lock); + + if (list_empty(&drm_client_sysrq_dev_list)) + register_sysrq_key('v', &drm_client_sysrq_restore_op); + + list_add(&dev->client_sysrq_list, &drm_client_sysrq_dev_list); +} + +void drm_client_sysrq_unregister(struct drm_device *dev) +{ + guard(mutex)(&drm_client_sysrq_dev_lock); + + /* remove device from global restore list */ + if (!drm_WARN_ON(dev, list_empty(&dev->client_sysrq_list))) + list_del(&dev->client_sysrq_list); + + /* no devices left; unregister key */ + if (list_empty(&drm_client_sysrq_dev_list)) + unregister_sysrq_key('v', &drm_client_sysrq_restore_op); +} +#endif diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index 131c1c9ae92f..c598b99673fc 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -874,3 +874,46 @@ void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_pa fill_palette_8(crtc, i, set_palette); } EXPORT_SYMBOL(drm_crtc_fill_palette_8); + +/** + * drm_color_lut32_check - check validity of extended lookup table + * @lut: property blob containing extended LUT to check + * @tests: bitmask of tests to run + * + * Helper to check whether a userspace-provided extended lookup table is valid and + * satisfies hardware requirements. Drivers pass a bitmask indicating which of + * the tests in &drm_color_lut_tests should be performed. + * + * Returns 0 on success, -EINVAL on failure. 
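Before the function body just below, a hedged example of how a driver might call this helper from its colorop validation. The foo_* name and the chosen test mask are illustrative only, not taken from this series; the DATA blob is the one carried in the colorop state introduced here.

/* Hypothetical driver-side check: a HW 1D LUT that requires monotonically
 * non-decreasing entries with equal R/G/B channels.
 */
static int foo_validate_lut1d(const struct drm_colorop_state *colorop_state)
{
	const struct drm_property_blob *blob = colorop_state->data;

	if (!blob)
		return 0;	/* nothing programmed, nothing to check */

	return drm_color_lut32_check(blob,
				     DRM_COLOR_LUT_NON_DECREASING |
				     DRM_COLOR_LUT_EQUAL_CHANNELS);
}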
+ */ +int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests) +{ + const struct drm_color_lut32 *entry; + int i; + + if (!lut || !tests) + return 0; + + entry = lut->data; + for (i = 0; i < drm_color_lut32_size(lut); i++) { + if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) { + if (entry[i].red != entry[i].blue || + entry[i].red != entry[i].green) { + DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n"); + return -EINVAL; + } + } + + if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) { + if (entry[i].red < entry[i - 1].red || + entry[i].green < entry[i - 1].green || + entry[i].blue < entry[i - 1].blue) { + DRM_DEBUG_KMS("LUT entries must never decrease.\n"); + return -EINVAL; + } + } + } + + return 0; +} +EXPORT_SYMBOL(drm_color_lut32_check); diff --git a/drivers/gpu/drm/drm_colorop.c b/drivers/gpu/drm/drm_colorop.c new file mode 100644 index 000000000000..44eb823585d2 --- /dev/null +++ b/drivers/gpu/drm/drm_colorop.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drm_colorop.h> +#include <drm/drm_print.h> +#include <drm/drm_drv.h> +#include <drm/drm_plane.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * When userspace signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE it + * should use the COLOR_PIPELINE plane property and associated colorops + * for any color operation on the &drm_plane. Setting of all old color + * properties, such as COLOR_ENCODING and COLOR_RANGE, will be rejected + * and the values of the properties will be ignored. + * + * Colorops are only advertised and valid for atomic drivers and atomic + * userspace that signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE + * client cap. + * + * A colorop represents a single color operation. Colorops are chained + * via the NEXT property and make up color pipelines. Color pipelines + * are advertised and selected via the COLOR_PIPELINE &drm_plane + * property. + * + * A colorop will be of a certain type, advertised by the read-only TYPE + * property. Each type of colorop will advertise a different set of + * properties and is programmed in a different manner. Types can be + * enumerated 1D curves, 1D LUTs, 3D LUTs, matrices, etc. See the + * &drm_colorop_type documentation for information on each type. + * + * If a colorop advertises the BYPASS property it can be bypassed. 
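At this point in the overview, a rough driver-side sketch may help tie the pieces together. This is not code from this series (the foo_* function, the pipeline name string and the two-element layout are invented, and the usual <drm/drm_colorop.h> / <linux/slab.h> includes are assumed); it only shows how the init helpers, the NEXT chaining and the COLOR_PIPELINE property introduced here are meant to be combined.

/* Hypothetical example: expose a two-element pipeline (sRGB EOTF curve
 * followed by a 3x4 matrix) on a plane. Error unwinding is abbreviated;
 * cleanup is expected to go through drm_colorop_pipeline_destroy().
 */
static int foo_plane_create_color_pipeline(struct drm_device *dev,
					   struct drm_plane *plane)
{
	struct drm_prop_enum_list pipeline = {};
	struct drm_colorop *curve, *ctm;
	int ret;

	curve = kzalloc(sizeof(*curve), GFP_KERNEL);
	ctm = kzalloc(sizeof(*ctm), GFP_KERNEL);
	if (!curve || !ctm)
		return -ENOMEM;

	/* 1st element: fixed-function transfer function, bypassable */
	ret = drm_plane_colorop_curve_1d_init(dev, curve, plane,
					      BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF),
					      DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (ret)
		return ret;

	/* 2nd element: 3x4 color transformation matrix, bypassable */
	ret = drm_plane_colorop_ctm_3x4_init(dev, ctm, plane,
					     DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (ret)
		return ret;

	/* chain the two colorops via the immutable NEXT property */
	drm_colorop_set_next_property(curve, ctm);

	/* advertise the pipeline; the enum value is the first colorop's ID */
	pipeline.type = curve->base.id;
	pipeline.name = "Color Pipeline";
	return drm_plane_create_color_pipeline_property(plane, &pipeline, 1);
}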
+ * + * Information about colorop and color pipeline design decisions can be + * found at rfc/color_pipeline.rst, but note that this document will + * grow stale over time. + */ + +static const struct drm_prop_enum_list drm_colorop_type_enum_list[] = { + { DRM_COLOROP_1D_CURVE, "1D Curve" }, + { DRM_COLOROP_1D_LUT, "1D LUT" }, + { DRM_COLOROP_CTM_3X4, "3x4 Matrix"}, + { DRM_COLOROP_MULTIPLIER, "Multiplier"}, + { DRM_COLOROP_3D_LUT, "3D LUT"}, +}; + +static const char * const colorop_curve_1d_type_names[] = { + [DRM_COLOROP_1D_CURVE_SRGB_EOTF] = "sRGB EOTF", + [DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF] = "sRGB Inverse EOTF", + [DRM_COLOROP_1D_CURVE_PQ_125_EOTF] = "PQ 125 EOTF", + [DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF] = "PQ 125 Inverse EOTF", + [DRM_COLOROP_1D_CURVE_BT2020_INV_OETF] = "BT.2020 Inverse OETF", + [DRM_COLOROP_1D_CURVE_BT2020_OETF] = "BT.2020 OETF", + [DRM_COLOROP_1D_CURVE_GAMMA22] = "Gamma 2.2", + [DRM_COLOROP_1D_CURVE_GAMMA22_INV] = "Gamma 2.2 Inverse", +}; + +static const struct drm_prop_enum_list drm_colorop_lut1d_interpolation_list[] = { + { DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, "Linear" }, +}; + + +static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] = { + { DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, "Tetrahedral" }, +}; + +/* Init Helpers */ + +static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, enum drm_colorop_type type, + uint32_t flags) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_property *prop; + int ret = 0; + + ret = drm_mode_object_add(dev, &colorop->base, DRM_MODE_OBJECT_COLOROP); + if (ret) + return ret; + + colorop->base.properties = &colorop->properties; + colorop->dev = dev; + colorop->type = type; + colorop->plane = plane; + colorop->next = NULL; + + list_add_tail(&colorop->head, &config->colorop_list); + colorop->index = config->num_colorop++; + + /* add properties */ + + /* type */ + prop = drm_property_create_enum(dev, + DRM_MODE_PROP_IMMUTABLE, + "TYPE", drm_colorop_type_enum_list, + ARRAY_SIZE(drm_colorop_type_enum_list)); + + if (!prop) + return -ENOMEM; + + colorop->type_property = prop; + + drm_object_attach_property(&colorop->base, + colorop->type_property, + colorop->type); + + if (flags & DRM_COLOROP_FLAG_ALLOW_BYPASS) { + /* bypass */ + prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, + "BYPASS"); + if (!prop) + return -ENOMEM; + + colorop->bypass_property = prop; + drm_object_attach_property(&colorop->base, + colorop->bypass_property, + 1); + } + + /* next */ + prop = drm_property_create_object(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "NEXT", DRM_MODE_OBJECT_COLOROP); + if (!prop) + return -ENOMEM; + colorop->next_property = prop; + drm_object_attach_property(&colorop->base, + colorop->next_property, + 0); + + return ret; +} + +/** + * drm_colorop_cleanup - Cleanup a drm_colorop object in color_pipeline + * + * @colorop: The drm_colorop object to be cleaned + */ +void drm_colorop_cleanup(struct drm_colorop *colorop) +{ + struct drm_device *dev = colorop->dev; + struct drm_mode_config *config = &dev->mode_config; + + list_del(&colorop->head); + config->num_colorop--; + + if (colorop->state && colorop->state->data) { + drm_property_blob_put(colorop->state->data); + colorop->state->data = NULL; + } + + kfree(colorop->state); +} +EXPORT_SYMBOL(drm_colorop_cleanup); + +/** + * drm_colorop_pipeline_destroy - Helper for color pipeline destruction + * + * @dev: - The drm_device containing the drm_planes with the 
color_pipelines + * + * Provides a default color pipeline destroy handler for drm_device. + */ +void drm_colorop_pipeline_destroy(struct drm_device *dev) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_colorop *colorop, *next; + + list_for_each_entry_safe(colorop, next, &config->colorop_list, head) { + drm_colorop_cleanup(colorop); + kfree(colorop); + } +} +EXPORT_SYMBOL(drm_colorop_pipeline_destroy); + +/** + * drm_plane_colorop_curve_1d_init - Initialize a DRM_COLOROP_1D_CURVE + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @supported_tfs: A bitfield of supported drm_plane_colorop_curve_1d_init enum values, + * created using BIT(curve_type) and combined with the OR '|' + * operator. + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. + * @return zero on success, -E value on failure + */ +int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, u64 supported_tfs, uint32_t flags) +{ + struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT]; + int i, len; + + struct drm_property *prop; + int ret; + + if (!supported_tfs) { + drm_err(dev, + "No supported TFs for new 1D curve colorop on [PLANE:%d:%s]\n", + plane->base.id, plane->name); + return -EINVAL; + } + + if ((supported_tfs & -BIT(DRM_COLOROP_1D_CURVE_COUNT)) != 0) { + drm_err(dev, "Unknown TF provided on [PLANE:%d:%s]\n", + plane->base.id, plane->name); + return -EINVAL; + } + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags); + if (ret) + return ret; + + len = 0; + for (i = 0; i < DRM_COLOROP_1D_CURVE_COUNT; i++) { + if ((supported_tfs & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = colorop_curve_1d_type_names[i]; + len++; + } + + if (WARN_ON(len <= 0)) + return -EINVAL; + + /* initialize 1D curve only attribute */ + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ATOMIC, "CURVE_1D_TYPE", + enum_list, len); + + if (!prop) + return -ENOMEM; + + colorop->curve_1d_type_property = prop; + drm_object_attach_property(&colorop->base, colorop->curve_1d_type_property, + enum_list[0].type); + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_curve_1d_init); + +static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_colorop *colorop) +{ + struct drm_property *prop; + + /* data */ + prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, + "DATA", 0); + if (!prop) + return -ENOMEM; + + colorop->data_property = prop; + drm_object_attach_property(&colorop->base, + colorop->data_property, + 0); + + return 0; +} + +/** + * drm_plane_colorop_curve_1d_lut_init - Initialize a DRM_COLOROP_1D_LUT + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @lut_size: LUT size supported by driver + * @interpolation: 1D LUT interpolation type + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 
+ * @return zero on success, -E value on failure + */ +int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t lut_size, + enum drm_colorop_lut1d_interpolation_type interpolation, + uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags); + if (ret) + return ret; + + /* initialize 1D LUT only attribute */ + /* LUT size */ + prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + + colorop->size_property = prop; + drm_object_attach_property(&colorop->base, colorop->size_property, lut_size); + colorop->size = lut_size; + + /* interpolation */ + prop = drm_property_create_enum(dev, 0, "LUT1D_INTERPOLATION", + drm_colorop_lut1d_interpolation_list, + ARRAY_SIZE(drm_colorop_lut1d_interpolation_list)); + if (!prop) + return -ENOMEM; + + colorop->lut1d_interpolation_property = prop; + drm_object_attach_property(&colorop->base, prop, interpolation); + colorop->lut1d_interpolation = interpolation; + + /* data */ + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init); + +int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t flags) +{ + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags); + if (ret) + return ret; + + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init); + +/** + * drm_plane_colorop_mult_init - Initialize a DRM_COLOROP_MULTIPLIER + * + * @dev: DRM device + * @colorop: The drm_colorop object to initialize + * @plane: The associated drm_plane + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 
+ * @return zero on success, -E value on failure + */ +int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags); + if (ret) + return ret; + + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "MULTIPLIER", 0, U64_MAX); + if (!prop) + return -ENOMEM; + + colorop->multiplier_property = prop; + drm_object_attach_property(&colorop->base, colorop->multiplier_property, 0); + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_mult_init); + +int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop, + struct drm_plane *plane, + uint32_t lut_size, + enum drm_colorop_lut3d_interpolation_type interpolation, + uint32_t flags) +{ + struct drm_property *prop; + int ret; + + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags); + if (ret) + return ret; + + /* LUT size */ + prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, + "SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + + colorop->size_property = prop; + drm_object_attach_property(&colorop->base, colorop->size_property, lut_size); + colorop->size = lut_size; + + /* interpolation */ + prop = drm_property_create_enum(dev, 0, "LUT3D_INTERPOLATION", + drm_colorop_lut3d_interpolation_list, + ARRAY_SIZE(drm_colorop_lut3d_interpolation_list)); + if (!prop) + return -ENOMEM; + + colorop->lut3d_interpolation_property = prop; + drm_object_attach_property(&colorop->base, prop, interpolation); + colorop->lut3d_interpolation = interpolation; + + /* data */ + ret = drm_colorop_create_data_prop(dev, colorop); + if (ret) + return ret; + + drm_colorop_reset(colorop); + + return 0; +} +EXPORT_SYMBOL(drm_plane_colorop_3dlut_init); + +static void __drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop, + struct drm_colorop_state *state) +{ + memcpy(state, colorop->state, sizeof(*state)); + + if (state->data) + drm_property_blob_get(state->data); + + state->bypass = true; +} + +struct drm_colorop_state * +drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop) +{ + struct drm_colorop_state *state; + + if (WARN_ON(!colorop->state)) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (state) + __drm_atomic_helper_colorop_duplicate_state(colorop, state); + + return state; +} + +void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop, + struct drm_colorop_state *state) +{ + kfree(state); +} + +/** + * __drm_colorop_state_reset - resets colorop state to default values + * @colorop_state: atomic colorop state, must not be NULL + * @colorop: colorop object, must not be NULL + * + * Initializes the newly allocated @colorop_state with default + * values. This is useful for drivers that subclass the CRTC state. 
+ */ +static void __drm_colorop_state_reset(struct drm_colorop_state *colorop_state, + struct drm_colorop *colorop) +{ + u64 val; + + colorop_state->colorop = colorop; + colorop_state->bypass = true; + + if (colorop->curve_1d_type_property) { + drm_object_property_get_default_value(&colorop->base, + colorop->curve_1d_type_property, + &val); + colorop_state->curve_1d_type = val; + } +} + +/** + * __drm_colorop_reset - reset state on colorop + * @colorop: drm colorop + * @colorop_state: colorop state to assign + * + * Initializes the newly allocated @colorop_state and assigns it to + * the &drm_crtc->state pointer of @colorop, usually required when + * initializing the drivers or when called from the &drm_colorop_funcs.reset + * hook. + * + * This is useful for drivers that subclass the colorop state. + */ +static void __drm_colorop_reset(struct drm_colorop *colorop, + struct drm_colorop_state *colorop_state) +{ + if (colorop_state) + __drm_colorop_state_reset(colorop_state, colorop); + + colorop->state = colorop_state; +} + +void drm_colorop_reset(struct drm_colorop *colorop) +{ + kfree(colorop->state); + colorop->state = kzalloc(sizeof(*colorop->state), GFP_KERNEL); + + if (colorop->state) + __drm_colorop_reset(colorop, colorop->state); +} + +static const char * const colorop_type_name[] = { + [DRM_COLOROP_1D_CURVE] = "1D Curve", + [DRM_COLOROP_1D_LUT] = "1D LUT", + [DRM_COLOROP_CTM_3X4] = "3x4 Matrix", + [DRM_COLOROP_MULTIPLIER] = "Multiplier", + [DRM_COLOROP_3D_LUT] = "3D LUT", +}; + +static const char * const colorop_lu3d_interpolation_name[] = { + [DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL] = "Tetrahedral", +}; + +static const char * const colorop_lut1d_interpolation_name[] = { + [DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR] = "Linear", +}; + +const char *drm_get_colorop_type_name(enum drm_colorop_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_type_name))) + return "unknown"; + + return colorop_type_name[type]; +} + +const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_curve_1d_type_names))) + return "unknown"; + + return colorop_curve_1d_type_names[type]; +} + +/** + * drm_get_colorop_lut1d_interpolation_name: return a string for interpolation type + * @type: interpolation type to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. + */ +const char *drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_lut1d_interpolation_name))) + return "unknown"; + + return colorop_lut1d_interpolation_name[type]; +} + +/** + * drm_get_colorop_lut3d_interpolation_name - return a string for interpolation type + * @type: interpolation type to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. 
+ */ +const char *drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type) +{ + if (WARN_ON(type >= ARRAY_SIZE(colorop_lu3d_interpolation_name))) + return "unknown"; + + return colorop_lu3d_interpolation_name[type]; +} + +/** + * drm_colorop_set_next_property - sets the next pointer + * @colorop: drm colorop + * @next: next colorop + * + * Should be used when constructing the color pipeline + */ +void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next) +{ + drm_object_property_set_value(&colorop->base, + colorop->next_property, + next ? next->base.id : 0); + colorop->next = next; +} +EXPORT_SYMBOL(drm_colorop_set_next_property); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 272d6254ea47..4d6dc9ebfdb5 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -3439,6 +3439,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, * properties reflect the latest status. */ ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + file_priv->plane_color_pipeline, (uint32_t __user *)(unsigned long)(out_resp->props_ptr), (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), &out_resp->count_props); diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 89706aa8232f..c09409229644 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -163,6 +163,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, void drm_mode_object_unregister(struct drm_device *dev, struct drm_mode_object *object); int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, + bool plane_color_pipeline, uint32_t __user *prop_ptr, uint64_t __user *prop_values, uint32_t *arg_count_props); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8e3cb08241c8..2915118436ce 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -733,6 +733,7 @@ static int drm_dev_init(struct drm_device *dev, INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->filelist_internal); INIT_LIST_HEAD(&dev->clientlist); + INIT_LIST_HEAD(&dev->client_sysrq_list); INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->event_lock); @@ -1100,6 +1101,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_unload; } drm_panic_register(dev); + drm_client_sysrq_register(dev); DRM_INFO("Initialized %s %d.%d.%d for %s on minor %d\n", driver->name, driver->major, driver->minor, @@ -1144,6 +1146,7 @@ void drm_dev_unregister(struct drm_device *dev) { dev->registered = false; + drm_client_sysrq_unregister(dev); drm_panic_unregister(dev); drm_client_dev_unregister(dev); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 53e9dc0543de..be790fc68707 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -32,7 +32,6 @@ #include <linux/console.h> #include <linux/export.h> #include <linux/pci.h> -#include <linux/sysrq.h> #include <linux/vga_switcheroo.h> #include <drm/drm_atomic.h> @@ -255,6 +254,7 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, /** * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration * @fb_helper: driver-allocated fbdev helper, can be NULL + * @force: ignore present DRM master * * This helper should be called from fbdev emulation's &drm_client_funcs.restore * callback. 
It ensures that the user isn't greeted with a black screen when the @@ -263,48 +263,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, * Returns: * 0 on success, or a negative errno code otherwise. */ -int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, bool force) { - return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false); + return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); -#ifdef CONFIG_MAGIC_SYSRQ -/* emergency restore, don't bother with error reporting */ -static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) -{ - struct drm_fb_helper *helper; - - mutex_lock(&kernel_fb_helper_lock); - list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { - struct drm_device *dev = helper->dev; - - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) - continue; - - mutex_lock(&helper->lock); - drm_client_modeset_commit_locked(&helper->client); - mutex_unlock(&helper->lock); - } - mutex_unlock(&kernel_fb_helper_lock); -} - -static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); - -static void drm_fb_helper_sysrq(u8 dummy1) -{ - schedule_work(&drm_fb_helper_restore_work); -} - -static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { - .handler = drm_fb_helper_sysrq, - .help_msg = "force-fb(v)", - .action_msg = "Restore framebuffer console", -}; -#else -static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; -#endif - static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) { struct drm_fb_helper *fb_helper = info->par; @@ -495,20 +459,7 @@ int drm_fb_helper_init(struct drm_device *dev, } EXPORT_SYMBOL(drm_fb_helper_init); -/** - * drm_fb_helper_alloc_info - allocate fb_info and some of its members - * @fb_helper: driver-allocated fbdev helper - * - * A helper to alloc fb_info and the member cmap. Called by the driver - * within the struct &drm_driver.fbdev_probe callback function. Drivers do - * not need to release the allocated fb_info structure themselves, this is - * automatically done when calling drm_fb_helper_fini(). - * - * RETURNS: - * fb_info pointer if things went okay, pointer containing error code - * otherwise - */ -struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) +static struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) { struct device *dev = fb_helper->dev->dev; struct fb_info *info; @@ -535,17 +486,8 @@ err_release: framebuffer_release(info); return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_fb_helper_alloc_info); -/** - * drm_fb_helper_release_info - release fb_info and its members - * @fb_helper: driver-allocated fbdev helper - * - * A helper to release fb_info and the member cmap. Drivers do not - * need to release the allocated fb_info structure themselves, this is - * automatically done when calling drm_fb_helper_fini(). 
- */ -void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) +static void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) { struct fb_info *info = fb_helper->info; @@ -558,7 +500,6 @@ void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } -EXPORT_SYMBOL(drm_fb_helper_release_info); /** * drm_fb_helper_unregister_info - unregister fb_info framebuffer device @@ -601,11 +542,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) drm_fb_helper_release_info(fb_helper); mutex_lock(&kernel_fb_helper_lock); - if (!list_empty(&fb_helper->kernel_fb_list)) { + if (!list_empty(&fb_helper->kernel_fb_list)) list_del(&fb_helper->kernel_fb_list); - if (list_empty(&kernel_fb_helper_list)) - unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); - } mutex_unlock(&kernel_fb_helper_lock); if (!fb_helper->client.funcs) @@ -1328,9 +1266,9 @@ int drm_fb_helper_set_par(struct fb_info *info) * the KDSET IOCTL with KD_TEXT, and only after that drops the master * status when exiting. * - * In the past this was caught by drm_fb_helper_lastclose(), but on - * modern systems where logind always keeps a drm fd open to orchestrate - * the vt switching, this doesn't work. + * In the past this was caught by drm_fb_helper_restore_fbdev_mode_unlocked(), + * but on modern systems where logind always keeps a drm fd open to + * orchestrate the vt switching, this doesn't work. * * To not break the userspace ABI we have this special case here, which * is only used for the above case. Everything else uses the normal @@ -1809,6 +1747,11 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) height = dev->mode_config.max_height; drm_client_modeset_probe(&fb_helper->client, width, height); + + info = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(info)) + return PTR_ERR(info); + ret = drm_fb_helper_single_fb_probe(fb_helper); if (ret < 0) { if (ret == -EAGAIN) { @@ -1817,13 +1760,12 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) } mutex_unlock(&fb_helper->lock); - return ret; + goto err_drm_fb_helper_release_info; } drm_setup_crtcs_fb(fb_helper); fb_helper->deferred_setup = false; - info = fb_helper->info; info->var.pixclock = 0; /* Need to drop locks to avoid recursive deadlock in @@ -1839,13 +1781,14 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) info->node, info->fix.id); mutex_lock(&kernel_fb_helper_lock); - if (list_empty(&kernel_fb_helper_list)) - register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); - list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); mutex_unlock(&kernel_fb_helper_lock); return 0; + +err_drm_fb_helper_release_info: + drm_fb_helper_release_info(fb_helper); + return ret; } /** @@ -1955,16 +1898,3 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) return 0; } EXPORT_SYMBOL(drm_fb_helper_hotplug_event); - -/** - * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation - * @dev: DRM device - * - * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked() - * instead. 
- */ -void drm_fb_helper_lastclose(struct drm_device *dev) -{ - drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper); -} -EXPORT_SYMBOL(drm_fb_helper_lastclose); diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 12a8f5a5ada5..9412d9fdd74b 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -269,9 +269,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; struct drm_framebuffer *fb; - struct fb_info *info; u32 format; struct iosys_map map; int ret; @@ -301,12 +301,6 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->buffer = buffer; fb_helper->fb = fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_client_buffer_vunmap; - } - drm_fb_helper_fill_info(info, fb_helper, sizes); if (fb->funcs->dirty) @@ -314,12 +308,10 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, else ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes); if (ret) - goto err_drm_fb_helper_release_info; + goto err_drm_client_buffer_vunmap; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_drm_client_buffer_vunmap: fb_helper->fb = NULL; fb_helper->buffer = NULL; diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c index ac2b22e05cd6..458c899b5d4f 100644 --- a/drivers/gpu/drm/drm_fbdev_shmem.c +++ b/drivers/gpu/drm/drm_fbdev_shmem.c @@ -135,10 +135,10 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; struct drm_gem_shmem_object *shmem; struct drm_framebuffer *fb; - struct fb_info *info; u32 format; struct iosys_map map; int ret; @@ -168,12 +168,6 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->buffer = buffer; fb_helper->fb = fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_client_buffer_vunmap; - } - drm_fb_helper_fill_info(info, fb_helper, sizes); info->fbops = &drm_fbdev_shmem_fb_ops; @@ -194,12 +188,10 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, info->fbdefio = &fb_helper->fbdefio; ret = fb_deferred_io_init(info); if (ret) - goto err_drm_fb_helper_release_info; + goto err_drm_client_buffer_vunmap; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_drm_client_buffer_vunmap: fb_helper->fb = NULL; fb_helper->buffer = NULL; diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c index c7ad779ba590..160bc35d8738 100644 --- a/drivers/gpu/drm/drm_fbdev_ttm.c +++ b/drivers/gpu/drm/drm_fbdev_ttm.c @@ -174,8 +174,8 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct fb_info *info = fb_helper->info; struct drm_client_buffer *buffer; - struct fb_info *info; size_t screen_size; void *screen_buffer; u32 format; @@ -203,12 +203,6 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, goto err_drm_client_buffer_delete; } - info = drm_fb_helper_alloc_info(fb_helper); - if 
(IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_vfree; - } - drm_fb_helper_fill_info(info, fb_helper, sizes); info->fbops = &drm_fbdev_ttm_fb_ops; @@ -225,12 +219,10 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, info->fbdefio = &fb_helper->fbdefio; ret = fb_deferred_io_init(info); if (ret) - goto err_drm_fb_helper_release_info; + goto err_vfree; return 0; -err_drm_fb_helper_release_info: - drm_fb_helper_release_info(fb_helper); err_vfree: vfree(screen_buffer); err_drm_client_buffer_delete: diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index eebd1a05ee97..be5e617ceb9f 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -405,7 +405,7 @@ EXPORT_SYMBOL(drm_open); static void drm_lastclose(struct drm_device *dev) { - drm_client_dev_restore(dev); + drm_client_dev_restore(dev, false); if (dev_is_pci(dev->dev)) vga_switcheroo_process_delayed_switch(); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index a1a9c828938b..efc79bbf3c73 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -783,7 +783,6 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count, int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out) { - struct drm_device *dev = filp->minor->dev; struct drm_gem_object **objs; u32 *handles; int ret; @@ -798,20 +797,11 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, *objs_out = objs; - handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); - if (!handles) { - ret = -ENOMEM; - goto out; - } - - if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { - ret = -EFAULT; - drm_dbg_core(dev, "Failed to copy in GEM handles\n"); - goto out; - } + handles = vmemdup_array_user(bo_handles, count, sizeof(u32)); + if (IS_ERR(handles)) + return PTR_ERR(handles); ret = objects_lookup(filp, handles, count, objs); -out: kvfree(handles); return ret; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 5a3bed48ab1f..f893b1e3a596 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -56,6 +56,17 @@ static inline void drm_client_debugfs_init(struct drm_device *dev) { } #endif +/* drm_client_sysrq.c */ +#if defined(CONFIG_DRM_CLIENT) && defined(CONFIG_MAGIC_SYSRQ) +void drm_client_sysrq_register(struct drm_device *dev); +void drm_client_sysrq_unregister(struct drm_device *dev); +#else +static inline void drm_client_sysrq_register(struct drm_device *dev) +{ } +static inline void drm_client_sysrq_unregister(struct drm_device *dev) +{ } +#endif + /* drm_file.c */ extern struct mutex drm_global_mutex; bool drm_dev_needs_global_mutex(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index d8a24875a7ba..ff193155129e 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -373,6 +373,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) return -EINVAL; file_priv->supports_virtualized_cursor_plane = req->value; break; + case DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE: + if (!file_priv->atomic) + return -EINVAL; + if (req->value > 1) + return -EINVAL; + file_priv->plane_color_pipeline = req->value; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 25f376869b3a..d12db9b0bab8 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ 
-30,6 +30,7 @@ #include <drm/drm_managed.h> #include <drm/drm_mode_config.h> #include <drm/drm_print.h> +#include <drm/drm_colorop.h> #include <linux/dma-resv.h> #include "drm_crtc_internal.h" @@ -192,11 +193,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data, void drm_mode_config_reset(struct drm_device *dev) { struct drm_crtc *crtc; + struct drm_colorop *colorop; struct drm_plane *plane; struct drm_encoder *encoder; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + drm_for_each_colorop(colorop, dev) + drm_colorop_reset(colorop); + drm_for_each_plane(plane, dev) if (plane->funcs->reset) plane->funcs->reset(plane); @@ -437,6 +442,7 @@ int drmm_mode_config_init(struct drm_device *dev) INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); INIT_LIST_HEAD(&dev->mode_config.plane_list); + INIT_LIST_HEAD(&dev->mode_config.colorop_list); INIT_LIST_HEAD(&dev->mode_config.privobj_list); idr_init_base(&dev->mode_config.object_idr, 1); idr_init_base(&dev->mode_config.tile_idr, 1); @@ -458,6 +464,7 @@ int drmm_mode_config_init(struct drm_device *dev) dev->mode_config.num_crtc = 0; dev->mode_config.num_encoder = 0; dev->mode_config.num_total_plane = 0; + dev->mode_config.num_colorop = 0; if (IS_ENABLED(CONFIG_LOCKDEP)) { struct drm_modeset_acquire_ctx modeset_ctx; diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index e943205a2394..b45d501b10c8 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -28,6 +28,7 @@ #include <drm/drm_device.h> #include <drm/drm_file.h> #include <drm/drm_mode_object.h> +#include <drm/drm_plane.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" @@ -386,6 +387,7 @@ EXPORT_SYMBOL(drm_object_property_get_default_value); /* helper for getconnector and getproperties ioctls */ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, + bool plane_color_pipeline, uint32_t __user *prop_ptr, uint64_t __user *prop_values, uint32_t *arg_count_props) @@ -399,6 +401,21 @@ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic) continue; + if (plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) { + struct drm_plane *plane = obj_to_plane(obj); + + if (prop == plane->color_encoding_property || + prop == plane->color_range_property) + continue; + } + + if (!plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) { + struct drm_plane *plane = obj_to_plane(obj); + + if (prop == plane->color_pipeline_property) + continue; + } + if (*arg_count_props > count) { ret = __drm_object_property_get_value(obj, prop, &val); if (ret) @@ -457,6 +474,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, } ret = drm_mode_object_get_properties(obj, file_priv->atomic, + file_priv->plane_color_pipeline, (uint32_t __user *)(unsigned long)(arg->props_ptr), (uint64_t __user *)(unsigned long)(arg->prop_values_ptr), &arg->count_props); diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 38f82391bfda..f6cfa8ac090c 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -1820,3 +1820,62 @@ int drm_plane_add_size_hints_property(struct drm_plane *plane, return 0; } EXPORT_SYMBOL(drm_plane_add_size_hints_property); + +/** + * drm_plane_create_color_pipeline_property - create a new color pipeline + * property + * + * @plane: drm plane + * @pipelines: list of pipelines + * 
@num_pipelines: number of pipelines + * + * Create the COLOR_PIPELINE plane property to specific color pipelines on + * the plane. + * + * RETURNS: + * Zero for success or -errno + */ +int drm_plane_create_color_pipeline_property(struct drm_plane *plane, + const struct drm_prop_enum_list *pipelines, + int num_pipelines) +{ + struct drm_prop_enum_list *all_pipelines; + struct drm_property *prop; + int len = 0; + int i; + + all_pipelines = kcalloc(num_pipelines + 1, + sizeof(*all_pipelines), + GFP_KERNEL); + + if (!all_pipelines) { + drm_err(plane->dev, "failed to allocate color pipeline\n"); + return -ENOMEM; + } + + /* Create default Bypass color pipeline */ + all_pipelines[len].type = 0; + all_pipelines[len].name = "Bypass"; + len++; + + /* Add all other color pipelines */ + for (i = 0; i < num_pipelines; i++, len++) { + all_pipelines[len].type = pipelines[i].type; + all_pipelines[len].name = pipelines[i].name; + } + + prop = drm_property_create_enum(plane->dev, DRM_MODE_PROP_ATOMIC, + "COLOR_PIPELINE", + all_pipelines, len); + if (IS_ERR(prop)) { + kfree(all_pipelines); + return PTR_ERR(prop); + } + + drm_object_attach_property(&plane->base, prop, 0); + plane->color_pipeline_property = prop; + + kfree(all_pipelines); + return 0; +} +EXPORT_SYMBOL(drm_plane_create_color_pipeline_property); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index a9d35e8fca6a..637927818dfe 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -58,18 +58,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes, struct exynos_drm_gem *exynos_gem) { - struct fb_info *fbi; + struct fb_info *fbi = helper->info; struct drm_framebuffer *fb = helper->fb; unsigned int size = fb->width * fb->height * fb->format->cpp[0]; unsigned long offset; - fbi = drm_fb_helper_alloc_info(helper); - if (IS_ERR(fbi)) { - DRM_DEV_ERROR(to_dma_dev(helper->dev), - "failed to allocate fb info.\n"); - return PTR_ERR(fbi); - } - fbi->fbops = &exynos_drm_fb_ops; drm_fb_helper_fill_info(fbi, helper, sizes); diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c index bc92fa24a1e2..c26926babc2a 100644 --- a/drivers/gpu/drm/gma500/fbdev.c +++ b/drivers/gpu/drm/gma500/fbdev.c @@ -108,7 +108,7 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct drm_device *dev = fb_helper->dev; struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); - struct fb_info *info; + struct fb_info *info = fb_helper->info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd = { }; int size; @@ -167,12 +167,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->funcs = &psb_fbdev_fb_helper_funcs; fb_helper->fb = fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_framebuffer_unregister_private; - } - info->fbops = &psb_fbdev_fb_ops; /* Accessed stolen memory directly */ @@ -196,10 +190,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, return 0; -err_drm_framebuffer_unregister_private: - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); - kfree(fb); err_drm_gem_object_put: drm_gem_object_put(obj); return ret; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index e5449c41cfa1..9cd03e2adeb2 100644 --- 
a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -267,8 +267,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct intel_display *display = to_intel_display(helper->dev); struct intel_fbdev *ifbdev = to_intel_fbdev(helper); struct intel_framebuffer *fb = ifbdev->fb; + struct fb_info *info = helper->info; struct ref_tracker *wakeref; - struct fb_info *info; struct i915_vma *vma; unsigned long flags = 0; bool prealloc = false; @@ -318,13 +318,6 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto out_unlock; } - info = drm_fb_helper_alloc_info(helper); - if (IS_ERR(info)) { - drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info); - ret = PTR_ERR(info); - goto out_unpin; - } - helper->funcs = &intel_fb_helper_funcs; helper->fb = &fb->base; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index aad6fb77f0de..fd19995b12b5 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -91,9 +91,9 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, { struct drm_device *dev = helper->dev; struct msm_drm_private *priv = dev->dev_private; + struct fb_info *fbi = helper->info; struct drm_framebuffer *fb = NULL; struct drm_gem_object *bo; - struct fb_info *fbi = NULL; uint64_t paddr; uint32_t format; int ret, pitch; @@ -126,13 +126,6 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto fail; } - fbi = drm_fb_helper_alloc_info(helper); - if (IS_ERR(fbi)) { - DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n"); - ret = PTR_ERR(fbi); - goto fail; - } - DBG("fbi=%p, dev=%p", fbi, dev); helper->funcs = &msm_fbdev_helper_funcs; diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index d89761f13cd7..ca3fb186bf19 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -154,9 +154,9 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct drm_device *dev = helper->dev; struct omap_drm_private *priv = dev->dev_private; struct omap_fbdev *fbdev = priv->fbdev; + struct fb_info *fbi = helper->info; struct drm_framebuffer *fb = NULL; union omap_gem_size gsize; - struct fb_info *fbi = NULL; struct drm_mode_fb_cmd2 mode_cmd = {0}; struct drm_gem_object *bo; dma_addr_t dma_addr; @@ -225,13 +225,6 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto fail; } - fbi = drm_fb_helper_alloc_info(helper); - if (IS_ERR(fbi)) { - dev_err(dev->dev, "failed to allocate fb info\n"); - ret = PTR_ERR(fbi); - goto fail; - } - DBG("fbi=%p, dev=%p", fbi, dev); helper->funcs = &omap_fbdev_helper_funcs; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index ad54537d914a..76f6af819037 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -408,6 +408,19 @@ config DRM_PANEL_LG_LB035Q02 (found on the Gumstix Overo Palo35 board). To compile this driver as a module, choose M here. +config DRM_PANEL_LG_LD070WX3 + tristate "LG LD070WX3 MIPI DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select VIDEOMODE_HELPERS + help + Say Y here if you want to enable support for the LD070WX3 MIPI DSI + panel found in the NVIDIA Tegra Note 7 tablet. + + To compile this driver as a module, choose M here: the module will + be called panel-lg-ld070wx3. 
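Returning to the drm_plane.c hunk above: drm_plane_create_color_pipeline_property() always prepends a "Bypass" entry at enum value 0, so a driver only passes its real pipelines. The sketch below shows one plausible way a driver could call it; the function name example_plane_init_color_pipeline(), the single-entry pipeline list, and the use of the first colorop's object ID as the enum value are illustrative assumptions rather than code taken from a driver in this series.

	/*
	 * Hypothetical driver-side registration sketch. Assumes the driver has
	 * already built a colorop chain and keeps a pointer to its first
	 * element, and that each pipeline entry's enum value is that colorop's
	 * object ID.
	 */
	static int example_plane_init_color_pipeline(struct drm_plane *plane,
						     struct drm_colorop *first_colorop)
	{
		struct drm_prop_enum_list pipelines[] = {
			{
				.type = first_colorop->base.id,
				.name = "Color Pipeline",
			},
		};

		/* "Bypass" is added implicitly as entry 0 by the helper. */
		return drm_plane_create_color_pipeline_property(plane, pipelines,
								ARRAY_SIZE(pipelines));
	}

As the drm_mode_object.c change above shows, the resulting COLOR_PIPELINE property is only reported to clients that have opted in (file_priv->plane_color_pipeline set), while legacy clients keep seeing COLOR_ENCODING/COLOR_RANGE instead.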
+ config DRM_PANEL_LG_LG4573 tristate "LG4573 RGB/SPI panel" depends on OF && SPI @@ -881,16 +894,17 @@ config DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01 DSI protocol with 4 lanes. config DRM_PANEL_SAMSUNG_SOFEF00 - tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels" + tristate "Samsung SOFEF00 DSI panel controller" depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE select VIDEOMODE_HELPERS help Say Y or M here if you want to enable support for the Samsung AMOLED - command mode panels found in the OnePlus 6/6T smartphones. + panel SOFEF00 DDIC and connected panel. + Currently supported panels: - The panels are 2280x1080@60Hz and 2340x1080@60Hz respectively + Samsung AMS628NW01 (found in OnePlus 6, 1080x2280@60Hz) config DRM_PANEL_SEIKO_43WVF1G tristate "Seiko 43WVF1G panel" diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 4c4b6b4aefd0..b9562a6fdcb3 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o obj-$(CONFIG_DRM_PANEL_LINCOLNTECH_LCD197) += panel-lincolntech-lcd197.o obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o +obj-$(CONFIG_DRM_PANEL_LG_LD070WX3) += panel-lg-ld070wx3.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 944c7c70de55..415b894890ad 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1965,6 +1965,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a6a, &delay_200_500_e80, "NV140WUM-N44"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a84, &delay_200_500_e50, "NV133WUM-T01"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"), diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 7ecb81225981..947b47841b01 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -820,6 +820,204 @@ static const struct ili9881c_instr tl050hdv35_init[] = { ILI9881C_COMMAND_INSTR(0xd3, 0x39), }; +static const struct ili9881c_instr w552946aaa_init[] = { + ILI9881C_SWITCH_PAGE_INSTR(3), + ILI9881C_COMMAND_INSTR(0x01, 0x00), + ILI9881C_COMMAND_INSTR(0x02, 0x00), + ILI9881C_COMMAND_INSTR(0x03, 0x53), + ILI9881C_COMMAND_INSTR(0x04, 0x53), + ILI9881C_COMMAND_INSTR(0x05, 0x13), + ILI9881C_COMMAND_INSTR(0x06, 0x04), + ILI9881C_COMMAND_INSTR(0x07, 0x02), + ILI9881C_COMMAND_INSTR(0x08, 0x02), + ILI9881C_COMMAND_INSTR(0x09, 0x00), + ILI9881C_COMMAND_INSTR(0x0a, 0x00), + ILI9881C_COMMAND_INSTR(0x0b, 0x00), + ILI9881C_COMMAND_INSTR(0x0c, 0x00), + ILI9881C_COMMAND_INSTR(0x0d, 0x00), + ILI9881C_COMMAND_INSTR(0x0e, 0x00), + ILI9881C_COMMAND_INSTR(0x0f, 0x00), + ILI9881C_COMMAND_INSTR(0x10, 0x00), + ILI9881C_COMMAND_INSTR(0x11, 0x00), + ILI9881C_COMMAND_INSTR(0x12, 0x00), + 
ILI9881C_COMMAND_INSTR(0x13, 0x00), + ILI9881C_COMMAND_INSTR(0x14, 0x00), + ILI9881C_COMMAND_INSTR(0x15, 0x08), + ILI9881C_COMMAND_INSTR(0x16, 0x10), + ILI9881C_COMMAND_INSTR(0x17, 0x00), + ILI9881C_COMMAND_INSTR(0x18, 0x08), + ILI9881C_COMMAND_INSTR(0x19, 0x00), + ILI9881C_COMMAND_INSTR(0x1a, 0x00), + ILI9881C_COMMAND_INSTR(0x1b, 0x00), + ILI9881C_COMMAND_INSTR(0x1c, 0x00), + ILI9881C_COMMAND_INSTR(0x1d, 0x00), + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), + ILI9881C_COMMAND_INSTR(0x1f, 0x80), + ILI9881C_COMMAND_INSTR(0x20, 0x02), + ILI9881C_COMMAND_INSTR(0x21, 0x09), + ILI9881C_COMMAND_INSTR(0x22, 0x00), + ILI9881C_COMMAND_INSTR(0x23, 0x00), + ILI9881C_COMMAND_INSTR(0x24, 0x00), + ILI9881C_COMMAND_INSTR(0x25, 0x00), + ILI9881C_COMMAND_INSTR(0x26, 0x00), + ILI9881C_COMMAND_INSTR(0x27, 0x00), + ILI9881C_COMMAND_INSTR(0x28, 0x55), + ILI9881C_COMMAND_INSTR(0x29, 0x03), + ILI9881C_COMMAND_INSTR(0x2a, 0x00), + ILI9881C_COMMAND_INSTR(0x2b, 0x00), + ILI9881C_COMMAND_INSTR(0x2c, 0x00), + ILI9881C_COMMAND_INSTR(0x2d, 0x00), + ILI9881C_COMMAND_INSTR(0x2e, 0x00), + ILI9881C_COMMAND_INSTR(0x2f, 0x00), + ILI9881C_COMMAND_INSTR(0x30, 0x00), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x33, 0x00), + ILI9881C_COMMAND_INSTR(0x34, 0x04), + ILI9881C_COMMAND_INSTR(0x35, 0x05), + ILI9881C_COMMAND_INSTR(0x36, 0x05), + ILI9881C_COMMAND_INSTR(0x37, 0x00), + ILI9881C_COMMAND_INSTR(0x38, 0x3c), + ILI9881C_COMMAND_INSTR(0x39, 0x35), + ILI9881C_COMMAND_INSTR(0x3a, 0x00), + ILI9881C_COMMAND_INSTR(0x3b, 0x40), + ILI9881C_COMMAND_INSTR(0x3c, 0x00), + ILI9881C_COMMAND_INSTR(0x3d, 0x00), + ILI9881C_COMMAND_INSTR(0x3e, 0x00), + ILI9881C_COMMAND_INSTR(0x3f, 0x00), + ILI9881C_COMMAND_INSTR(0x40, 0x00), + ILI9881C_COMMAND_INSTR(0x41, 0x88), + ILI9881C_COMMAND_INSTR(0x42, 0x00), + ILI9881C_COMMAND_INSTR(0x43, 0x00), + ILI9881C_COMMAND_INSTR(0x44, 0x1f), + ILI9881C_COMMAND_INSTR(0x50, 0x01), + ILI9881C_COMMAND_INSTR(0x51, 0x23), + ILI9881C_COMMAND_INSTR(0x52, 0x45), + ILI9881C_COMMAND_INSTR(0x53, 0x67), + ILI9881C_COMMAND_INSTR(0x54, 0x89), + ILI9881C_COMMAND_INSTR(0x55, 0xab), + ILI9881C_COMMAND_INSTR(0x56, 0x01), + ILI9881C_COMMAND_INSTR(0x57, 0x23), + ILI9881C_COMMAND_INSTR(0x58, 0x45), + ILI9881C_COMMAND_INSTR(0x59, 0x67), + ILI9881C_COMMAND_INSTR(0x5a, 0x89), + ILI9881C_COMMAND_INSTR(0x5b, 0xab), + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), + ILI9881C_COMMAND_INSTR(0x5d, 0xef), + ILI9881C_COMMAND_INSTR(0x5e, 0x03), + ILI9881C_COMMAND_INSTR(0x5f, 0x14), + ILI9881C_COMMAND_INSTR(0x60, 0x15), + ILI9881C_COMMAND_INSTR(0x61, 0x0c), + ILI9881C_COMMAND_INSTR(0x62, 0x0d), + ILI9881C_COMMAND_INSTR(0x63, 0x0e), + ILI9881C_COMMAND_INSTR(0x64, 0x0f), + ILI9881C_COMMAND_INSTR(0x65, 0x10), + ILI9881C_COMMAND_INSTR(0x66, 0x11), + ILI9881C_COMMAND_INSTR(0x67, 0x08), + ILI9881C_COMMAND_INSTR(0x68, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x0a), + ILI9881C_COMMAND_INSTR(0x6a, 0x02), + ILI9881C_COMMAND_INSTR(0x6b, 0x02), + ILI9881C_COMMAND_INSTR(0x6c, 0x02), + ILI9881C_COMMAND_INSTR(0x6d, 0x02), + ILI9881C_COMMAND_INSTR(0x6e, 0x02), + ILI9881C_COMMAND_INSTR(0x6f, 0x02), + ILI9881C_COMMAND_INSTR(0x70, 0x02), + ILI9881C_COMMAND_INSTR(0x71, 0x02), + ILI9881C_COMMAND_INSTR(0x72, 0x06), + ILI9881C_COMMAND_INSTR(0x73, 0x02), + ILI9881C_COMMAND_INSTR(0x74, 0x02), + ILI9881C_COMMAND_INSTR(0x75, 0x14), + ILI9881C_COMMAND_INSTR(0x76, 0x15), + ILI9881C_COMMAND_INSTR(0x77, 0x0f), + ILI9881C_COMMAND_INSTR(0x78, 0x0e), + ILI9881C_COMMAND_INSTR(0x79, 0x0d), + ILI9881C_COMMAND_INSTR(0x7a, 0x0c), + 
ILI9881C_COMMAND_INSTR(0x7b, 0x11), + ILI9881C_COMMAND_INSTR(0x7c, 0x10), + ILI9881C_COMMAND_INSTR(0x7d, 0x06), + ILI9881C_COMMAND_INSTR(0x7e, 0x02), + ILI9881C_COMMAND_INSTR(0x7f, 0x0a), + ILI9881C_COMMAND_INSTR(0x80, 0x02), + ILI9881C_COMMAND_INSTR(0x81, 0x02), + ILI9881C_COMMAND_INSTR(0x82, 0x02), + ILI9881C_COMMAND_INSTR(0x83, 0x02), + ILI9881C_COMMAND_INSTR(0x84, 0x02), + ILI9881C_COMMAND_INSTR(0x85, 0x02), + ILI9881C_COMMAND_INSTR(0x86, 0x02), + ILI9881C_COMMAND_INSTR(0x87, 0x02), + ILI9881C_COMMAND_INSTR(0x88, 0x08), + ILI9881C_COMMAND_INSTR(0x89, 0x02), + ILI9881C_COMMAND_INSTR(0x8a, 0x02), + ILI9881C_SWITCH_PAGE_INSTR(4), + ILI9881C_COMMAND_INSTR(0x00, 0x80), + ILI9881C_COMMAND_INSTR(0x70, 0x00), + ILI9881C_COMMAND_INSTR(0x71, 0x00), + ILI9881C_COMMAND_INSTR(0x66, 0xfe), + ILI9881C_COMMAND_INSTR(0x82, 0x15), + ILI9881C_COMMAND_INSTR(0x84, 0x15), + ILI9881C_COMMAND_INSTR(0x85, 0x15), + ILI9881C_COMMAND_INSTR(0x3a, 0x24), + ILI9881C_COMMAND_INSTR(0x32, 0xac), + ILI9881C_COMMAND_INSTR(0x8c, 0x80), + ILI9881C_COMMAND_INSTR(0x3c, 0xf5), + ILI9881C_COMMAND_INSTR(0x88, 0x33), + ILI9881C_SWITCH_PAGE_INSTR(1), + ILI9881C_COMMAND_INSTR(0x22, 0x0a), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x53, 0x78), + ILI9881C_COMMAND_INSTR(0x55, 0x7b), + ILI9881C_COMMAND_INSTR(0x60, 0x20), + ILI9881C_COMMAND_INSTR(0x61, 0x00), + ILI9881C_COMMAND_INSTR(0x62, 0x0d), + ILI9881C_COMMAND_INSTR(0x63, 0x00), + ILI9881C_COMMAND_INSTR(0xa0, 0x00), + ILI9881C_COMMAND_INSTR(0xa1, 0x10), + ILI9881C_COMMAND_INSTR(0xa2, 0x1c), + ILI9881C_COMMAND_INSTR(0xa3, 0x13), + ILI9881C_COMMAND_INSTR(0xa4, 0x15), + ILI9881C_COMMAND_INSTR(0xa5, 0x26), + ILI9881C_COMMAND_INSTR(0xa6, 0x1a), + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), + ILI9881C_COMMAND_INSTR(0xa8, 0x67), + ILI9881C_COMMAND_INSTR(0xa9, 0x1c), + ILI9881C_COMMAND_INSTR(0xaa, 0x29), + ILI9881C_COMMAND_INSTR(0xab, 0x5b), + ILI9881C_COMMAND_INSTR(0xac, 0x26), + ILI9881C_COMMAND_INSTR(0xad, 0x28), + ILI9881C_COMMAND_INSTR(0xae, 0x5c), + ILI9881C_COMMAND_INSTR(0xaf, 0x30), + ILI9881C_COMMAND_INSTR(0xb0, 0x31), + ILI9881C_COMMAND_INSTR(0xb1, 0x32), + ILI9881C_COMMAND_INSTR(0xb2, 0x00), + ILI9881C_COMMAND_INSTR(0xb1, 0x2e), + ILI9881C_COMMAND_INSTR(0xb2, 0x32), + ILI9881C_COMMAND_INSTR(0xb3, 0x00), + ILI9881C_COMMAND_INSTR(0xb6, 0x02), + ILI9881C_COMMAND_INSTR(0xb7, 0x03), + ILI9881C_COMMAND_INSTR(0xc0, 0x00), + ILI9881C_COMMAND_INSTR(0xc1, 0x10), + ILI9881C_COMMAND_INSTR(0xc2, 0x1c), + ILI9881C_COMMAND_INSTR(0xc3, 0x13), + ILI9881C_COMMAND_INSTR(0xc4, 0x15), + ILI9881C_COMMAND_INSTR(0xc5, 0x26), + ILI9881C_COMMAND_INSTR(0xc6, 0x1a), + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), + ILI9881C_COMMAND_INSTR(0xc8, 0x67), + ILI9881C_COMMAND_INSTR(0xc9, 0x1c), + ILI9881C_COMMAND_INSTR(0xca, 0x29), + ILI9881C_COMMAND_INSTR(0xcb, 0x5b), + ILI9881C_COMMAND_INSTR(0xcc, 0x26), + ILI9881C_COMMAND_INSTR(0xcd, 0x28), + ILI9881C_COMMAND_INSTR(0xce, 0x5c), + ILI9881C_COMMAND_INSTR(0xcf, 0x30), + ILI9881C_COMMAND_INSTR(0xd0, 0x31), + ILI9881C_COMMAND_INSTR(0xd1, 0x2e), + ILI9881C_COMMAND_INSTR(0xd2, 0x32), + ILI9881C_COMMAND_INSTR(0xd3, 0x00), + ILI9881C_SWITCH_PAGE_INSTR(0), +}; + static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_SWITCH_PAGE_INSTR(3), ILI9881C_COMMAND_INSTR(0x01, 0x00), @@ -1960,6 +2158,23 @@ static const struct drm_display_mode tl050hdv35_default_mode = { .height_mm = 110, }; +static const struct drm_display_mode w552946aaa_default_mode = { + .clock = 65000, + + .hdisplay = 720, + .hsync_start = 720 + 52, + .hsync_end = 720 + 52 + 8, + .htotal = 720 + 
52 + 8 + 48, + + .vdisplay = 1280, + .vsync_start = 1280 + 16, + .vsync_end = 1280 + 16 + 6, + .vtotal = 1280 + 16 + 6 + 15, + + .width_mm = 68, + .height_mm = 121, +}; + static const struct drm_display_mode w552946aba_default_mode = { .clock = 64000, @@ -2188,6 +2403,15 @@ static const struct ili9881c_desc tl050hdv35_desc = { .default_address_mode = 0x03, }; +static const struct ili9881c_desc w552946aaa_desc = { + .init = w552946aaa_init, + .init_length = ARRAY_SIZE(w552946aaa_init), + .mode = &w552946aaa_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, + .lanes = 2, +}; + static const struct ili9881c_desc w552946aba_desc = { .init = w552946ab_init, .init_length = ARRAY_SIZE(w552946ab_init), @@ -2236,6 +2460,7 @@ static const struct of_device_id ili9881c_of_match[] = { { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, { .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc }, { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc }, + { .compatible = "wanchanglong,w552946aaa", .data = &w552946aaa_desc }, { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, { .compatible = "ampire,am8001280g", .data = &am8001280g_desc }, { .compatible = "raspberrypi,dsi-5inch", &rpi_5inch_desc }, diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c index 5c2530598ddb..aa05316dc57b 100644 --- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -1132,22 +1132,19 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi) dsi->lanes = desc->lanes; jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(jadard->reset)) { - DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n"); - return PTR_ERR(jadard->reset); - } + if (IS_ERR(jadard->reset)) + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->reset), + "failed to get our reset GPIO\n"); jadard->vdd = devm_regulator_get(dev, "vdd"); - if (IS_ERR(jadard->vdd)) { - DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n"); - return PTR_ERR(jadard->vdd); - } + if (IS_ERR(jadard->vdd)) + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vdd), + "failed to get vdd regulator\n"); jadard->vccio = devm_regulator_get(dev, "vccio"); - if (IS_ERR(jadard->vccio)) { - DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n"); - return PTR_ERR(jadard->vccio); - } + if (IS_ERR(jadard->vccio)) + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vccio), + "failed to get vccio regulator\n"); ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-lg-ld070wx3.c b/drivers/gpu/drm/panel/panel-lg-ld070wx3.c new file mode 100644 index 000000000000..00cbfc5518a5 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-lg-ld070wx3.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/array_size.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +static const struct regulator_bulk_data lg_ld070wx3_supplies[] = { + { .supply = "vdd" }, { .supply = "vcc" }, +}; + +struct lg_ld070wx3 { + struct drm_panel panel; 
+ struct mipi_dsi_device *dsi; + + struct regulator_bulk_data *supplies; +}; + +static inline struct lg_ld070wx3 *to_lg_ld070wx3(struct drm_panel *panel) +{ + return container_of(panel, struct lg_ld070wx3, panel); +} + +static int lg_ld070wx3_prepare(struct drm_panel *panel) +{ + struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel); + struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi }; + struct device *dev = panel->dev; + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies); + if (ret < 0) { + dev_err(dev, "failed to enable power supplies: %d\n", ret); + return ret; + } + + /* + * According to spec delay between enabling supply is 0, + * for regulators to reach required voltage ~5ms needed. + * MIPI interface signal for setup requires additional + * 110ms which in total results in 115ms. + */ + mdelay(115); + + mipi_dsi_dcs_soft_reset_multi(&ctx); + mipi_dsi_msleep(&ctx, 20); + + /* Differential input impedance selection */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xae, 0x0b); + + /* Enter test mode 1 and 2*/ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0xea); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x5f); + + /* Increased MIPI CLK driving ability */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x68); + + /* Exit test mode 1 and 2 */ + mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0x00); + mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x00); + + return ctx.accum_err; +} + +static int lg_ld070wx3_unprepare(struct drm_panel *panel) +{ + struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel); + struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi }; + + mipi_dsi_dcs_enter_sleep_mode_multi(&ctx); + + msleep(50); + + regulator_bulk_disable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies); + + /* power supply must be off for at least 1s after panel disable */ + msleep(1000); + + return 0; +} + +static const struct drm_display_mode lg_ld070wx3_mode = { + .clock = (800 + 32 + 48 + 8) * (1280 + 5 + 3 + 1) * 60 / 1000, + .hdisplay = 800, + .hsync_start = 800 + 32, + .hsync_end = 800 + 32 + 48, + .htotal = 800 + 32 + 48 + 8, + .vdisplay = 1280, + .vsync_start = 1280 + 5, + .vsync_end = 1280 + 5 + 3, + .vtotal = 1280 + 5 + 3 + 1, + .width_mm = 94, + .height_mm = 151, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static int lg_ld070wx3_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &lg_ld070wx3_mode); +} + +static const struct drm_panel_funcs lg_ld070wx3_panel_funcs = { + .prepare = lg_ld070wx3_prepare, + .unprepare = lg_ld070wx3_unprepare, + .get_modes = lg_ld070wx3_get_modes, +}; + +static int lg_ld070wx3_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct lg_ld070wx3 *priv; + int ret; + + priv = devm_drm_panel_alloc(dev, struct lg_ld070wx3, panel, + &lg_ld070wx3_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(lg_ld070wx3_supplies), + lg_ld070wx3_supplies, &priv->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get supplies\n"); + + priv->dsi = dsi; + mipi_dsi_set_drvdata(dsi, priv); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM; + + ret = drm_panel_of_backlight(&priv->panel); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get backlight\n"); + + drm_panel_add(&priv->panel); + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + 
drm_panel_remove(&priv->panel); + return dev_err_probe(dev, ret, "failed to attach to DSI host\n"); + } + + return 0; +} + +static void lg_ld070wx3_remove(struct mipi_dsi_device *dsi) +{ + struct lg_ld070wx3 *priv = mipi_dsi_get_drvdata(dsi); + + drm_panel_remove(&priv->panel); +} + +static const struct of_device_id lg_ld070wx3_of_match[] = { + { .compatible = "lg,ld070wx3-sl01" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, lg_ld070wx3_of_match); + +static struct mipi_dsi_driver lg_ld070wx3_driver = { + .driver = { + .name = "panel-lg-ld070wx3", + .of_match_table = lg_ld070wx3_of_match, + }, + .probe = lg_ld070wx3_probe, + .remove = lg_ld070wx3_remove, +}; +module_mipi_dsi_driver(lg_ld070wx3_driver); + +MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>"); +MODULE_DESCRIPTION("LG LD070WX3-SL01 DSI panel driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c index ad35d0fb0a16..c3fbc459c7e0 100644 --- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -54,9 +54,9 @@ static int rb070d30_panel_prepare(struct drm_panel *panel) } msleep(20); - gpiod_set_value(ctx->gpios.power, 1); + gpiod_set_value_cansleep(ctx->gpios.power, 1); msleep(20); - gpiod_set_value(ctx->gpios.reset, 1); + gpiod_set_value_cansleep(ctx->gpios.reset, 1); msleep(20); return 0; } @@ -65,8 +65,8 @@ static int rb070d30_panel_unprepare(struct drm_panel *panel) { struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); - gpiod_set_value(ctx->gpios.reset, 0); - gpiod_set_value(ctx->gpios.power, 0); + gpiod_set_value_cansleep(ctx->gpios.reset, 0); + gpiod_set_value_cansleep(ctx->gpios.power, 0); regulator_disable(ctx->supply); return 0; diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c index 064258217d50..e00a497a7c96 100644 --- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -16,20 +16,32 @@ #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> struct sofef00_panel { struct drm_panel panel; struct mipi_dsi_device *dsi; - struct regulator *supply; + struct regulator_bulk_data *supplies; struct gpio_desc *reset_gpio; }; +static const struct regulator_bulk_data sofef00_supplies[] = { + { .supply = "vddio" }, + { .supply = "vci" }, + { .supply = "poc" }, +}; + static inline struct sofef00_panel *to_sofef00_panel(struct drm_panel *panel) { return container_of(panel, struct sofef00_panel, panel); } +#define sofef00_test_key_on_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a) +#define sofef00_test_key_off_lvl2(ctx) \ + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5) + static void sofef00_panel_reset(struct sofef00_panel *ctx) { gpiod_set_value_cansleep(ctx->reset_gpio, 0); @@ -50,18 +62,26 @@ static int sofef00_panel_on(struct sofef00_panel *ctx) mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); - + sofef00_test_key_on_lvl2(&dsi_ctx); mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + sofef00_test_key_off_lvl2(&dsi_ctx); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); + sofef00_test_key_on_lvl2(&dsi_ctx); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 
0x12); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); + sofef00_test_key_off_lvl2(&dsi_ctx); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + return dsi_ctx.accum_err; +} + +static int sofef00_enable(struct drm_panel *panel) +{ + struct sofef00_panel *ctx = to_sofef00_panel(panel); + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); return dsi_ctx.accum_err; @@ -72,8 +92,6 @@ static int sofef00_panel_off(struct sofef00_panel *ctx) struct mipi_dsi_device *dsi = ctx->dsi; struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); mipi_dsi_msleep(&dsi_ctx, 40); @@ -86,70 +104,70 @@ static int sofef00_panel_off(struct sofef00_panel *ctx) static int sofef00_panel_prepare(struct drm_panel *panel) { struct sofef00_panel *ctx = to_sofef00_panel(panel); - struct device *dev = &ctx->dsi->dev; int ret; - ret = regulator_enable(ctx->supply); - if (ret < 0) { - dev_err(dev, "Failed to enable regulator: %d\n", ret); + ret = regulator_bulk_enable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); + if (ret < 0) return ret; - } sofef00_panel_reset(ctx); ret = sofef00_panel_on(ctx); if (ret < 0) { gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); return ret; } return 0; } -static int sofef00_panel_unprepare(struct drm_panel *panel) +static int sofef00_disable(struct drm_panel *panel) { struct sofef00_panel *ctx = to_sofef00_panel(panel); sofef00_panel_off(ctx); - regulator_disable(ctx->supply); return 0; } -static const struct drm_display_mode enchilada_panel_mode = { +static int sofef00_panel_unprepare(struct drm_panel *panel) +{ + struct sofef00_panel *ctx = to_sofef00_panel(panel); + + regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); + + return 0; +} + +static const struct drm_display_mode ams628nw01_panel_mode = { .clock = (1080 + 112 + 16 + 36) * (2280 + 36 + 8 + 12) * 60 / 1000, + .hdisplay = 1080, .hsync_start = 1080 + 112, .hsync_end = 1080 + 112 + 16, .htotal = 1080 + 112 + 16 + 36, + .vdisplay = 2280, .vsync_start = 2280 + 36, .vsync_end = 2280 + 36 + 8, .vtotal = 2280 + 36 + 8 + 12, + .width_mm = 68, .height_mm = 145, + + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }; static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode); - if (!mode) - return -ENOMEM; - - drm_mode_set_name(mode); - - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - drm_mode_probed_add(connector, mode); - - return 1; + return drm_connector_helper_get_modes_fixed(connector, &ams628nw01_panel_mode); } static const struct drm_panel_funcs sofef00_panel_panel_funcs = { .prepare = sofef00_panel_prepare, + .enable = sofef00_enable, + .disable = sofef00_disable, .unprepare = sofef00_panel_unprepare, .get_modes = sofef00_panel_get_modes, }; @@ -160,10 +178,14 @@ static int sofef00_panel_bl_update_status(struct backlight_device *bl) int err; u16 brightness = (u16)backlight_get_brightness(bl); + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + err = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); if (err < 0) return err; + 
dsi->mode_flags |= MIPI_DSI_MODE_LPM; + return 0; } @@ -177,7 +199,7 @@ sofef00_create_backlight(struct mipi_dsi_device *dsi) struct device *dev = &dsi->dev; const struct backlight_properties props = { .type = BACKLIGHT_PLATFORM, - .brightness = 1023, + .brightness = 512, .max_brightness = 1023, }; @@ -197,10 +219,12 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) if (IS_ERR(ctx)) return PTR_ERR(ctx); - ctx->supply = devm_regulator_get(dev, "vddio"); - if (IS_ERR(ctx->supply)) - return dev_err_probe(dev, PTR_ERR(ctx->supply), - "Failed to get vddio regulator\n"); + ret = devm_regulator_bulk_get_const(dev, + ARRAY_SIZE(sofef00_supplies), + sofef00_supplies, + &ctx->supplies); + if (ret) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ctx->reset_gpio)) @@ -212,6 +236,10 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; + + ctx->panel.prepare_prev_first = true; ctx->panel.backlight = sofef00_create_backlight(dsi); if (IS_ERR(ctx->panel.backlight)) @@ -243,7 +271,8 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi) } static const struct of_device_id sofef00_panel_of_match[] = { - { .compatible = "samsung,sofef00" }, + { .compatible = "samsung,sofef00" }, /* legacy */ + { .compatible = "samsung,sofef00-ams628nw01" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sofef00_panel_of_match); @@ -252,7 +281,7 @@ static struct mipi_dsi_driver sofef00_panel_driver = { .probe = sofef00_panel_probe, .remove = sofef00_panel_remove, .driver = { - .name = "panel-oneplus6", + .name = "panel-samsung-sofef00", .of_match_table = sofef00_panel_of_match, }, }; @@ -260,5 +289,5 @@ static struct mipi_dsi_driver sofef00_panel_driver = { module_mipi_dsi_driver(sofef00_panel_driver); MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); -MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones"); +MODULE_DESCRIPTION("DRM driver for Samsung SOFEF00 DDIC"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index da6b71b70a46..b26b682826bc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -4106,6 +4106,30 @@ static const struct panel_desc qishenglong_gopher2b_lcd = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct display_timing raystar_rff500f_awh_dnn_timing = { + .pixelclock = { 23000000, 25000000, 27000000 }, + .hactive = { 800, 800, 800 }, + .hback_porch = { 4, 8, 48 }, + .hfront_porch = { 4, 8, 48 }, + .hsync_len = { 2, 4, 8 }, + .vactive = { 480, 480, 480 }, + .vback_porch = { 4, 8, 12 }, + .vfront_porch = { 4, 8, 12 }, + .vsync_len = { 2, 4, 8 }, +}; + +static const struct panel_desc raystar_rff500f_awh_dnn = { + .timings = &raystar_rff500f_awh_dnn_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 108, + .height = 65, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct display_timing rocktech_rk043fn48h_timing = { .pixelclock = { 6000000, 9000000, 12000000 }, .hactive = { 480, 480, 480 }, @@ -4223,6 +4247,37 @@ static const struct panel_desc samsung_ltl101al01 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct display_timing samsung_ltl106al01_timing = { + 
.pixelclock = { 71980000, 71980000, 71980000 }, + .hactive = { 1366, 1366, 1366 }, + .hfront_porch = { 56, 56, 56 }, + .hback_porch = { 106, 106, 106 }, + .hsync_len = { 14, 14, 14 }, + .vactive = { 768, 768, 768 }, + .vfront_porch = { 3, 3, 3 }, + .vback_porch = { 6, 6, 6 }, + .vsync_len = { 1, 1, 1 }, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, +}; + +static const struct panel_desc samsung_ltl106al01 = { + .timings = &samsung_ltl106al01_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 235, + .height = 132, + }, + .delay = { + .prepare = 5, + .enable = 10, + .disable = 10, + .unprepare = 5, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode samsung_ltn101nt05_mode = { .clock = 54030, .hdisplay = 1024, @@ -5379,6 +5434,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "qishenglong,gopher2b-lcd", .data = &qishenglong_gopher2b_lcd, }, { + .compatible = "raystar,rff500f-awh-dnn", + .data = &raystar_rff500f_awh_dnn, + }, { .compatible = "rocktech,rk043fn48h", .data = &rocktech_rk043fn48h, }, { @@ -5391,6 +5449,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "samsung,ltl101al01", .data = &samsung_ltl101al01, }, { + .compatible = "samsung,ltl106al01", + .data = &samsung_ltl106al01, + }, { .compatible = "samsung,ltn101nt05", .data = &samsung_ltn101nt05, }, { @@ -5600,34 +5661,6 @@ static const struct panel_desc_dsi boe_tv080wum_nl0 = { .lanes = 4, }; -static const struct drm_display_mode lg_ld070wx3_sl01_mode = { - .clock = 71000, - .hdisplay = 800, - .hsync_start = 800 + 32, - .hsync_end = 800 + 32 + 1, - .htotal = 800 + 32 + 1 + 57, - .vdisplay = 1280, - .vsync_start = 1280 + 28, - .vsync_end = 1280 + 28 + 1, - .vtotal = 1280 + 28 + 1 + 14, -}; - -static const struct panel_desc_dsi lg_ld070wx3_sl01 = { - .desc = { - .modes = &lg_ld070wx3_sl01_mode, - .num_modes = 1, - .bpc = 8, - .size = { - .width = 94, - .height = 151, - }, - .connector_type = DRM_MODE_CONNECTOR_DSI, - }, - .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS, - .format = MIPI_DSI_FMT_RGB888, - .lanes = 4, -}; - static const struct drm_display_mode lg_lh500wx1_sd03_mode = { .clock = 67000, .hdisplay = 720, @@ -5752,9 +5785,6 @@ static const struct of_device_id dsi_of_match[] = { .compatible = "boe,tv080wum-nl0", .data = &boe_tv080wum_nl0 }, { - .compatible = "lg,ld070wx3-sl01", - .data = &lg_ld070wx3_sl01 - }, { .compatible = "lg,lh500wx1-sd03", .data = &lg_lh500wx1_sd03 }, { diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile index 02db21748c12..753a32c446df 100644 --- a/drivers/gpu/drm/panthor/Makefile +++ b/drivers/gpu/drm/panthor/Makefile @@ -10,6 +10,7 @@ panthor-y := \ panthor_heap.o \ panthor_hw.o \ panthor_mmu.o \ + panthor_pwr.o \ panthor_sched.o obj-$(CONFIG_DRM_PANTHOR) += panthor.o diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index 31fb8ed68199..e133b1e0ad6d 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -21,6 +21,7 @@ #include "panthor_gpu.h" #include "panthor_hw.h" #include "panthor_mmu.h" +#include "panthor_pwr.h" #include "panthor_regs.h" #include "panthor_sched.h" @@ -113,6 +114,7 @@ void panthor_device_unplug(struct panthor_device *ptdev) panthor_fw_unplug(ptdev); panthor_mmu_unplug(ptdev); panthor_gpu_unplug(ptdev); + panthor_pwr_unplug(ptdev); 
pm_runtime_dont_use_autosuspend(ptdev->base.dev); pm_runtime_put_sync_suspend(ptdev->base.dev); @@ -152,8 +154,8 @@ static void panthor_device_reset_work(struct work_struct *work) panthor_sched_pre_reset(ptdev); panthor_fw_pre_reset(ptdev, true); panthor_mmu_pre_reset(ptdev); - panthor_gpu_soft_reset(ptdev); - panthor_gpu_l2_power_on(ptdev); + panthor_hw_soft_reset(ptdev); + panthor_hw_l2_power_on(ptdev); panthor_mmu_post_reset(ptdev); ret = panthor_fw_post_reset(ptdev); atomic_set(&ptdev->reset.pending, 0); @@ -268,10 +270,14 @@ int panthor_device_init(struct panthor_device *ptdev) if (ret) goto err_rpm_put; - ret = panthor_gpu_init(ptdev); + ret = panthor_pwr_init(ptdev); if (ret) goto err_rpm_put; + ret = panthor_gpu_init(ptdev); + if (ret) + goto err_unplug_pwr; + ret = panthor_gpu_coherency_init(ptdev); if (ret) goto err_unplug_gpu; @@ -312,6 +318,9 @@ err_unplug_mmu: err_unplug_gpu: panthor_gpu_unplug(ptdev); +err_unplug_pwr: + panthor_pwr_unplug(ptdev); + err_rpm_put: pm_runtime_put_sync_suspend(ptdev->base.dev); return ret; @@ -465,6 +474,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev) { int ret; + panthor_pwr_resume(ptdev); panthor_gpu_resume(ptdev); panthor_mmu_resume(ptdev); @@ -474,6 +484,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev) panthor_mmu_suspend(ptdev); panthor_gpu_suspend(ptdev); + panthor_pwr_suspend(ptdev); return ret; } @@ -587,6 +598,7 @@ int panthor_device_suspend(struct device *dev) panthor_fw_suspend(ptdev); panthor_mmu_suspend(ptdev); panthor_gpu_suspend(ptdev); + panthor_pwr_suspend(ptdev); drm_dev_exit(cookie); } diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h index f32c1868bf6d..f35e52b9546a 100644 --- a/drivers/gpu/drm/panthor/panthor_device.h +++ b/drivers/gpu/drm/panthor/panthor_device.h @@ -24,10 +24,12 @@ struct panthor_device; struct panthor_gpu; struct panthor_group_pool; struct panthor_heap_pool; +struct panthor_hw; struct panthor_job; struct panthor_mmu; struct panthor_fw; struct panthor_perfcnt; +struct panthor_pwr; struct panthor_vm; struct panthor_vm_pool; @@ -134,6 +136,12 @@ struct panthor_device { /** @csif_info: Command stream interface information. */ struct drm_panthor_csif_info csif_info; + /** @hw: GPU-specific data. */ + struct panthor_hw *hw; + + /** @pwr: Power control management data. */ + struct panthor_pwr *pwr; + /** @gpu: GPU management data. */ struct panthor_gpu *gpu; diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index 38d87ab92eda..1a5e3c1a27fb 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -22,6 +22,7 @@ #include "panthor_fw.h" #include "panthor_gem.h" #include "panthor_gpu.h" +#include "panthor_hw.h" #include "panthor_mmu.h" #include "panthor_regs.h" #include "panthor_sched.h" @@ -33,6 +34,7 @@ #define PROGRESS_TIMEOUT_SCALE_SHIFT 10 #define IDLE_HYSTERESIS_US 800 #define PWROFF_HYSTERESIS_US 10000 +#define MCU_HALT_TIMEOUT_US (1ULL * USEC_PER_SEC) /** * struct panthor_fw_binary_hdr - Firmware binary header. 
@@ -317,6 +319,49 @@ panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot) return &ptdev->fw->iface.streams[csg_slot][cs_slot]; } +static bool panthor_fw_has_glb_state(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + + return glb_iface->control->version >= CSF_IFACE_VERSION(4, 1, 0); +} + +static bool panthor_fw_has_64bit_ep_req(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + + return glb_iface->control->version >= CSF_IFACE_VERSION(4, 0, 0); +} + +u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface) +{ + if (panthor_fw_has_64bit_ep_req(ptdev)) + return csg_iface->input->endpoint_req2; + else + return csg_iface->input->endpoint_req; +} + +void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface, u64 value) +{ + if (panthor_fw_has_64bit_ep_req(ptdev)) + csg_iface->input->endpoint_req2 = value; + else + csg_iface->input->endpoint_req = lower_32_bits(value); +} + +void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface, u64 value, + u64 mask) +{ + if (panthor_fw_has_64bit_ep_req(ptdev)) + panthor_fw_update_reqs64(csg_iface, endpoint_req2, value, mask); + else + panthor_fw_update_reqs(csg_iface, endpoint_req, lower_32_bits(value), + lower_32_bits(mask)); +} + /** * panthor_fw_conv_timeout() - Convert a timeout into a cycle-count * @ptdev: Device. @@ -996,6 +1041,9 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev) GLB_IDLE_EN | GLB_IDLE; + if (panthor_fw_has_glb_state(ptdev)) + glb_iface->input->ack_irq_mask |= GLB_STATE_MASK; + panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN); panthor_fw_toggle_reqs(glb_iface, req, ack, GLB_CFG_ALLOC_EN | @@ -1069,6 +1117,54 @@ static void panthor_fw_stop(struct panthor_device *ptdev) drm_err(&ptdev->base, "Failed to stop MCU"); } +static bool panthor_fw_mcu_halted(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + bool halted; + + halted = gpu_read(ptdev, MCU_STATUS) == MCU_STATUS_HALT; + + if (panthor_fw_has_glb_state(ptdev)) + halted &= (GLB_STATE_GET(glb_iface->output->ack) == GLB_STATE_HALT); + + return halted; +} + +static void panthor_fw_halt_mcu(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + + if (panthor_fw_has_glb_state(ptdev)) + panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_HALT), GLB_STATE_MASK); + else + panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT); + + gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1); +} + +static bool panthor_fw_wait_mcu_halted(struct panthor_device *ptdev) +{ + bool halted = false; + + if (read_poll_timeout_atomic(panthor_fw_mcu_halted, halted, halted, 10, + MCU_HALT_TIMEOUT_US, 0, ptdev)) { + drm_warn(&ptdev->base, "Timed out waiting for MCU to halt"); + return false; + } + + return true; +} + +static void panthor_fw_mcu_set_active(struct panthor_device *ptdev) +{ + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); + + if (panthor_fw_has_glb_state(ptdev)) + panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_ACTIVE), GLB_STATE_MASK); + else + panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT); +} + /** * panthor_fw_pre_reset() - Call before a reset. * @ptdev: Device. 
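The endpoint-request accessors added above hide whether the firmware interface exposes the 32-bit endpoint_req word or the 64-bit endpoint_req2 word. A minimal sketch of how a caller such as the scheduler could program a CSG's endpoints through them follows; the function name is hypothetical, and CSG_EP_REQ_COMPUTE()/CSG_EP_REQ_FRAGMENT() are assumed to be the pre-existing siblings of the CSG_EP_REQ_TILER()/CSG_EP_REQ_PRIORITY() macros visible in the panthor_fw.h hunk below.

	/*
	 * Illustrative only: build the endpoint requirement word once and let
	 * the accessor pick endpoint_req vs. endpoint_req2 based on the CSF
	 * interface version.
	 */
	static void example_csg_set_endpoints(struct panthor_device *ptdev,
					      struct panthor_fw_csg_iface *csg_iface,
					      u8 compute, u8 fragment, u8 tiler, u8 prio)
	{
		u64 ep_req = CSG_EP_REQ_COMPUTE(compute) |
			     CSG_EP_REQ_FRAGMENT(fragment) |
			     CSG_EP_REQ_TILER(tiler) |
			     CSG_EP_REQ_PRIORITY(prio);

		panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, ep_req);
	}

Partial updates (for example changing only the priority field of a running group) would instead go through panthor_fw_csg_endpoint_req_update() with CSG_EP_REQ_PRIORITY_MASK as the mask.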
@@ -1085,19 +1181,13 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang) ptdev->reset.fast = false; if (!on_hang) { - struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); - u32 status; - - panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT); - gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1); - if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status, - status == MCU_STATUS_HALT, 10, - 100000)) { - ptdev->reset.fast = true; - } else { + panthor_fw_halt_mcu(ptdev); + if (!panthor_fw_wait_mcu_halted(ptdev)) drm_warn(&ptdev->base, "Failed to cleanly suspend MCU"); - } + else + ptdev->reset.fast = true; } + panthor_fw_stop(ptdev); panthor_job_irq_suspend(&ptdev->fw->irq); panthor_fw_stop(ptdev); @@ -1126,14 +1216,14 @@ int panthor_fw_post_reset(struct panthor_device *ptdev) */ panthor_reload_fw_sections(ptdev, true); } else { - /* The FW detects 0 -> 1 transitions. Make sure we reset - * the HALT bit before the FW is rebooted. + /* + * If the FW was previously successfully halted in the pre-reset + * operation, we need to transition it to active again before + * the FW is rebooted. * This is not needed on a slow reset because FW sections are * re-initialized. */ - struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); - - panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT); + panthor_fw_mcu_set_active(ptdev); } ret = panthor_fw_start(ptdev); @@ -1171,6 +1261,10 @@ void panthor_fw_unplug(struct panthor_device *ptdev) if (ptdev->fw->irq.irq) panthor_job_irq_suspend(&ptdev->fw->irq); + panthor_fw_halt_mcu(ptdev); + if (!panthor_fw_wait_mcu_halted(ptdev)) + drm_warn(&ptdev->base, "Failed to halt MCU on unplug"); + panthor_fw_stop(ptdev); } @@ -1186,7 +1280,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev) ptdev->fw->vm = NULL; if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) - panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000); + panthor_hw_l2_power_off(ptdev); } /** @@ -1365,7 +1459,7 @@ int panthor_fw_init(struct panthor_device *ptdev) return ret; } - ret = panthor_gpu_l2_power_on(ptdev); + ret = panthor_hw_l2_power_on(ptdev); if (ret) return ret; @@ -1409,3 +1503,4 @@ MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin"); MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin"); MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin"); MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin"); +MODULE_FIRMWARE("arm/mali/arch14.8/mali_csffw.bin"); diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h index 6598d96c6d2a..fbdc21469ba3 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.h +++ b/drivers/gpu/drm/panthor/panthor_fw.h @@ -167,10 +167,11 @@ struct panthor_fw_csg_input_iface { #define CSG_EP_REQ_TILER(x) (((x) << 16) & GENMASK(19, 16)) #define CSG_EP_REQ_EXCL_COMPUTE BIT(20) #define CSG_EP_REQ_EXCL_FRAGMENT BIT(21) -#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & GENMASK(31, 28)) #define CSG_EP_REQ_PRIORITY_MASK GENMASK(31, 28) +#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & CSG_EP_REQ_PRIORITY_MASK) +#define CSG_EP_REQ_PRIORITY_GET(x) (((x) & CSG_EP_REQ_PRIORITY_MASK) >> 28) u32 endpoint_req; - u32 reserved2[2]; + u64 endpoint_req2; u64 suspend_buf; u64 protm_suspend_buf; u32 config; @@ -214,6 +215,13 @@ struct panthor_fw_global_input_iface { #define GLB_FWCFG_UPDATE BIT(9) #define GLB_IDLE_EN BIT(10) #define GLB_SLEEP BIT(12) +#define GLB_STATE_MASK GENMASK(14, 12) +#define GLB_STATE_ACTIVE 0 +#define GLB_STATE_HALT 1 +#define GLB_STATE_SLEEP 2 +#define 
GLB_STATE_SUSPEND 3 +#define GLB_STATE(x) (((x) << 12) & GLB_STATE_MASK) +#define GLB_STATE_GET(x) (((x) & GLB_STATE_MASK) >> 12) #define GLB_INACTIVE_COMPUTE BIT(20) #define GLB_INACTIVE_FRAGMENT BIT(21) #define GLB_INACTIVE_TILER BIT(22) @@ -457,6 +465,16 @@ struct panthor_fw_global_iface { spin_unlock(&(__iface)->lock); \ } while (0) +#define panthor_fw_update_reqs64(__iface, __in_reg, __val, __mask) \ + do { \ + u64 __cur_val, __new_val; \ + spin_lock(&(__iface)->lock); \ + __cur_val = READ_ONCE((__iface)->input->__in_reg); \ + __new_val = (__cur_val & ~(__mask)) | ((__val) & (__mask)); \ + WRITE_ONCE((__iface)->input->__in_reg, __new_val); \ + spin_unlock(&(__iface)->lock); \ + } while (0) + struct panthor_fw_global_iface * panthor_fw_get_glb_iface(struct panthor_device *ptdev); @@ -466,6 +484,16 @@ panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot); struct panthor_fw_cs_iface * panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot); +u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface); + +void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface, u64 value); + +void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev, + struct panthor_fw_csg_iface *csg_iface, u64 value, + u64 mask); + int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask, u32 *acked, u32 timeout_ms); diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index 2c12c1c58e2b..fbde78db270a 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -145,6 +145,9 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, bo = to_panthor_bo(&obj->base); kbo->obj = &obj->base; bo->flags = bo_flags; + bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm); + drm_gem_object_get(bo->exclusive_vm_root_gem); + bo->base.base.resv = bo->exclusive_vm_root_gem->resv; if (vm == panthor_fw_vm(ptdev)) debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED; @@ -168,9 +171,6 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, goto err_free_va; kbo->vm = panthor_vm_get(vm); - bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm); - drm_gem_object_get(bo->exclusive_vm_root_gem); - bo->base.base.resv = bo->exclusive_vm_root_gem->resv; return kbo; err_free_va: diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index eda670229184..06b231b2460a 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -19,6 +19,7 @@ #include "panthor_device.h" #include "panthor_gpu.h" +#include "panthor_hw.h" #include "panthor_regs.h" /** @@ -241,6 +242,11 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev, return 0; } +void panthor_gpu_l2_power_off(struct panthor_device *ptdev) +{ + panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000); +} + /** * panthor_gpu_l2_power_on() - Power-on the L2-cache * @ptdev: Device. @@ -368,9 +374,9 @@ void panthor_gpu_suspend(struct panthor_device *ptdev) { /* On a fast reset, simply power down the L2. 
*/ if (!ptdev->reset.fast) - panthor_gpu_soft_reset(ptdev); + panthor_hw_soft_reset(ptdev); else - panthor_gpu_power_off(ptdev, L2, 1, 20000); + panthor_hw_l2_power_off(ptdev); panthor_gpu_irq_suspend(&ptdev->gpu->irq); } @@ -385,6 +391,6 @@ void panthor_gpu_suspend(struct panthor_device *ptdev) void panthor_gpu_resume(struct panthor_device *ptdev) { panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK); - panthor_gpu_l2_power_on(ptdev); + panthor_hw_l2_power_on(ptdev); } diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h index 7c17a8c06858..12e66f48ced1 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.h +++ b/drivers/gpu/drm/panthor/panthor_gpu.h @@ -46,6 +46,7 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev, type ## _PWRTRANS, \ mask, timeout_us) +void panthor_gpu_l2_power_off(struct panthor_device *ptdev); int panthor_gpu_l2_power_on(struct panthor_device *ptdev); int panthor_gpu_flush_caches(struct panthor_device *ptdev, u32 l2, u32 lsc, u32 other); diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c index c44033a0bba8..87ebb7ae42c4 100644 --- a/drivers/gpu/drm/panthor/panthor_hw.c +++ b/drivers/gpu/drm/panthor/panthor_hw.c @@ -4,12 +4,55 @@ #include <drm/drm_print.h> #include "panthor_device.h" +#include "panthor_gpu.h" #include "panthor_hw.h" +#include "panthor_pwr.h" #include "panthor_regs.h" #define GPU_PROD_ID_MAKE(arch_major, prod_major) \ (((arch_major) << 24) | (prod_major)) +/** struct panthor_hw_entry - HW arch major to panthor_hw binding entry */ +struct panthor_hw_entry { + /** @arch_min: Minimum supported architecture major value (inclusive) */ + u8 arch_min; + + /** @arch_max: Maximum supported architecture major value (inclusive) */ + u8 arch_max; + + /** @hwdev: Pointer to panthor_hw structure */ + struct panthor_hw *hwdev; +}; + +static struct panthor_hw panthor_hw_arch_v10 = { + .ops = { + .soft_reset = panthor_gpu_soft_reset, + .l2_power_off = panthor_gpu_l2_power_off, + .l2_power_on = panthor_gpu_l2_power_on, + }, +}; + +static struct panthor_hw panthor_hw_arch_v14 = { + .ops = { + .soft_reset = panthor_pwr_reset_soft, + .l2_power_off = panthor_pwr_l2_power_off, + .l2_power_on = panthor_pwr_l2_power_on, + }, +}; + +static struct panthor_hw_entry panthor_hw_match[] = { + { + .arch_min = 10, + .arch_max = 13, + .hwdev = &panthor_hw_arch_v10, + }, + { + .arch_min = 14, + .arch_max = 14, + .hwdev = &panthor_hw_arch_v14, + }, +}; + static char *get_gpu_model_name(struct panthor_device *ptdev) { const u32 gpu_id = ptdev->gpu_info.gpu_id; @@ -55,6 +98,12 @@ static char *get_gpu_model_name(struct panthor_device *ptdev) fallthrough; case GPU_PROD_ID_MAKE(13, 1): return "Mali-G625"; + case GPU_PROD_ID_MAKE(14, 0): + return "Mali-G1-Ultra"; + case GPU_PROD_ID_MAKE(14, 1): + return "Mali-G1-Premium"; + case GPU_PROD_ID_MAKE(14, 3): + return "Mali-G1-Pro"; } return "(Unknown Mali GPU)"; @@ -64,7 +113,6 @@ static void panthor_gpu_info_init(struct panthor_device *ptdev) { unsigned int i; - ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID); ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID); ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES); @@ -82,12 +130,19 @@ static void panthor_gpu_info_init(struct panthor_device *ptdev) ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT); - ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); - ptdev->gpu_info.tiler_present = gpu_read64(ptdev, 
GPU_TILER_PRESENT); - ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); - /* Introduced in arch 11.x */ ptdev->gpu_info.gpu_features = gpu_read64(ptdev, GPU_FEATURES); + + if (panthor_hw_has_pwr_ctrl(ptdev)) { + /* Introduced in arch 14.x */ + ptdev->gpu_info.l2_present = gpu_read64(ptdev, PWR_L2_PRESENT); + ptdev->gpu_info.tiler_present = gpu_read64(ptdev, PWR_TILER_PRESENT); + ptdev->gpu_info.shader_present = gpu_read64(ptdev, PWR_SHADER_PRESENT); + } else { + ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); + ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); + ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); + } } static void panthor_hw_info_init(struct panthor_device *ptdev) @@ -119,8 +174,50 @@ static void panthor_hw_info_init(struct panthor_device *ptdev) ptdev->gpu_info.tiler_present); } +static int panthor_hw_bind_device(struct panthor_device *ptdev) +{ + struct panthor_hw *hdev = NULL; + const u32 arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id); + int i = 0; + + for (i = 0; i < ARRAY_SIZE(panthor_hw_match); i++) { + struct panthor_hw_entry *entry = &panthor_hw_match[i]; + + if (arch_major >= entry->arch_min && arch_major <= entry->arch_max) { + hdev = entry->hwdev; + break; + } + } + + if (!hdev) + return -EOPNOTSUPP; + + ptdev->hw = hdev; + + return 0; +} + +static int panthor_hw_gpu_id_init(struct panthor_device *ptdev) +{ + ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); + if (!ptdev->gpu_info.gpu_id) + return -ENXIO; + + return 0; +} + int panthor_hw_init(struct panthor_device *ptdev) { + int ret = 0; + + ret = panthor_hw_gpu_id_init(ptdev); + if (ret) + return ret; + + ret = panthor_hw_bind_device(ptdev); + if (ret) + return ret; + panthor_hw_info_init(ptdev); return 0; diff --git a/drivers/gpu/drm/panthor/panthor_hw.h b/drivers/gpu/drm/panthor/panthor_hw.h index 0af6acc6aa6a..56c68c1e9c26 100644 --- a/drivers/gpu/drm/panthor/panthor_hw.h +++ b/drivers/gpu/drm/panthor/panthor_hw.h @@ -4,8 +4,53 @@ #ifndef __PANTHOR_HW_H__ #define __PANTHOR_HW_H__ -struct panthor_device; +#include "panthor_device.h" +#include "panthor_regs.h" + +/** + * struct panthor_hw_ops - HW operations that are specific to a GPU + */ +struct panthor_hw_ops { + /** @soft_reset: Soft reset function pointer */ + int (*soft_reset)(struct panthor_device *ptdev); + + /** @l2_power_off: L2 power off function pointer */ + void (*l2_power_off)(struct panthor_device *ptdev); + + /** @l2_power_on: L2 power on function pointer */ + int (*l2_power_on)(struct panthor_device *ptdev); +}; + +/** + * struct panthor_hw - GPU specific register mapping and functions + */ +struct panthor_hw { + /** @features: Bitmap containing panthor_hw_feature */ + + /** @ops: Panthor HW specific operations */ + struct panthor_hw_ops ops; +}; int panthor_hw_init(struct panthor_device *ptdev); +static inline int panthor_hw_soft_reset(struct panthor_device *ptdev) +{ + return ptdev->hw->ops.soft_reset(ptdev); +} + +static inline int panthor_hw_l2_power_on(struct panthor_device *ptdev) +{ + return ptdev->hw->ops.l2_power_on(ptdev); +} + +static inline void panthor_hw_l2_power_off(struct panthor_device *ptdev) +{ + ptdev->hw->ops.l2_power_off(ptdev); +} + +static inline bool panthor_hw_has_pwr_ctrl(struct panthor_device *ptdev) +{ + return GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) >= 14; +} + #endif /* __PANTHOR_HW_H__ */ diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 478ea98db95c..d4839d282689 100644 --- 
a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -904,10 +904,9 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) { struct panthor_device *ptdev = vm->ptdev; struct io_pgtable_ops *ops = vm->pgtbl_ops; + u64 start_iova = iova; u64 offset = 0; - drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size); - while (offset < size) { size_t unmapped_sz = 0, pgcount; size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount); @@ -922,6 +921,12 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) panthor_vm_flush_range(vm, iova, offset + unmapped_sz); return -EINVAL; } + + drm_dbg(&ptdev->base, + "unmap: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pgcnt=%zu, pgsz=%zu", + vm->as.id, start_iova, size, iova + offset, + unmapped_sz / pgsize, pgsize); + offset += unmapped_sz; } @@ -937,6 +942,7 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, struct scatterlist *sgl; struct io_pgtable_ops *ops = vm->pgtbl_ops; u64 start_iova = iova; + u64 start_size = size; int ret; if (!size) @@ -956,15 +962,18 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, len = min_t(size_t, len, size); size -= len; - drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx", - vm->as.id, iova, &paddr, len); - while (len) { size_t pgcount, mapped = 0; size_t pgsize = get_pgsize(iova | paddr, len, &pgcount); ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_KERNEL, &mapped); + + drm_dbg(&ptdev->base, + "map: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pa=%pad, pgcnt=%zu, pgsz=%zu", + vm->as.id, start_iova, start_size, iova, &paddr, + mapped / pgsize, pgsize); + iova += mapped; paddr += mapped; len -= mapped; diff --git a/drivers/gpu/drm/panthor/panthor_pwr.c b/drivers/gpu/drm/panthor/panthor_pwr.c new file mode 100644 index 000000000000..57cfc7ce715b --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_pwr.c @@ -0,0 +1,549 @@ +// SPDX-License-Identifier: GPL-2.0 or MIT +/* Copyright 2025 ARM Limited. All rights reserved. */ + +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/cleanup.h> +#include <linux/iopoll.h> +#include <linux/wait.h> + +#include <drm/drm_managed.h> +#include <drm/drm_print.h> + +#include "panthor_device.h" +#include "panthor_hw.h" +#include "panthor_pwr.h" +#include "panthor_regs.h" + +#define PWR_INTERRUPTS_MASK \ + (PWR_IRQ_POWER_CHANGED_SINGLE | \ + PWR_IRQ_POWER_CHANGED_ALL | \ + PWR_IRQ_DELEGATION_CHANGED | \ + PWR_IRQ_RESET_COMPLETED | \ + PWR_IRQ_RETRACT_COMPLETED | \ + PWR_IRQ_INSPECT_COMPLETED | \ + PWR_IRQ_COMMAND_NOT_ALLOWED | \ + PWR_IRQ_COMMAND_INVALID) + +#define PWR_ALL_CORES_MASK GENMASK_U64(63, 0) + +#define PWR_DOMAIN_MAX_BITS 16 + +#define PWR_TRANSITION_TIMEOUT_US (2ULL * USEC_PER_SEC) + +#define PWR_RETRACT_TIMEOUT_US (2ULL * USEC_PER_MSEC) + +#define PWR_RESET_TIMEOUT_MS 500 + +/** + * struct panthor_pwr - PWR_CONTROL block management data. + */ +struct panthor_pwr { + /** @irq: PWR irq. */ + struct panthor_irq irq; + + /** @reqs_lock: Lock protecting access to pending_reqs. */ + spinlock_t reqs_lock; + + /** @pending_reqs: Pending PWR requests. */ + u32 pending_reqs; + + /** @reqs_acked: PWR request wait queue. 
*/ + wait_queue_head_t reqs_acked; +}; + +static void panthor_pwr_irq_handler(struct panthor_device *ptdev, u32 status) +{ + spin_lock(&ptdev->pwr->reqs_lock); + gpu_write(ptdev, PWR_INT_CLEAR, status); + + if (unlikely(status & PWR_IRQ_COMMAND_NOT_ALLOWED)) + drm_err(&ptdev->base, "PWR_IRQ: COMMAND_NOT_ALLOWED"); + + if (unlikely(status & PWR_IRQ_COMMAND_INVALID)) + drm_err(&ptdev->base, "PWR_IRQ: COMMAND_INVALID"); + + if (status & ptdev->pwr->pending_reqs) { + ptdev->pwr->pending_reqs &= ~status; + wake_up_all(&ptdev->pwr->reqs_acked); + } + spin_unlock(&ptdev->pwr->reqs_lock); +} +PANTHOR_IRQ_HANDLER(pwr, PWR, panthor_pwr_irq_handler); + +static void panthor_pwr_write_command(struct panthor_device *ptdev, u32 command, u64 args) +{ + if (args) + gpu_write64(ptdev, PWR_CMDARG, args); + + gpu_write(ptdev, PWR_COMMAND, command); +} + +static bool reset_irq_raised(struct panthor_device *ptdev) +{ + return gpu_read(ptdev, PWR_INT_RAWSTAT) & PWR_IRQ_RESET_COMPLETED; +} + +static bool reset_pending(struct panthor_device *ptdev) +{ + return (ptdev->pwr->pending_reqs & PWR_IRQ_RESET_COMPLETED); +} + +static int panthor_pwr_reset(struct panthor_device *ptdev, u32 reset_cmd) +{ + scoped_guard(spinlock_irqsave, &ptdev->pwr->reqs_lock) { + if (reset_pending(ptdev)) { + drm_WARN(&ptdev->base, 1, "Reset already pending"); + } else { + ptdev->pwr->pending_reqs |= PWR_IRQ_RESET_COMPLETED; + gpu_write(ptdev, PWR_INT_CLEAR, PWR_IRQ_RESET_COMPLETED); + panthor_pwr_write_command(ptdev, reset_cmd, 0); + } + } + + if (!wait_event_timeout(ptdev->pwr->reqs_acked, !reset_pending(ptdev), + msecs_to_jiffies(PWR_RESET_TIMEOUT_MS))) { + guard(spinlock_irqsave)(&ptdev->pwr->reqs_lock); + + if (reset_pending(ptdev) && !reset_irq_raised(ptdev)) { + drm_err(&ptdev->base, "RESET timed out (0x%x)", reset_cmd); + return -ETIMEDOUT; + } + + ptdev->pwr->pending_reqs &= ~PWR_IRQ_RESET_COMPLETED; + } + + return 0; +} + +static const char *get_domain_name(u8 domain) +{ + switch (domain) { + case PWR_COMMAND_DOMAIN_L2: + return "L2"; + case PWR_COMMAND_DOMAIN_TILER: + return "Tiler"; + case PWR_COMMAND_DOMAIN_SHADER: + return "Shader"; + case PWR_COMMAND_DOMAIN_BASE: + return "Base"; + case PWR_COMMAND_DOMAIN_STACK: + return "Stack"; + } + return "Unknown"; +} + +static u32 get_domain_base(u8 domain) +{ + switch (domain) { + case PWR_COMMAND_DOMAIN_L2: + return PWR_L2_PRESENT; + case PWR_COMMAND_DOMAIN_TILER: + return PWR_TILER_PRESENT; + case PWR_COMMAND_DOMAIN_SHADER: + return PWR_SHADER_PRESENT; + case PWR_COMMAND_DOMAIN_BASE: + return PWR_BASE_PRESENT; + case PWR_COMMAND_DOMAIN_STACK: + return PWR_STACK_PRESENT; + } + return 0; +} + +static u32 get_domain_ready_reg(u32 domain) +{ + return get_domain_base(domain) + (PWR_L2_READY - PWR_L2_PRESENT); +} + +static u32 get_domain_pwrtrans_reg(u32 domain) +{ + return get_domain_base(domain) + (PWR_L2_PWRTRANS - PWR_L2_PRESENT); +} + +static bool is_valid_domain(u32 domain) +{ + return get_domain_base(domain) != 0; +} + +static bool has_rtu(struct panthor_device *ptdev) +{ + return ptdev->gpu_info.gpu_features & GPU_FEATURES_RAY_TRAVERSAL; +} + +static u8 get_domain_subdomain(struct panthor_device *ptdev, u32 domain) +{ + if (domain == PWR_COMMAND_DOMAIN_SHADER && has_rtu(ptdev)) + return PWR_COMMAND_SUBDOMAIN_RTU; + + return 0; +} + +static int panthor_pwr_domain_wait_transition(struct panthor_device *ptdev, u32 domain, + u32 timeout_us) +{ + u32 pwrtrans_reg = get_domain_pwrtrans_reg(domain); + u64 val; + int ret = 0; + + ret = gpu_read64_poll_timeout(ptdev, pwrtrans_reg, val, 
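A note on the get_domain_ready_reg()/get_domain_pwrtrans_reg() helpers above: they only work because, in the PWR_CONTROL register map added to panthor_regs.h further down in this diff, every power domain keeps its READY register at +0x8 and its PWRTRANS register at +0x10 from that domain's PRESENT register. With those defines in scope, the relationship can be checked at compile time:

```c
/* Offsets implied by the PWR_* defines added in panthor_regs.h below. */
static_assert(PWR_L2_READY - PWR_L2_PRESENT == 0x8);
static_assert(PWR_L2_PWRTRANS - PWR_L2_PRESENT == 0x10);
static_assert(PWR_TILER_READY - PWR_TILER_PRESENT == 0x8);
static_assert(PWR_SHADER_PWRTRANS - PWR_SHADER_PRESENT == 0x10);

/* Worked example:
 *   get_domain_ready_reg(PWR_COMMAND_DOMAIN_TILER)
 *     = PWR_TILER_PRESENT + (PWR_L2_READY - PWR_L2_PRESENT)
 *     = 0x940 + 0x8 = 0x948 = PWR_TILER_READY
 */
```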
!(PWR_ALL_CORES_MASK & val), 100, + timeout_us); + if (ret) { + drm_err(&ptdev->base, "%s domain power in transition, pwrtrans(0x%llx)", + get_domain_name(domain), val); + return ret; + } + + return 0; +} + +static void panthor_pwr_debug_info_show(struct panthor_device *ptdev) +{ + drm_info(&ptdev->base, "GPU_FEATURES: 0x%016llx", gpu_read64(ptdev, GPU_FEATURES)); + drm_info(&ptdev->base, "PWR_STATUS: 0x%016llx", gpu_read64(ptdev, PWR_STATUS)); + drm_info(&ptdev->base, "L2_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_L2_PRESENT)); + drm_info(&ptdev->base, "L2_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_L2_PWRTRANS)); + drm_info(&ptdev->base, "L2_READY: 0x%016llx", gpu_read64(ptdev, PWR_L2_READY)); + drm_info(&ptdev->base, "TILER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PRESENT)); + drm_info(&ptdev->base, "TILER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PWRTRANS)); + drm_info(&ptdev->base, "TILER_READY: 0x%016llx", gpu_read64(ptdev, PWR_TILER_READY)); + drm_info(&ptdev->base, "SHADER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PRESENT)); + drm_info(&ptdev->base, "SHADER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PWRTRANS)); + drm_info(&ptdev->base, "SHADER_READY: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_READY)); +} + +static int panthor_pwr_domain_transition(struct panthor_device *ptdev, u32 cmd, u32 domain, + u64 mask, u32 timeout_us) +{ + u32 ready_reg = get_domain_ready_reg(domain); + u32 pwr_cmd = PWR_COMMAND_DEF(cmd, domain, get_domain_subdomain(ptdev, domain)); + u64 expected_val = 0; + u64 val; + int ret = 0; + + if (drm_WARN_ON(&ptdev->base, !is_valid_domain(domain))) + return -EINVAL; + + switch (cmd) { + case PWR_COMMAND_POWER_DOWN: + expected_val = 0; + break; + case PWR_COMMAND_POWER_UP: + expected_val = mask; + break; + default: + drm_err(&ptdev->base, "Invalid power domain transition command (0x%x)", cmd); + return -EINVAL; + } + + ret = panthor_pwr_domain_wait_transition(ptdev, domain, timeout_us); + if (ret) + return ret; + + /* domain already in target state, return early */ + if ((gpu_read64(ptdev, ready_reg) & mask) == expected_val) + return 0; + + panthor_pwr_write_command(ptdev, pwr_cmd, mask); + + ret = gpu_read64_poll_timeout(ptdev, ready_reg, val, (mask & val) == expected_val, 100, + timeout_us); + if (ret) { + drm_err(&ptdev->base, + "timeout waiting on %s power domain transition, cmd(0x%x), arg(0x%llx)", + get_domain_name(domain), pwr_cmd, mask); + panthor_pwr_debug_info_show(ptdev); + return ret; + } + + return 0; +} + +#define panthor_pwr_domain_power_off(__ptdev, __domain, __mask, __timeout_us) \ + panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_DOWN, __domain, __mask, \ + __timeout_us) + +#define panthor_pwr_domain_power_on(__ptdev, __domain, __mask, __timeout_us) \ + panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_UP, __domain, __mask, __timeout_us) + +/** + * retract_domain() - Retract control of a domain from MCU + * @ptdev: Device. + * @domain: Domain to retract the control + * + * Retracting L2 domain is not expected since it won't be delegated. + * + * Return: 0 on success or retracted already. + * -EPERM if domain is L2. + * A negative error code otherwise. 
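For reference, the command word that panthor_pwr_domain_transition() above writes is packed by PWR_COMMAND_DEF() as (subdomain << 16) | (domain << 8) | opcode. A worked example for powering up the shader domain on a GPU that advertises ray traversal units (so get_domain_subdomain() returns PWR_COMMAND_SUBDOMAIN_RTU):

```c
u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_POWER_UP,       /* opcode 0x10 */
			      PWR_COMMAND_DOMAIN_SHADER,  /* domain 8    */
			      PWR_COMMAND_SUBDOMAIN_RTU); /* BIT(0) = 1  */
/* pwr_cmd == (1 << 16) | (8 << 8) | 0x10 == 0x10810 */
```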
+ */ +static int retract_domain(struct panthor_device *ptdev, u32 domain) +{ + const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_RETRACT, domain, 0); + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); + const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain); + const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain); + u64 val; + int ret; + + if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2)) + return -EPERM; + + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, !(PWR_STATUS_RETRACT_PENDING & val), + 0, PWR_RETRACT_TIMEOUT_US); + if (ret) { + drm_err(&ptdev->base, "%s domain retract pending", get_domain_name(domain)); + return ret; + } + + if (!(pwr_status & delegated_mask)) { + drm_dbg(&ptdev->base, "%s domain already retracted", get_domain_name(domain)); + return 0; + } + + panthor_pwr_write_command(ptdev, pwr_cmd, 0); + + /* + * On successful retraction + * allow-flag will be set with delegated-flag being cleared. + */ + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, + ((delegated_mask | allow_mask) & val) == allow_mask, 10, + PWR_TRANSITION_TIMEOUT_US); + if (ret) { + drm_err(&ptdev->base, "Retracting %s domain timeout, cmd(0x%x)", + get_domain_name(domain), pwr_cmd); + return ret; + } + + return 0; +} + +/** + * delegate_domain() - Delegate control of a domain to MCU + * @ptdev: Device. + * @domain: Domain to delegate the control + * + * Delegating L2 domain is prohibited. + * + * Return: + * * 0 on success or delegated already. + * * -EPERM if domain is L2. + * * A negative error code otherwise. + */ +static int delegate_domain(struct panthor_device *ptdev, u32 domain) +{ + const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_DELEGATE, domain, 0); + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); + const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain); + const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain); + u64 val; + int ret; + + if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2)) + return -EPERM; + + /* Already delegated, exit early */ + if (pwr_status & delegated_mask) + return 0; + + /* Check if the command is allowed before delegating. */ + if (!(pwr_status & allow_mask)) { + drm_warn(&ptdev->base, "Delegating %s domain not allowed", get_domain_name(domain)); + return -EPERM; + } + + ret = panthor_pwr_domain_wait_transition(ptdev, domain, PWR_TRANSITION_TIMEOUT_US); + if (ret) + return ret; + + panthor_pwr_write_command(ptdev, pwr_cmd, 0); + + /* + * On successful delegation + * allow-flag will be cleared with delegated-flag being set. + */ + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, + ((delegated_mask | allow_mask) & val) == delegated_mask, + 10, PWR_TRANSITION_TIMEOUT_US); + if (ret) { + drm_err(&ptdev->base, "Delegating %s domain timeout, cmd(0x%x)", + get_domain_name(domain), pwr_cmd); + return ret; + } + + return 0; +} + +static int panthor_pwr_delegate_domains(struct panthor_device *ptdev) +{ + int ret; + + if (!ptdev->pwr) + return 0; + + ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER); + if (ret) + return ret; + + ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_TILER); + if (ret) + goto err_retract_shader; + + return 0; + +err_retract_shader: + retract_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER); + + return ret; +} + +/** + * panthor_pwr_domain_force_off - Forcefully power down a domain. + * @ptdev: Device. + * @domain: Domain to forcefully power down. + * + * This function will attempt to retract and power off the requested power + * domain. 
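The delegate/retract helpers above rely on a regular PWR_STATUS layout: a domain's "allowed" flag sits at the bit index equal to its domain ID, and the matching "delegated" flag sits 16 bits higher (see the PWR_STATUS_* defines in panthor_regs.h further down). With those defines in scope:

```c
/* Domain IDs double as PWR_STATUS bit positions. */
static_assert(PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_TILER) == PWR_STATUS_ALLOW_TILER);
static_assert(PWR_STATUS_DOMAIN_DELEGATED(PWR_COMMAND_DOMAIN_TILER) == PWR_STATUS_DELEGATED_TILER);
static_assert(PWR_STATUS_DOMAIN_DELEGATED(PWR_COMMAND_DOMAIN_SHADER) == PWR_STATUS_DELEGATED_SHADER);

/* delegate_domain() then polls PWR_STATUS for
 *   (status & (DELEGATED | ALLOWED)) == DELEGATED
 * i.e. delegated set and allowed cleared, while retract_domain() waits
 * for the opposite combination.
 */
```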
However, if retraction fails, the operation is aborted. If power off + * fails, the domain will remain retracted and under the host control. + * + * Return: 0 on success or a negative error code on failure. + */ +static int panthor_pwr_domain_force_off(struct panthor_device *ptdev, u32 domain) +{ + const u64 domain_ready = gpu_read64(ptdev, get_domain_ready_reg(domain)); + int ret; + + /* Domain already powered down, early exit. */ + if (!domain_ready) + return 0; + + /* Domain has to be in host control to issue power off command. */ + ret = retract_domain(ptdev, domain); + if (ret) + return ret; + + return panthor_pwr_domain_power_off(ptdev, domain, domain_ready, PWR_TRANSITION_TIMEOUT_US); +} + +void panthor_pwr_unplug(struct panthor_device *ptdev) +{ + unsigned long flags; + + if (!ptdev->pwr) + return; + + /* Make sure the IRQ handler is not running after that point. */ + panthor_pwr_irq_suspend(&ptdev->pwr->irq); + + /* Wake-up all waiters. */ + spin_lock_irqsave(&ptdev->pwr->reqs_lock, flags); + ptdev->pwr->pending_reqs = 0; + wake_up_all(&ptdev->pwr->reqs_acked); + spin_unlock_irqrestore(&ptdev->pwr->reqs_lock, flags); +} + +int panthor_pwr_init(struct panthor_device *ptdev) +{ + struct panthor_pwr *pwr; + int err, irq; + + if (!panthor_hw_has_pwr_ctrl(ptdev)) + return 0; + + pwr = drmm_kzalloc(&ptdev->base, sizeof(*pwr), GFP_KERNEL); + if (!pwr) + return -ENOMEM; + + spin_lock_init(&pwr->reqs_lock); + init_waitqueue_head(&pwr->reqs_acked); + ptdev->pwr = pwr; + + irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu"); + if (irq < 0) + return irq; + + err = panthor_request_pwr_irq(ptdev, &pwr->irq, irq, PWR_INTERRUPTS_MASK); + if (err) + return err; + + return 0; +} + +int panthor_pwr_reset_soft(struct panthor_device *ptdev) +{ + if (!(gpu_read64(ptdev, PWR_STATUS) & PWR_STATUS_ALLOW_SOFT_RESET)) { + drm_err(&ptdev->base, "RESET_SOFT not allowed"); + return -EOPNOTSUPP; + } + + return panthor_pwr_reset(ptdev, PWR_COMMAND_RESET_SOFT); +} + +void panthor_pwr_l2_power_off(struct panthor_device *ptdev) +{ + const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2); + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); + + /* Abort if L2 power off constraints are not satisfied */ + if (!(pwr_status & l2_allow_mask)) { + drm_warn(&ptdev->base, "Power off L2 domain not allowed"); + return; + } + + /* It is expected that when halting the MCU, it would power down its + * delegated domains. However, an unresponsive or hung MCU may not do + * so, which is why we need to check and retract the domains back into + * host control to be powered down in the right order before powering + * down the L2. 
+ */ + if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_TILER)) + return; + + if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_SHADER)) + return; + + panthor_pwr_domain_power_off(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present, + PWR_TRANSITION_TIMEOUT_US); +} + +int panthor_pwr_l2_power_on(struct panthor_device *ptdev) +{ + const u32 pwr_status = gpu_read64(ptdev, PWR_STATUS); + const u32 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2); + int ret; + + if ((pwr_status & l2_allow_mask) == 0) { + drm_warn(&ptdev->base, "Power on L2 domain not allowed"); + return -EPERM; + } + + ret = panthor_pwr_domain_power_on(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present, + PWR_TRANSITION_TIMEOUT_US); + if (ret) + return ret; + + /* Delegate control of the shader and tiler power domains to the MCU as + * it can better manage which shader/tiler cores need to be powered up + * or can be powered down based on currently running jobs. + * + * If the shader and tiler domains are already delegated to the MCU, + * this call would just return early. + */ + return panthor_pwr_delegate_domains(ptdev); +} + +void panthor_pwr_suspend(struct panthor_device *ptdev) +{ + if (!ptdev->pwr) + return; + + panthor_pwr_irq_suspend(&ptdev->pwr->irq); +} + +void panthor_pwr_resume(struct panthor_device *ptdev) +{ + if (!ptdev->pwr) + return; + + panthor_pwr_irq_resume(&ptdev->pwr->irq, PWR_INTERRUPTS_MASK); +} diff --git a/drivers/gpu/drm/panthor/panthor_pwr.h b/drivers/gpu/drm/panthor/panthor_pwr.h new file mode 100644 index 000000000000..adf1f6136abc --- /dev/null +++ b/drivers/gpu/drm/panthor/panthor_pwr.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ +/* Copyright 2025 ARM Limited. All rights reserved. */ + +#ifndef __PANTHOR_PWR_H__ +#define __PANTHOR_PWR_H__ + +struct panthor_device; + +void panthor_pwr_unplug(struct panthor_device *ptdev); + +int panthor_pwr_init(struct panthor_device *ptdev); + +int panthor_pwr_reset_soft(struct panthor_device *ptdev); + +void panthor_pwr_l2_power_off(struct panthor_device *ptdev); + +int panthor_pwr_l2_power_on(struct panthor_device *ptdev); + +void panthor_pwr_suspend(struct panthor_device *ptdev); + +void panthor_pwr_resume(struct panthor_device *ptdev); + +#endif /* __PANTHOR_PWR_H__ */ diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h index 8fa69f33e911..08bf06c452d6 100644 --- a/drivers/gpu/drm/panthor/panthor_regs.h +++ b/drivers/gpu/drm/panthor/panthor_regs.h @@ -74,6 +74,7 @@ #define GPU_FEATURES 0x60 #define GPU_FEATURES_RAY_INTERSECTION BIT(2) +#define GPU_FEATURES_RAY_TRAVERSAL BIT(5) #define GPU_TIMESTAMP_OFFSET 0x88 #define GPU_CYCLE_COUNT 0x90 @@ -209,4 +210,82 @@ #define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000)) #define CSF_GLB_DOORBELL_ID 0 +/* PWR Control registers */ + +#define PWR_CONTROL_BASE 0x800 +#define PWR_CTRL_REG(x) (PWR_CONTROL_BASE + (x)) + +#define PWR_INT_RAWSTAT PWR_CTRL_REG(0x0) +#define PWR_INT_CLEAR PWR_CTRL_REG(0x4) +#define PWR_INT_MASK PWR_CTRL_REG(0x8) +#define PWR_INT_STAT PWR_CTRL_REG(0xc) +#define PWR_IRQ_POWER_CHANGED_SINGLE BIT(0) +#define PWR_IRQ_POWER_CHANGED_ALL BIT(1) +#define PWR_IRQ_DELEGATION_CHANGED BIT(2) +#define PWR_IRQ_RESET_COMPLETED BIT(3) +#define PWR_IRQ_RETRACT_COMPLETED BIT(4) +#define PWR_IRQ_INSPECT_COMPLETED BIT(5) +#define PWR_IRQ_COMMAND_NOT_ALLOWED BIT(30) +#define PWR_IRQ_COMMAND_INVALID BIT(31) + +#define PWR_STATUS PWR_CTRL_REG(0x20) +#define PWR_STATUS_ALLOW_L2 BIT_U64(0) +#define 
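panthor_pwr.h above exposes only a handful of entry points, and their call sites are not part of these hunks. A plausible resume-side usage, assuming the device core routes L2 power through the panthor_hw ops shown earlier (hypothetical caller, illustrative only):

```c
static int example_resume_gpu_power(struct panthor_device *ptdev)
{
	int ret;

	/* Re-enable PWR interrupt handling first (a no-op when the GPU has
	 * no PWR_CONTROL block and ptdev->pwr is NULL).
	 */
	panthor_pwr_resume(ptdev);

	/* On arch >= 14 this is expected to dispatch to
	 * panthor_pwr_l2_power_on(), which powers the L2 domain and then
	 * delegates the shader/tiler domains back to the MCU.
	 */
	ret = panthor_hw_l2_power_on(ptdev);
	if (ret)
		panthor_pwr_suspend(ptdev);

	return ret;
}
```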
PWR_STATUS_ALLOW_TILER BIT_U64(1) +#define PWR_STATUS_ALLOW_SHADER BIT_U64(8) +#define PWR_STATUS_ALLOW_BASE BIT_U64(14) +#define PWR_STATUS_ALLOW_STACK BIT_U64(15) +#define PWR_STATUS_DOMAIN_ALLOWED(x) BIT_U64(x) +#define PWR_STATUS_DELEGATED_L2 BIT_U64(16) +#define PWR_STATUS_DELEGATED_TILER BIT_U64(17) +#define PWR_STATUS_DELEGATED_SHADER BIT_U64(24) +#define PWR_STATUS_DELEGATED_BASE BIT_U64(30) +#define PWR_STATUS_DELEGATED_STACK BIT_U64(31) +#define PWR_STATUS_DELEGATED_SHIFT 16 +#define PWR_STATUS_DOMAIN_DELEGATED(x) BIT_U64((x) + PWR_STATUS_DELEGATED_SHIFT) +#define PWR_STATUS_ALLOW_SOFT_RESET BIT_U64(33) +#define PWR_STATUS_ALLOW_FAST_RESET BIT_U64(34) +#define PWR_STATUS_POWER_PENDING BIT_U64(41) +#define PWR_STATUS_RESET_PENDING BIT_U64(42) +#define PWR_STATUS_RETRACT_PENDING BIT_U64(43) +#define PWR_STATUS_INSPECT_PENDING BIT_U64(44) + +#define PWR_COMMAND PWR_CTRL_REG(0x28) +#define PWR_COMMAND_POWER_UP 0x10 +#define PWR_COMMAND_POWER_DOWN 0x11 +#define PWR_COMMAND_DELEGATE 0x20 +#define PWR_COMMAND_RETRACT 0x21 +#define PWR_COMMAND_RESET_SOFT 0x31 +#define PWR_COMMAND_RESET_FAST 0x32 +#define PWR_COMMAND_INSPECT 0xF0 +#define PWR_COMMAND_DOMAIN_L2 0 +#define PWR_COMMAND_DOMAIN_TILER 1 +#define PWR_COMMAND_DOMAIN_SHADER 8 +#define PWR_COMMAND_DOMAIN_BASE 14 +#define PWR_COMMAND_DOMAIN_STACK 15 +#define PWR_COMMAND_SUBDOMAIN_RTU BIT(0) +#define PWR_COMMAND_DEF(cmd, domain, subdomain) \ + (((subdomain) << 16) | ((domain) << 8) | (cmd)) + +#define PWR_CMDARG PWR_CTRL_REG(0x30) + +#define PWR_L2_PRESENT PWR_CTRL_REG(0x100) +#define PWR_L2_READY PWR_CTRL_REG(0x108) +#define PWR_L2_PWRTRANS PWR_CTRL_REG(0x110) +#define PWR_L2_PWRACTIVE PWR_CTRL_REG(0x118) +#define PWR_TILER_PRESENT PWR_CTRL_REG(0x140) +#define PWR_TILER_READY PWR_CTRL_REG(0x148) +#define PWR_TILER_PWRTRANS PWR_CTRL_REG(0x150) +#define PWR_TILER_PWRACTIVE PWR_CTRL_REG(0x158) +#define PWR_SHADER_PRESENT PWR_CTRL_REG(0x200) +#define PWR_SHADER_READY PWR_CTRL_REG(0x208) +#define PWR_SHADER_PWRTRANS PWR_CTRL_REG(0x210) +#define PWR_SHADER_PWRACTIVE PWR_CTRL_REG(0x218) +#define PWR_BASE_PRESENT PWR_CTRL_REG(0x380) +#define PWR_BASE_READY PWR_CTRL_REG(0x388) +#define PWR_BASE_PWRTRANS PWR_CTRL_REG(0x390) +#define PWR_BASE_PWRACTIVE PWR_CTRL_REG(0x398) +#define PWR_STACK_PRESENT PWR_CTRL_REG(0x3c0) +#define PWR_STACK_READY PWR_CTRL_REG(0x3c8) +#define PWR_STACK_PWRTRANS PWR_CTRL_REG(0x3d0) + #endif diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index e74ca071159d..b834123a6560 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -364,17 +364,20 @@ struct panthor_queue { /** @name: DRM scheduler name for this queue. */ char *name; - /** - * @remaining_time: Time remaining before the job timeout expires. - * - * The job timeout is suspended when the queue is not scheduled by the - * FW. Every time we suspend the timer, we need to save the remaining - * time so we can restore it later on. - */ - unsigned long remaining_time; + /** @timeout: Queue timeout related fields. */ + struct { + /** @timeout.work: Work executed when a queue timeout occurs. */ + struct delayed_work work; - /** @timeout_suspended: True if the job timeout was suspended. */ - bool timeout_suspended; + /** + * @timeout.remaining: Time remaining before a queue timeout. + * + * When the timer is running, this value is set to MAX_SCHEDULE_TIMEOUT. + * When the timer is suspended, it's set to the time remaining when the + * timer was suspended. 
+ */ + unsigned long remaining; + } timeout; /** * @doorbell_id: Doorbell assigned to this queue. @@ -899,6 +902,10 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue * if (IS_ERR_OR_NULL(queue)) return; + /* This should have been disabled before that point. */ + drm_WARN_ON(&group->ptdev->base, + disable_delayed_work_sync(&queue->timeout.work)); + if (queue->entity.fence_context) drm_sched_entity_destroy(&queue->entity); @@ -1046,6 +1053,115 @@ group_unbind_locked(struct panthor_group *group) return 0; } +static bool +group_is_idle(struct panthor_group *group) +{ + struct panthor_device *ptdev = group->ptdev; + u32 inactive_queues; + + if (group->csg_id >= 0) + return ptdev->scheduler->csg_slots[group->csg_id].idle; + + inactive_queues = group->idle_queues | group->blocked_queues; + return hweight32(inactive_queues) == group->queue_count; +} + +static void +queue_reset_timeout_locked(struct panthor_queue *queue) +{ + lockdep_assert_held(&queue->fence_ctx.lock); + + if (queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT) { + mod_delayed_work(queue->scheduler.timeout_wq, + &queue->timeout.work, + msecs_to_jiffies(JOB_TIMEOUT_MS)); + } +} + +static bool +group_can_run(struct panthor_group *group) +{ + return group->state != PANTHOR_CS_GROUP_TERMINATED && + group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && + !group->destroyed && group->fatal_queues == 0 && + !group->timedout; +} + +static bool +queue_timeout_is_suspended(struct panthor_queue *queue) +{ + /* When running, the remaining time is set to MAX_SCHEDULE_TIMEOUT. */ + return queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT; +} + +static void +queue_suspend_timeout_locked(struct panthor_queue *queue) +{ + unsigned long qtimeout, now; + struct panthor_group *group; + struct panthor_job *job; + bool timer_was_active; + + lockdep_assert_held(&queue->fence_ctx.lock); + + /* Already suspended, nothing to do. */ + if (queue_timeout_is_suspended(queue)) + return; + + job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs, + struct panthor_job, node); + group = job ? job->group : NULL; + + /* If the queue is blocked and the group is idle, we want the timer to + * keep running because the group can't be unblocked by other queues, + * so it has to come from an external source, and we want to timebox + * this external signalling. + */ + if (group && group_can_run(group) && + (group->blocked_queues & BIT(job->queue_idx)) && + group_is_idle(group)) + return; + + now = jiffies; + qtimeout = queue->timeout.work.timer.expires; + + /* Cancel the timer. 
*/ + timer_was_active = cancel_delayed_work(&queue->timeout.work); + if (!timer_was_active || !job) + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); + else if (time_after(qtimeout, now)) + queue->timeout.remaining = qtimeout - now; + else + queue->timeout.remaining = 0; + + if (WARN_ON_ONCE(queue->timeout.remaining > msecs_to_jiffies(JOB_TIMEOUT_MS))) + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); +} + +static void +queue_suspend_timeout(struct panthor_queue *queue) +{ + spin_lock(&queue->fence_ctx.lock); + queue_suspend_timeout_locked(queue); + spin_unlock(&queue->fence_ctx.lock); +} + +static void +queue_resume_timeout(struct panthor_queue *queue) +{ + spin_lock(&queue->fence_ctx.lock); + + if (queue_timeout_is_suspended(queue)) { + mod_delayed_work(queue->scheduler.timeout_wq, + &queue->timeout.work, + queue->timeout.remaining); + + queue->timeout.remaining = MAX_SCHEDULE_TIMEOUT; + } + + spin_unlock(&queue->fence_ctx.lock); +} + /** * cs_slot_prog_locked() - Program a queue slot * @ptdev: Device. @@ -1084,10 +1200,8 @@ cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) CS_IDLE_EMPTY | CS_STATE_MASK | CS_EXTRACT_EVENT); - if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) { - drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time); - queue->timeout_suspended = false; - } + if (queue->iface.input->insert != queue->iface.input->extract) + queue_resume_timeout(queue); } /** @@ -1114,14 +1228,7 @@ cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id) CS_STATE_STOP, CS_STATE_MASK); - /* If the queue is blocked, we want to keep the timeout running, so - * we can detect unbounded waits and kill the group when that happens. - */ - if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) { - queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); - queue->timeout_suspended = true; - WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS)); - } + queue_suspend_timeout(queue); return 0; } @@ -1140,11 +1247,13 @@ csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id) { struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; struct panthor_fw_csg_iface *csg_iface; + u64 endpoint_req; lockdep_assert_held(&ptdev->scheduler->lock); csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); - csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28; + endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface); + csg_slot->priority = CSG_EP_REQ_PRIORITY_GET(endpoint_req); } /** @@ -1304,6 +1413,7 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) struct panthor_csg_slot *csg_slot; struct panthor_group *group; u32 queue_mask = 0, i; + u64 endpoint_req; lockdep_assert_held(&ptdev->scheduler->lock); @@ -1330,10 +1440,12 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority) csg_iface->input->allow_compute = group->compute_core_mask; csg_iface->input->allow_fragment = group->fragment_core_mask; csg_iface->input->allow_other = group->tiler_core_mask; - csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | - CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | - CSG_EP_REQ_TILER(group->max_tiler_cores) | - CSG_EP_REQ_PRIORITY(priority); + endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | + CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | + CSG_EP_REQ_TILER(group->max_tiler_cores) | + 
CSG_EP_REQ_PRIORITY(priority); + panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req); + csg_iface->input->config = panthor_vm_as(group->vm); if (group->suspend_buf) @@ -1916,28 +2028,6 @@ tick_ctx_is_full(const struct panthor_scheduler *sched, return ctx->group_count == sched->csg_slot_count; } -static bool -group_is_idle(struct panthor_group *group) -{ - struct panthor_device *ptdev = group->ptdev; - u32 inactive_queues; - - if (group->csg_id >= 0) - return ptdev->scheduler->csg_slots[group->csg_id].idle; - - inactive_queues = group->idle_queues | group->blocked_queues; - return hweight32(inactive_queues) == group->queue_count; -} - -static bool -group_can_run(struct panthor_group *group) -{ - return group->state != PANTHOR_CS_GROUP_TERMINATED && - group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && - !group->destroyed && group->fatal_queues == 0 && - !group->timedout; -} - static void tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx, @@ -2231,9 +2321,9 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c continue; } - panthor_fw_update_reqs(csg_iface, endpoint_req, - CSG_EP_REQ_PRIORITY(new_csg_prio), - CSG_EP_REQ_PRIORITY_MASK); + panthor_fw_csg_endpoint_req_update(ptdev, csg_iface, + CSG_EP_REQ_PRIORITY(new_csg_prio), + CSG_EP_REQ_PRIORITY_MASK); csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, CSG_ENDPOINT_CONFIG); @@ -2619,6 +2709,7 @@ static void group_schedule_locked(struct panthor_group *group, u32 queue_mask) static void queue_stop(struct panthor_queue *queue, struct panthor_job *bad_job) { + disable_delayed_work_sync(&queue->timeout.work); drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); } @@ -2630,6 +2721,7 @@ static void queue_start(struct panthor_queue *queue) list_for_each_entry(job, &queue->scheduler.pending_list, base.list) job->base.s_fence->parent = dma_fence_get(job->done_fence); + enable_delayed_work(&queue->timeout.work); drm_sched_start(&queue->scheduler, 0); } @@ -2696,7 +2788,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev) { struct panthor_scheduler *sched = ptdev->scheduler; struct panthor_csg_slots_upd_ctx upd_ctx; - struct panthor_group *group; u32 suspended_slots; u32 i; @@ -2750,13 +2841,23 @@ void panthor_sched_suspend(struct panthor_device *ptdev) while (slot_mask) { u32 csg_id = ffs(slot_mask) - 1; struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; + struct panthor_group *group = csg_slot->group; /* Terminate command timedout, but the soft-reset will * automatically terminate all active groups, so let's * force the state to halted here. */ - if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED) - csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; + if (group->state != PANTHOR_CS_GROUP_TERMINATED) { + group->state = PANTHOR_CS_GROUP_TERMINATED; + + /* Reset the queue slots manually if the termination + * request failed. 
+ */ + for (i = 0; i < group->queue_count; i++) { + if (group->queues[i]) + cs_slot_reset_locked(ptdev, csg_id, i); + } + } slot_mask &= ~BIT(csg_id); } } @@ -2786,8 +2887,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev) for (i = 0; i < sched->csg_slot_count; i++) { struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; + struct panthor_group *group = csg_slot->group; - group = csg_slot->group; if (!group) continue; @@ -2916,35 +3017,47 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) xa_unlock(&gpool->xa); } -static void group_sync_upd_work(struct work_struct *work) +static bool queue_check_job_completion(struct panthor_queue *queue) { - struct panthor_group *group = - container_of(work, struct panthor_group, sync_upd_work); + struct panthor_syncobj_64b *syncobj = NULL; struct panthor_job *job, *job_tmp; + bool cookie, progress = false; LIST_HEAD(done_jobs); - u32 queue_idx; - bool cookie; cookie = dma_fence_begin_signalling(); - for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { - struct panthor_queue *queue = group->queues[queue_idx]; - struct panthor_syncobj_64b *syncobj; + spin_lock(&queue->fence_ctx.lock); + list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { + if (!syncobj) { + struct panthor_group *group = job->group; - if (!queue) - continue; + syncobj = group->syncobjs->kmap + + (job->queue_idx * sizeof(*syncobj)); + } - syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj)); + if (syncobj->seqno < job->done_fence->seqno) + break; - spin_lock(&queue->fence_ctx.lock); - list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { - if (syncobj->seqno < job->done_fence->seqno) - break; + list_move_tail(&job->node, &done_jobs); + dma_fence_signal_locked(job->done_fence); + } - list_move_tail(&job->node, &done_jobs); - dma_fence_signal_locked(job->done_fence); - } - spin_unlock(&queue->fence_ctx.lock); + if (list_empty(&queue->fence_ctx.in_flight_jobs)) { + /* If we have no job left, we cancel the timer, and reset remaining + * time to its default so it can be restarted next time + * queue_resume_timeout() is called. + */ + queue_suspend_timeout_locked(queue); + + /* If there's no job pending, we consider it progress to avoid a + * spurious timeout if the timeout handler and the sync update + * handler raced. + */ + progress = true; + } else if (!list_empty(&done_jobs)) { + queue_reset_timeout_locked(queue); + progress = true; } + spin_unlock(&queue->fence_ctx.lock); dma_fence_end_signalling(cookie); list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { @@ -2954,6 +3067,27 @@ static void group_sync_upd_work(struct work_struct *work) panthor_job_put(&job->base); } + return progress; +} + +static void group_sync_upd_work(struct work_struct *work) +{ + struct panthor_group *group = + container_of(work, struct panthor_group, sync_upd_work); + u32 queue_idx; + bool cookie; + + cookie = dma_fence_begin_signalling(); + for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { + struct panthor_queue *queue = group->queues[queue_idx]; + + if (!queue) + continue; + + queue_check_job_completion(queue); + } + dma_fence_end_signalling(cookie); + group_put(group); } @@ -3201,17 +3335,6 @@ queue_run_job(struct drm_sched_job *sched_job) queue->iface.input->insert = job->ringbuf.end; if (group->csg_id < 0) { - /* If the queue is blocked, we want to keep the timeout running, so we - * can detect unbounded waits and kill the group when that happens. 
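The panthor_sched hunks in this area replace drm_sched's built-in job timeout with a per-queue delayed work whose remaining budget is tracked explicitly, using MAX_SCHEDULE_TIMEOUT as the "timer is armed" sentinel. A stripped-down sketch of that bookkeeping pattern (illustration only, not driver code; the real helpers additionally clamp the saved value to JOB_TIMEOUT_MS and keep the timer running for blocked-but-idle groups):

```c
#include <linux/jiffies.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */
#include <linux/workqueue.h>

struct suspendable_timeout {
	struct delayed_work work;
	/* MAX_SCHEDULE_TIMEOUT while armed, otherwise the time left. */
	unsigned long remaining;
};

static void st_suspend(struct suspendable_timeout *st)
{
	unsigned long expires = st->work.timer.expires;

	if (st->remaining != MAX_SCHEDULE_TIMEOUT)
		return;	/* already suspended */

	/* Stop the timer and remember how much of the budget is left. */
	if (cancel_delayed_work(&st->work) && time_after(expires, jiffies))
		st->remaining = expires - jiffies;
	else
		st->remaining = 0;
}

static void st_resume(struct suspendable_timeout *st, struct workqueue_struct *wq)
{
	if (st->remaining == MAX_SCHEDULE_TIMEOUT)
		return;	/* already armed */

	mod_delayed_work(wq, &st->work, st->remaining);
	st->remaining = MAX_SCHEDULE_TIMEOUT;
}
```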
- * Otherwise, we suspend the timeout so the time we spend waiting for - * a CSG slot is not counted. - */ - if (!(group->blocked_queues & BIT(job->queue_idx)) && - !queue->timeout_suspended) { - queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); - queue->timeout_suspended = true; - } - group_schedule_locked(group, BIT(job->queue_idx)); } else { gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1); @@ -3220,6 +3343,7 @@ queue_run_job(struct drm_sched_job *sched_job) pm_runtime_get(ptdev->base.dev); sched->pm.has_ref = true; } + queue_resume_timeout(queue); panthor_devfreq_record_busy(sched->ptdev); } @@ -3269,7 +3393,6 @@ queue_timedout_job(struct drm_sched_job *sched_job) mutex_unlock(&sched->lock); queue_start(queue); - return DRM_GPU_SCHED_STAT_RESET; } @@ -3312,6 +3435,17 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev, return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64)); } +static void queue_timeout_work(struct work_struct *work) +{ + struct panthor_queue *queue = container_of(work, struct panthor_queue, + timeout.work.work); + bool progress; + + progress = queue_check_job_completion(queue); + if (!progress) + drm_sched_fault(&queue->scheduler); +} + static struct panthor_queue * group_create_queue(struct panthor_group *group, const struct drm_panthor_queue_create *args, @@ -3328,7 +3462,7 @@ group_create_queue(struct panthor_group *group, * their profiling status. */ .credit_limit = args->ringbuf_size / sizeof(u64), - .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), + .timeout = MAX_SCHEDULE_TIMEOUT, .timeout_wq = group->ptdev->reset.wq, .dev = group->ptdev->base.dev, }; @@ -3350,6 +3484,8 @@ group_create_queue(struct panthor_group *group, if (!queue) return ERR_PTR(-ENOMEM); + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); + INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work); queue->fence_ctx.id = dma_fence_context_alloc(1); spin_lock_init(&queue->fence_ctx.lock); INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs); diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index c2cfe2d7915f..fd083aaa91bb 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -202,7 +202,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct radeon_device *rdev = fb_helper->dev->dev_private; const struct drm_format_info *format_info; struct drm_mode_fb_cmd2 mode_cmd = { }; - struct fb_info *info; + struct fb_info *info = fb_helper->info; struct drm_gem_object *gobj; struct radeon_bo *rbo; struct drm_framebuffer *fb; @@ -243,13 +243,6 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, fb_helper->funcs = &radeon_fbdev_fb_helper_funcs; fb_helper->fb = fb; - /* okay we have an object now allocate the framebuffer */ - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto err_drm_framebuffer_unregister_private; - } - info->fbops = &radeon_fbdev_fb_ops; /* radeon resume is fragile and needs a vt switch to help it along */ @@ -275,10 +268,6 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, return 0; -err_drm_framebuffer_unregister_private: - fb_helper->fb = NULL; - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); err_kfree: kfree(fb); err_radeon_fbdev_destroy_pinned_object: diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index d30fef00c34c..fdab71d51e2a 
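The radeon_fbdev hunk just above follows the core change called out in the merge description ("Allocate and release fb_info in single place"): the fbdev probe callback now picks up the preallocated fb_helper->info instead of calling drm_fb_helper_alloc_info() and unwinding it on error. The resulting driver-side shape looks roughly like this (sketch only; example_create_fb() and example_fb_ops are stand-ins, not radeon symbols):

```c
static int example_fbdev_probe(struct drm_fb_helper *fb_helper,
			       struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info = fb_helper->info;	/* preallocated by the DRM client core */
	struct drm_framebuffer *fb;

	fb = example_create_fb(fb_helper, sizes);	/* driver-specific BO + fb setup */
	if (IS_ERR(fb))
		return PTR_ERR(fb);

	fb_helper->fb = fb;
	info->fbops = &example_fb_ops;
	drm_fb_helper_fill_info(info, fb_helper, sizes);

	/* No drm_fb_helper_alloc_info()/release in the error paths anymore. */
	return 0;
}
```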
100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -331,32 +331,29 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp) struct device_node *np = dev->of_node; dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); - if (IS_ERR(dp->grf)) { - DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n"); - return PTR_ERR(dp->grf); - } + if (IS_ERR(dp->grf)) + return dev_err_probe(dev, PTR_ERR(dp->grf), + "failed to get rockchip,grf property\n"); dp->grfclk = devm_clk_get_optional(dev, "grf"); if (IS_ERR(dp->grfclk)) - return dev_err_probe(dev, PTR_ERR(dp->grfclk), "failed to get grf clock\n"); + return dev_err_probe(dev, PTR_ERR(dp->grfclk), + "failed to get grf clock\n"); dp->pclk = devm_clk_get(dev, "pclk"); - if (IS_ERR(dp->pclk)) { - DRM_DEV_ERROR(dev, "failed to get pclk property\n"); - return PTR_ERR(dp->pclk); - } + if (IS_ERR(dp->pclk)) + return dev_err_probe(dev, PTR_ERR(dp->pclk), + "failed to get pclk property\n"); dp->rst = devm_reset_control_get(dev, "dp"); - if (IS_ERR(dp->rst)) { - DRM_DEV_ERROR(dev, "failed to get dp reset control\n"); - return PTR_ERR(dp->rst); - } + if (IS_ERR(dp->rst)) + return dev_err_probe(dev, PTR_ERR(dp->rst), + "failed to get dp reset control\n"); dp->apbrst = devm_reset_control_get_optional(dev, "apb"); - if (IS_ERR(dp->apbrst)) { - DRM_DEV_ERROR(dev, "failed to get apb reset control\n"); - return PTR_ERR(dp->apbrst); - } + if (IS_ERR(dp->apbrst)) + return dev_err_probe(dev, PTR_ERR(dp->apbrst), + "failed to get apb reset control\n"); return 0; } diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c index 931343b072ad..c9fe6aa3e3e3 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> +#include <linux/phy/phy-hdmi.h> #include <linux/regmap.h> #include <linux/workqueue.h> @@ -38,21 +39,16 @@ #define RK3576_HDMI_HDCP14_MEM_EN BIT(15) #define RK3576_VO0_GRF_SOC_CON8 0x0020 -#define RK3576_COLOR_FORMAT_MASK (0xf << 4) -#define RK3576_COLOR_DEPTH_MASK (0xf << 8) -#define RK3576_RGB (0 << 4) -#define RK3576_YUV422 (0x1 << 4) -#define RK3576_YUV444 (0x2 << 4) -#define RK3576_YUV420 (0x3 << 4) -#define RK3576_8BPC (0x0 << 8) -#define RK3576_10BPC (0x6 << 8) +#define RK3576_COLOR_DEPTH_MASK GENMASK(11, 8) +#define RK3576_8BPC 0x0 +#define RK3576_10BPC 0x6 +#define RK3576_COLOR_FORMAT_MASK GENMASK(7, 4) +#define RK3576_RGB 0x9 +#define RK3576_YUV422 0x1 +#define RK3576_YUV444 0x2 +#define RK3576_YUV420 0x3 #define RK3576_CECIN_MASK BIT(3) -#define RK3576_VO0_GRF_SOC_CON12 0x0030 -#define RK3576_GRF_OSDA_DLYN (0xf << 12) -#define RK3576_GRF_OSDA_DIV (0x7f << 1) -#define RK3576_GRF_OSDA_DLY_EN BIT(0) - #define RK3576_VO0_GRF_SOC_CON14 0x0038 #define RK3576_I2S_SEL_MASK BIT(0) #define RK3576_SPDIF_SEL_MASK BIT(1) @@ -74,6 +70,12 @@ #define RK3588_HDMI1_LEVEL_INT BIT(24) #define RK3588_GRF_VO1_CON3 0x000c #define RK3588_GRF_VO1_CON6 0x0018 +#define RK3588_COLOR_DEPTH_MASK GENMASK(7, 4) +#define RK3588_8BPC 0x0 +#define RK3588_10BPC 0x6 +#define RK3588_COLOR_FORMAT_MASK GENMASK(3, 0) +#define RK3588_RGB 0x0 +#define RK3588_YUV420 0x3 #define RK3588_SCLIN_MASK BIT(9) #define RK3588_SDAIN_MASK BIT(10) #define RK3588_MODE_MASK BIT(11) @@ -92,14 +94,16 @@ struct rockchip_hdmi_qp { struct rockchip_encoder encoder; struct dw_hdmi_qp *hdmi; struct phy *phy; - struct 
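The analogix_dp-rockchip conversion above replaces the DRM_DEV_ERROR()-then-return pattern with dev_err_probe(), which logs and returns the error in one statement and, for -EPROBE_DEFER, records the deferral reason instead of spamming the log. For example:

```c
dp->rst = devm_reset_control_get(dev, "dp");
if (IS_ERR(dp->rst))
	return dev_err_probe(dev, PTR_ERR(dp->rst),
			     "failed to get dp reset control\n");
/* Prints an error and returns the code for real failures; stays quiet
 * (deferral reason only) when the reset controller is not ready yet.
 */
```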
gpio_desc *enable_gpio; + struct gpio_desc *frl_enable_gpio; struct delayed_work hpd_work; int port_id; const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops; + unsigned long long tmds_char_rate; }; struct rockchip_hdmi_qp_ctrl_ops { void (*io_init)(struct rockchip_hdmi_qp *hdmi); + void (*enc_init)(struct rockchip_hdmi_qp *hdmi, struct rockchip_crtc_state *state); irqreturn_t (*irq_callback)(int irq, void *dev_id); irqreturn_t (*hardirq_callback)(int irq, void *dev_id); }; @@ -115,23 +119,15 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder) { struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); struct drm_crtc *crtc = encoder->crtc; - unsigned long long rate; /* Unconditionally switch to TMDS as FRL is not yet supported */ - gpiod_set_value(hdmi->enable_gpio, 1); - - if (crtc && crtc->state) { - rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode, - 8, HDMI_COLORSPACE_RGB); - /* - * FIXME: Temporary workaround to pass pixel clock rate - * to the PHY driver until phy_configure_opts_hdmi - * becomes available in the PHY API. See also the related - * comment in rk_hdptx_phy_power_on() from - * drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c - */ - phy_set_bus_width(hdmi->phy, div_u64(rate, 100)); - } + gpiod_set_value(hdmi->frl_enable_gpio, 0); + + if (!crtc || !crtc->state) + return; + + if (hdmi->ctrl_ops->enc_init) + hdmi->ctrl_ops->enc_init(hdmi, to_rockchip_crtc_state(crtc->state)); } static int @@ -139,12 +135,29 @@ dw_hdmi_qp_rockchip_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); + union phy_configure_opts phy_cfg = {}; + int ret; + + if (hdmi->tmds_char_rate == conn_state->hdmi.tmds_char_rate && + s->output_bpc == conn_state->hdmi.output_bpc) + return 0; + + phy_cfg.hdmi.tmds_char_rate = conn_state->hdmi.tmds_char_rate; + phy_cfg.hdmi.bpc = conn_state->hdmi.output_bpc; + + ret = phy_configure(hdmi->phy, &phy_cfg); + if (!ret) { + hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; + s->output_type = DRM_MODE_CONNECTOR_HDMIA; + s->output_bpc = conn_state->hdmi.output_bpc; + } else { + dev_err(hdmi->dev, "Failed to configure phy: %d\n", ret); + } - s->output_mode = ROCKCHIP_OUT_MODE_AAAA; - s->output_type = DRM_MODE_CONNECTOR_HDMIA; - - return 0; + return ret; } static const struct @@ -375,15 +388,45 @@ static void dw_hdmi_qp_rk3588_io_init(struct rockchip_hdmi_qp *hdmi) regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); } +static void dw_hdmi_qp_rk3576_enc_init(struct rockchip_hdmi_qp *hdmi, + struct rockchip_crtc_state *state) +{ + u32 val; + + if (state->output_bpc == 10) + val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_10BPC); + else + val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_8BPC); + + regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON8, val); +} + +static void dw_hdmi_qp_rk3588_enc_init(struct rockchip_hdmi_qp *hdmi, + struct rockchip_crtc_state *state) +{ + u32 val; + + if (state->output_bpc == 10) + val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_10BPC); + else + val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_8BPC); + + regmap_write(hdmi->vo_regmap, + hdmi->port_id ? 
RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3, + val); +} + static const struct rockchip_hdmi_qp_ctrl_ops rk3576_hdmi_ctrl_ops = { .io_init = dw_hdmi_qp_rk3576_io_init, - .irq_callback = dw_hdmi_qp_rk3576_irq, + .enc_init = dw_hdmi_qp_rk3576_enc_init, + .irq_callback = dw_hdmi_qp_rk3576_irq, .hardirq_callback = dw_hdmi_qp_rk3576_hardirq, }; static const struct rockchip_hdmi_qp_ctrl_ops rk3588_hdmi_ctrl_ops = { .io_init = dw_hdmi_qp_rk3588_io_init, - .irq_callback = dw_hdmi_qp_rk3588_irq, + .enc_init = dw_hdmi_qp_rk3588_enc_init, + .irq_callback = dw_hdmi_qp_rk3588_irq, .hardirq_callback = dw_hdmi_qp_rk3588_hardirq, }; @@ -476,6 +519,7 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, plat_data.phy_ops = cfg->phy_ops; plat_data.phy_data = hdmi; + plat_data.max_bpc = 10; encoder = &hdmi->encoder.encoder; encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); @@ -515,11 +559,11 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, plat_data.ref_clk_rate = clk_get_rate(ref_clk); clk_put(ref_clk); - hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable", - GPIOD_OUT_HIGH); - if (IS_ERR(hdmi->enable_gpio)) - return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->enable_gpio), - "Failed to request enable GPIO\n"); + hdmi->frl_enable_gpio = devm_gpiod_get_optional(hdmi->dev, "frl-enable", + GPIOD_OUT_LOW); + if (IS_ERR(hdmi->frl_enable_gpio)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->frl_enable_gpio), + "Failed to request FRL enable GPIO\n"); hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0); if (IS_ERR(hdmi->phy)) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index c5c6e2b5772d..3099408e9d05 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -97,6 +97,9 @@ void rockchip_drm_dma_init_device(struct drm_device *drm_dev, private->iommu_dev = ERR_PTR(-ENODEV); else if (!private->iommu_dev) private->iommu_dev = dev; + + if (!IS_ERR(private->iommu_dev)) + drm_dev_set_dma_dev(drm_dev, private->iommu_dev); } static int rockchip_drm_init_iommu(struct drm_device *drm_dev) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index e979d5e02ff4..498df0ce4680 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -102,7 +102,7 @@ enum vop2_afbc_format { VOP2_AFBC_FMT_INVALID = -1, }; -#define VOP2_MAX_DCLK_RATE 600000000 +#define VOP2_MAX_DCLK_RATE 600000000UL /* * bus-format types. @@ -1743,36 +1743,42 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc, * Switch to HDMI PHY PLL as DCLK source for display modes up * to 4K@60Hz, if available, otherwise keep using the system CRU. 
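The new enc_init helpers above program the Rockchip GRF colour-depth field through FIELD_PREP_WM16(). Assuming the usual Rockchip "write mask" register convention (value in the low half-word, matching write-enable bits in the high half-word), the 10-bpc case expands roughly as follows:

```c
/* RK3576_COLOR_DEPTH_MASK = GENMASK(11, 8) = 0x0f00, RK3576_10BPC = 0x6 */
u32 val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_10BPC);
/* value bits:   0x6 << 8      = 0x00000600
 * write-enable: 0x0f00 << 16  = 0x0f000000
 * val                        == 0x0f000600
 * so the regmap_write() only updates bits [11:8] of VO0_GRF_SOC_CON8.
 */
```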
*/ - if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) { - drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { - struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); - - if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { - if (!vop2->pll_hdmiphy0) + if (vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) { + unsigned long max_dclk = DIV_ROUND_CLOSEST_ULL(VOP2_MAX_DCLK_RATE * 8, + vcstate->output_bpc); + if (clock <= max_dclk) { + drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { + struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); + + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { + if (!vop2->pll_hdmiphy0) + break; + + if (!vp->dclk_src) + vp->dclk_src = clk_get_parent(vp->dclk); + + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0); + if (ret < 0) + drm_warn(vop2->drm, + "Could not switch to HDMI0 PHY PLL: %d\n", + ret); break; + } - if (!vp->dclk_src) - vp->dclk_src = clk_get_parent(vp->dclk); + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) { + if (!vop2->pll_hdmiphy1) + break; - ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0); - if (ret < 0) - drm_warn(vop2->drm, - "Could not switch to HDMI0 PHY PLL: %d\n", ret); - break; - } + if (!vp->dclk_src) + vp->dclk_src = clk_get_parent(vp->dclk); - if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) { - if (!vop2->pll_hdmiphy1) + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1); + if (ret < 0) + drm_warn(vop2->drm, + "Could not switch to HDMI1 PHY PLL: %d\n", + ret); break; - - if (!vp->dclk_src) - vp->dclk_src = clk_get_parent(vp->dclk); - - ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1); - if (ret < 0) - drm_warn(vop2->drm, - "Could not switch to HDMI1 PHY PLL: %d\n", ret); - break; + } } } } diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 38c49030c7ab..cd8380f0eddc 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -1369,6 +1369,25 @@ static const struct vop2_regs_dump rk3588_regs_dump[] = { }, }; +/* + * phys_id is used to identify a main window(Cluster Win/Smart Win, not + * include the sub win of a cluster or the multi area) that can do overlay + * in main overlay stage. 
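In the vop2_crtc_atomic_enable() hunk above, the 600 MHz HDMI PHY PLL cut-off is now treated as an 8-bpc budget and scaled down for deeper colour depths before deciding whether to switch the dclk to the PHY PLL:

```c
unsigned int output_bpc = 10;	/* example value */
unsigned long max_dclk = DIV_ROUND_CLOSEST_ULL(VOP2_MAX_DCLK_RATE * 8, output_bpc);
/*  8 bpc: 600000000 * 8 / 8  = 600000000 (600 MHz, unchanged) */
/* 10 bpc: 600000000 * 8 / 10 = 480000000 (480 MHz)            */
```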
+ */ +static struct vop2_win *vop2_find_win_by_phys_id(struct vop2 *vop2, uint8_t phys_id) +{ + struct vop2_win *win; + int i; + + for (i = 0; i < vop2->data->win_size; i++) { + win = &vop2->win[i]; + if (win->data->phys_id == phys_id) + return win; + } + + return NULL; +} + static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) { struct vop2 *vop2 = vp->vop2; @@ -1842,15 +1861,31 @@ static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config, alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; } -static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id) +static int vop2_find_start_mixer_id_for_vp(struct vop2_video_port *vp) { - struct vop2_video_port *vp; - int used_layer = 0; + struct vop2 *vop2 = vp->vop2; + struct vop2_win *win; + u32 layer_sel = vop2->old_layer_sel; + u32 used_layer = 0; + unsigned long win_mask = vp->win_mask; + unsigned long phys_id; + bool match; int i; - for (i = 0; i < port_id; i++) { - vp = &vop2->vps[i]; - used_layer += hweight32(vp->win_mask); + for (i = 0; i < 31; i += 4) { + match = false; + for_each_set_bit(phys_id, &win_mask, ROCKCHIP_VOP2_ESMART3) { + win = vop2_find_win_by_phys_id(vop2, phys_id); + if (win->data->layer_sel_id[vp->id] == ((layer_sel >> i) & 0xf)) { + match = true; + break; + } + } + + if (!match) + used_layer += 1; + else + break; } return used_layer; @@ -1935,7 +1970,7 @@ static void vop2_setup_alpha(struct vop2_video_port *vp) u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE; if (vop2->version <= VOP_VERSION_RK3588) - mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); + mixer_id = vop2_find_start_mixer_id_for_vp(vp); else mixer_id = 0; diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile index 6fc4b504e786..e399b40d64a1 100644 --- a/drivers/gpu/drm/tegra/Makefile +++ b/drivers/gpu/drm/tegra/Makefile @@ -25,6 +25,7 @@ tegra-drm-y := \ falcon.o \ vic.o \ nvdec.o \ + nvjpg.o \ riscv.o tegra-drm-y += trace.o diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 4bf10c1bc373..1d18d43292dc 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1384,6 +1384,7 @@ static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra210-sor1", }, { .compatible = "nvidia,tegra210-vic", }, { .compatible = "nvidia,tegra210-nvdec", }, + { .compatible = "nvidia,tegra210-nvjpg", }, { .compatible = "nvidia,tegra186-display", }, { .compatible = "nvidia,tegra186-dc", }, { .compatible = "nvidia,tegra186-sor", }, @@ -1422,6 +1423,7 @@ static struct platform_driver * const drivers[] = { &tegra_gr3d_driver, &tegra_vic_driver, &tegra_nvdec_driver, + &tegra_nvjpg_driver, }; static int __init host1x_drm_init(void) diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 1dd3670f37db..ae68b03d8483 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -214,5 +214,6 @@ extern struct platform_driver tegra_gr2d_driver; extern struct platform_driver tegra_gr3d_driver; extern struct platform_driver tegra_vic_driver; extern struct platform_driver tegra_nvdec_driver; +extern struct platform_driver tegra_nvjpg_driver; #endif /* HOST1X_DRM_H */ diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index ae7aeb8e90ef..ae13b7a6837c 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -546,12 +546,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, /* horizontal back porch */ hbp = (mode->htotal - 
mode->hsync_end) * mul / div; - if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0) - hbp += hsw; - /* horizontal front porch */ hfp = (mode->hsync_start - mode->hdisplay) * mul / div; + if (dsi->master || dsi->slave) { + hact /= 2; + hsw /= 2; + hbp /= 2; + hfp /= 2; + } + + if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0) + hbp += hsw; + /* subtract packet overhead */ hsw -= 10; hbp -= 14; @@ -561,11 +568,6 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3); tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5); tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7); - - /* set SOL delay (for non-burst mode only) */ - tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY); - - /* TODO: implement ganged mode */ } else { u16 bytes; @@ -587,29 +589,28 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, value = MIPI_DCS_WRITE_MEMORY_START << 8 | MIPI_DCS_WRITE_MEMORY_CONTINUE; tegra_dsi_writel(dsi, value, DSI_DCS_CMDS); + } - /* set SOL delay */ - if (dsi->master || dsi->slave) { - unsigned long delay, bclk, bclk_ganged; - unsigned int lanes = state->lanes; - - /* SOL to valid, valid to FIFO and FIFO write delay */ - delay = 4 + 4 + 2; - delay = DIV_ROUND_UP(delay * mul, div * lanes); - /* FIFO read delay */ - delay = delay + 6; - - bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes); - bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes); - value = bclk - bclk_ganged + delay + 20; - } else { - /* TODO: revisit for non-ganged mode */ - value = 8 * mul / div; - } + /* set SOL delay */ + if (dsi->master || dsi->slave) { + unsigned long delay, bclk, bclk_ganged; + unsigned int lanes = state->lanes; + + /* SOL to valid, valid to FIFO and FIFO write delay */ + delay = 4 + 4 + 2; + delay = DIV_ROUND_UP(delay * mul, div * lanes); + /* FIFO read delay */ + delay = delay + 6; - tegra_dsi_writel(dsi, value, DSI_SOL_DELAY); + bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes); + bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes); + value = bclk - bclk_ganged + delay + 20; + } else { + value = 8 * mul / div; } + tegra_dsi_writel(dsi, value, DSI_SOL_DELAY); + if (dsi->slave) { tegra_dsi_configure(dsi->slave, pipe, mode); diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c index 91aece6f34e0..8f40882aa76e 100644 --- a/drivers/gpu/drm/tegra/fbdev.c +++ b/drivers/gpu/drm/tegra/fbdev.c @@ -73,10 +73,10 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct tegra_drm *tegra = helper->dev->dev_private; struct drm_device *drm = helper->dev; struct drm_mode_fb_cmd2 cmd = { 0 }; + struct fb_info *info = helper->info; unsigned int bytes_per_pixel; struct drm_framebuffer *fb; unsigned long offset; - struct fb_info *info; struct tegra_bo *bo; size_t size; int err; @@ -97,13 +97,6 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, if (IS_ERR(bo)) return PTR_ERR(bo); - info = drm_fb_helper_alloc_info(helper); - if (IS_ERR(info)) { - dev_err(drm->dev, "failed to allocate framebuffer info\n"); - drm_gem_object_put(&bo->gem); - return PTR_ERR(info); - } - fb = tegra_fb_alloc(drm, drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]), &cmd, &bo, 1); diff --git a/drivers/gpu/drm/tegra/nvjpg.c b/drivers/gpu/drm/tegra/nvjpg.c new file mode 100644 index 000000000000..94503fd0d52d --- /dev/null +++ b/drivers/gpu/drm/tegra/nvjpg.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/clk.h> +#include <linux/delay.h> +#include 
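The tegra-dsi hunk above halves the per-controller horizontal timings in ganged mode and moves the SOL (start-of-line) delay programming out of the mode-specific branches, so the ganged formula is now applied in video mode too instead of the old fixed 8 * mul / div value. Plugging in illustrative numbers that are not from the patch (4 lanes, mul/div = 3/1, htotal = 2080):

```c
/* delay: SOL->valid (4) + valid->FIFO (4) + FIFO write (2), then FIFO read (+6) */
unsigned long delay = DIV_ROUND_UP((4 + 4 + 2) * 3, 1 * 4) + 6;	/* = 8 + 6 = 14 */
unsigned long bclk = DIV_ROUND_UP(2080 * 3, 1 * 4);		/* = 1560       */
unsigned long bclk_ganged = DIV_ROUND_UP(bclk * 4 / 2, 4);	/* = 780        */
unsigned long sol_delay = bclk - bclk_ganged + delay + 20;	/* = 814        */
```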
<linux/dma-mapping.h> +#include <linux/host1x.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "drm.h" +#include "falcon.h" + +struct nvjpg_config { + const char *firmware; + unsigned int version; +}; + +struct nvjpg { + struct falcon falcon; + + void __iomem *regs; + struct tegra_drm_client client; + struct device *dev; + struct clk *clk; + + /* Platform configuration */ + const struct nvjpg_config *config; +}; + +static inline struct nvjpg *to_nvjpg(struct tegra_drm_client *client) +{ + return container_of(client, struct nvjpg, client); +} + +static int nvjpg_init(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->host); + struct tegra_drm *tegra = dev->dev_private; + struct nvjpg *nvjpg = to_nvjpg(drm); + int err; + + err = host1x_client_iommu_attach(client); + if (err < 0 && err != -ENODEV) { + dev_err(nvjpg->dev, "failed to attach to domain: %d\n", err); + return err; + } + + err = tegra_drm_register_client(tegra, drm); + if (err < 0) + goto detach; + + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent host1x device. + */ + client->dev->dma_parms = client->host->dma_parms; + + return 0; + +detach: + host1x_client_iommu_detach(client); + + return err; +} + +static int nvjpg_exit(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->host); + struct tegra_drm *tegra = dev->dev_private; + struct nvjpg *nvjpg = to_nvjpg(drm); + int err; + + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + + err = tegra_drm_unregister_client(tegra, drm); + if (err < 0) + return err; + + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + + host1x_client_iommu_detach(client); + + if (client->group) { + dma_unmap_single(nvjpg->dev, nvjpg->falcon.firmware.phys, + nvjpg->falcon.firmware.size, DMA_TO_DEVICE); + tegra_drm_free(tegra, nvjpg->falcon.firmware.size, + nvjpg->falcon.firmware.virt, + nvjpg->falcon.firmware.iova); + } else { + dma_free_coherent(nvjpg->dev, nvjpg->falcon.firmware.size, + nvjpg->falcon.firmware.virt, + nvjpg->falcon.firmware.iova); + } + + return 0; +} + +static const struct host1x_client_ops nvjpg_client_ops = { + .init = nvjpg_init, + .exit = nvjpg_exit, +}; + +static int nvjpg_load_falcon_firmware(struct nvjpg *nvjpg) +{ + struct host1x_client *client = &nvjpg->client.base; + struct tegra_drm *tegra = nvjpg->client.drm; + dma_addr_t iova; + size_t size; + void *virt; + int err; + + if (nvjpg->falcon.firmware.virt) + return 0; + + err = falcon_read_firmware(&nvjpg->falcon, nvjpg->config->firmware); + if (err < 0) + return err; + + size = nvjpg->falcon.firmware.size; + + if (!client->group) { + virt = dma_alloc_coherent(nvjpg->dev, size, &iova, GFP_KERNEL); + if (!virt) + return -ENOMEM; + } else { + virt = tegra_drm_alloc(tegra, size, &iova); + if (IS_ERR(virt)) + return PTR_ERR(virt); + } + + nvjpg->falcon.firmware.virt = virt; + nvjpg->falcon.firmware.iova = iova; + + err = falcon_load_firmware(&nvjpg->falcon); + if (err < 0) + goto cleanup; + + /* + * In this case we have received an IOVA from the shared domain, so we + * need to make sure to get the physical address so that the DMA API + * knows what memory pages to flush the cache for. 
+ */ + if (client->group) { + dma_addr_t phys; + + phys = dma_map_single(nvjpg->dev, virt, size, DMA_TO_DEVICE); + + err = dma_mapping_error(nvjpg->dev, phys); + if (err < 0) + goto cleanup; + + nvjpg->falcon.firmware.phys = phys; + } + + return 0; + +cleanup: + if (!client->group) + dma_free_coherent(nvjpg->dev, size, virt, iova); + else + tegra_drm_free(tegra, size, virt, iova); + + return err; +} + +static __maybe_unused int nvjpg_runtime_resume(struct device *dev) +{ + struct nvjpg *nvjpg = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(nvjpg->clk); + if (err < 0) + return err; + + usleep_range(20, 30); + + err = nvjpg_load_falcon_firmware(nvjpg); + if (err < 0) + goto disable_clk; + + err = falcon_boot(&nvjpg->falcon); + if (err < 0) + goto disable_clk; + + return 0; + +disable_clk: + clk_disable_unprepare(nvjpg->clk); + return err; +} + +static __maybe_unused int nvjpg_runtime_suspend(struct device *dev) +{ + struct nvjpg *nvjpg = dev_get_drvdata(dev); + + clk_disable_unprepare(nvjpg->clk); + + return 0; +} + +static int nvjpg_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported) +{ + *supported = false; + + return 0; +} + +static const struct tegra_drm_client_ops nvjpg_ops = { + .get_streamid_offset = NULL, + .can_use_memory_ctx = nvjpg_can_use_memory_ctx, +}; + +#define NVIDIA_TEGRA_210_NVJPG_FIRMWARE "nvidia/tegra210/nvjpg.bin" + +static const struct nvjpg_config tegra210_nvjpg_config = { + .firmware = NVIDIA_TEGRA_210_NVJPG_FIRMWARE, + .version = 0x21, +}; + +static const struct of_device_id tegra_nvjpg_of_match[] = { + { .compatible = "nvidia,tegra210-nvjpg", .data = &tegra210_nvjpg_config }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_nvjpg_of_match); + +static int nvjpg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct nvjpg *nvjpg; + int err; + + /* inherit DMA mask from host1x parent */ + err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + return err; + } + + nvjpg = devm_kzalloc(dev, sizeof(*nvjpg), GFP_KERNEL); + if (!nvjpg) + return -ENOMEM; + + nvjpg->config = of_device_get_match_data(dev); + + nvjpg->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(nvjpg->regs)) + return PTR_ERR(nvjpg->regs); + + nvjpg->clk = devm_clk_get(dev, "nvjpg"); + if (IS_ERR(nvjpg->clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return PTR_ERR(nvjpg->clk); + } + + err = clk_set_rate(nvjpg->clk, ULONG_MAX); + if (err < 0) { + dev_err(&pdev->dev, "failed to set clock rate\n"); + return err; + } + + nvjpg->falcon.dev = dev; + nvjpg->falcon.regs = nvjpg->regs; + + err = falcon_init(&nvjpg->falcon); + if (err < 0) + return err; + + platform_set_drvdata(pdev, nvjpg); + + INIT_LIST_HEAD(&nvjpg->client.base.list); + nvjpg->client.base.ops = &nvjpg_client_ops; + nvjpg->client.base.dev = dev; + nvjpg->client.base.class = HOST1X_CLASS_NVJPG; + nvjpg->dev = dev; + + INIT_LIST_HEAD(&nvjpg->client.list); + nvjpg->client.version = nvjpg->config->version; + nvjpg->client.ops = &nvjpg_ops; + + err = host1x_client_register(&nvjpg->client.base); + if (err < 0) { + dev_err(dev, "failed to register host1x client: %d\n", err); + goto exit_falcon; + } + + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, 500); + devm_pm_runtime_enable(dev); + + return 0; + +exit_falcon: + falcon_exit(&nvjpg->falcon); + + return err; +} + +static void nvjpg_remove(struct platform_device *pdev) +{ + struct nvjpg *nvjpg = 
platform_get_drvdata(pdev); + + host1x_client_unregister(&nvjpg->client.base); + falcon_exit(&nvjpg->falcon); +} + +static const struct dev_pm_ops nvjpg_pm_ops = { + RUNTIME_PM_OPS(nvjpg_runtime_suspend, nvjpg_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) +}; + +struct platform_driver tegra_nvjpg_driver = { + .driver = { + .name = "tegra-nvjpg", + .of_match_table = tegra_nvjpg_of_match, + .pm = &nvjpg_pm_ops + }, + .probe = nvjpg_probe, + .remove = nvjpg_remove, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVJPG_FIRMWARE); +#endif diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index c0e952293ad0..87d5d5f9332a 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_plane_helper_test.o \ drm_probe_helper_test.o \ drm_rect_test.o \ - drm_sysfb_modeset_test.o + drm_sysfb_modeset_test.o \ + drm_fixp_test.o CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN) diff --git a/drivers/gpu/drm/tests/drm_fixp_test.c b/drivers/gpu/drm/tests/drm_fixp_test.c new file mode 100644 index 000000000000..dd77fdedb2a9 --- /dev/null +++ b/drivers/gpu/drm/tests/drm_fixp_test.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + */ + +#include <kunit/test.h> +#include <drm/drm_fixed.h> + +static void drm_test_sm2fixp(struct kunit *test) +{ + KUNIT_EXPECT_EQ(test, 0x7fffffffffffffffll, ((1ull << 63) - 1)); + + /* 1 */ + KUNIT_EXPECT_EQ(test, drm_int2fixp(1), drm_sm2fixp(1ull << DRM_FIXED_POINT)); + + /* -1 */ + KUNIT_EXPECT_EQ(test, drm_int2fixp(-1), + drm_sm2fixp((1ull << 63) | (1ull << DRM_FIXED_POINT))); + + /* 0.5 */ + KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(1, 2), + drm_sm2fixp(1ull << (DRM_FIXED_POINT - 1))); + + /* -0.5 */ + KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(-1, 2), + drm_sm2fixp((1ull << 63) | (1ull << (DRM_FIXED_POINT - 1)))); +} + +static void drm_test_int2fixp(struct kunit *test) +{ + /* 1 */ + KUNIT_EXPECT_EQ(test, 1ll << 32, drm_int2fixp(1)); + + /* -1 */ + KUNIT_EXPECT_EQ(test, -(1ll << 32), drm_int2fixp(-1)); + + /* 1 + (-1) = 0 */ + KUNIT_EXPECT_EQ(test, 0, drm_int2fixp(1) + drm_int2fixp(-1)); + + /* 1 / 2 */ + KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(1, 2)); + + /* -0.5 */ + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(-1, 2)); + + /* (1 / 2) + (-1) = 0.5 */ + KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(-1, 2) + drm_int2fixp(1)); + + /* (1 / 2) - 1) = 0.5 */ + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) + drm_int2fixp(-1)); + + /* (1 / 2) - 1) = 0.5 */ + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) - drm_int2fixp(1)); +} + +static struct kunit_case drm_fixp_tests[] = { + KUNIT_CASE(drm_test_int2fixp), + KUNIT_CASE(drm_test_sm2fixp), + { } +}; + +static struct kunit_suite drm_fixp_test_suite = { + .name = "drm_fixp", + .test_cases = drm_fixp_tests, +}; + +kunit_test_suite(drm_fixp_test_suite); + +MODULE_AUTHOR("AMD"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Unit tests for drm_fixed.h"); diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c index 17a570af296c..2eda87882e65 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c @@ -652,7 +652,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test) int err; man = 
ttm_manager_type(priv->ttm_dev, mem_type); - man->move = dma_fence_get_stub(); + man->eviction_fences[0] = dma_fence_get_stub(); bo = ttm_bo_kunit_init(test, test->priv, size, NULL); bo->type = bo_type; @@ -669,7 +669,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test) KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size); ttm_bo_fini(bo); - dma_fence_put(man->move); + dma_fence_put(man->eviction_fences[0]); } static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = { @@ -733,9 +733,9 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test) spin_lock_init(&fence_lock); man = ttm_manager_type(priv->ttm_dev, fst_mem); - man->move = alloc_mock_fence(test); + man->eviction_fences[0] = alloc_mock_fence(test); - task = kthread_create(threaded_fence_signal, man->move, "move-fence-signal"); + task = kthread_create(threaded_fence_signal, man->eviction_fences[0], "move-fence-signal"); if (IS_ERR(task)) KUNIT_FAIL(test, "Couldn't create move fence signal task\n"); @@ -743,7 +743,8 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test) err = ttm_bo_validate(bo, placement_val, &ctx_val); dma_resv_unlock(bo->base.resv); - dma_fence_wait_timeout(man->move, false, MAX_SCHEDULE_TIMEOUT); + dma_fence_wait_timeout(man->eviction_fences[0], false, MAX_SCHEDULE_TIMEOUT); + man->eviction_fences[0] = NULL; KUNIT_EXPECT_EQ(test, err, 0); KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size); diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c index e6ea2bd01f07..c0e4e35e0442 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c @@ -207,6 +207,7 @@ static void ttm_resource_manager_init_basic(struct kunit *test) struct ttm_resource_test_priv *priv = test->priv; struct ttm_resource_manager *man; size_t size = SZ_16K; + int i; man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, man); @@ -216,8 +217,8 @@ static void ttm_resource_manager_init_basic(struct kunit *test) KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev); KUNIT_ASSERT_EQ(test, man->size, size); KUNIT_ASSERT_EQ(test, man->usage, 0); - KUNIT_ASSERT_NULL(test, man->move); - KUNIT_ASSERT_NOT_NULL(test, &man->move_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) + KUNIT_ASSERT_NULL(test, man->eviction_fences[i]); for (int i = 0; i < TTM_MAX_BO_PRIORITY; ++i) KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i])); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c4e669686fd6..bd27607f8076 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -659,34 +659,35 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo) EXPORT_SYMBOL(ttm_bo_unpin); /* - * Add the last move fence to the BO as kernel dependency and reserve a new - * fence slot. + * Add the pipelined eviction fences to the BO as kernel dependency and reserve new + * fence slots.
*/ -static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, - struct ttm_resource_manager *man, - bool no_wait_gpu) +static int ttm_bo_add_pipelined_eviction_fences(struct ttm_buffer_object *bo, + struct ttm_resource_manager *man, + bool no_wait_gpu) { struct dma_fence *fence; - int ret; - - spin_lock(&man->move_lock); - fence = dma_fence_get(man->move); - spin_unlock(&man->move_lock); + int i; - if (!fence) - return 0; + spin_lock(&man->eviction_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { + fence = man->eviction_fences[i]; + if (!fence) + continue; - if (no_wait_gpu) { - ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY; - dma_fence_put(fence); - return ret; + if (no_wait_gpu) { + if (!dma_fence_is_signaled(fence)) { + spin_unlock(&man->eviction_lock); + return -EBUSY; + } + } else { + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); + } } + spin_unlock(&man->eviction_lock); - dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); - - ret = dma_resv_reserve_fences(bo->base.resv, 1); - dma_fence_put(fence); - return ret; + /* TODO: this call should be removed. */ + return dma_resv_reserve_fences(bo->base.resv, 1); } /** @@ -719,7 +720,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo, int i, ret; ticket = dma_resv_locking_ctx(bo->base.resv); - ret = dma_resv_reserve_fences(bo->base.resv, 1); + ret = dma_resv_reserve_fences(bo->base.resv, TTM_NUM_MOVE_FENCES); if (unlikely(ret)) return ret; @@ -758,7 +759,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo, return ret; } - ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu); + ret = ttm_bo_add_pipelined_eviction_fences(bo, man, ctx->no_wait_gpu); if (unlikely(ret)) { ttm_resource_free(bo, res); if (ret == -EBUSY) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index acbbca9d5c92..2ff35d55e462 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -258,7 +258,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); - ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); + ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES); if (ret) { dma_resv_unlock(&fbo->base.base._resv); kfree(fbo); @@ -646,20 +646,44 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, { struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *from; + struct dma_fence *tmp; + int i; from = ttm_manager_type(bdev, bo->resource->mem_type); /** * BO doesn't have a TTM we need to bind/unbind. Just remember - * this eviction and free up the allocation + * this eviction and free up the allocation. + * The fence will be saved in the first free slot or in the slot + * already used to store a fence from the same context. Since + * drivers can't use more than TTM_NUM_MOVE_FENCES contexts for + * evictions we should always find a slot to use. 
*/ - spin_lock(&from->move_lock); - if (!from->move || dma_fence_is_later(fence, from->move)) { - dma_fence_put(from->move); - from->move = dma_fence_get(fence); + spin_lock(&from->eviction_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { + tmp = from->eviction_fences[i]; + if (!tmp) + break; + if (fence->context != tmp->context) + continue; + if (dma_fence_is_later(fence, tmp)) { + dma_fence_put(tmp); + break; + } + goto unlock; + } + if (i < TTM_NUM_MOVE_FENCES) { + from->eviction_fences[i] = dma_fence_get(fence); + } else { + WARN(1, "not enough fence slots for all fence contexts"); + spin_unlock(&from->eviction_lock); + dma_fence_wait(fence, false); + goto end; } - spin_unlock(&from->move_lock); +unlock: + spin_unlock(&from->eviction_lock); +end: ttm_resource_free(bo, &bo->resource); } diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 1a39c30f22fb..f5aa29dc6ec0 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -524,14 +524,15 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man, { unsigned i; - spin_lock_init(&man->move_lock); man->bdev = bdev; man->size = size; man->usage = 0; for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) INIT_LIST_HEAD(&man->lru[i]); - man->move = NULL; + spin_lock_init(&man->eviction_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) + man->eviction_fences[i] = NULL; } EXPORT_SYMBOL(ttm_resource_manager_init); @@ -552,7 +553,7 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, .no_wait_gpu = false, }; struct dma_fence *fence; - int ret; + int ret, i; do { ret = ttm_bo_evict_first(bdev, man, &ctx); @@ -562,18 +563,24 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, if (ret && ret != -ENOENT) return ret; - spin_lock(&man->move_lock); - fence = dma_fence_get(man->move); - spin_unlock(&man->move_lock); - - if (fence) { - ret = dma_fence_wait(fence, false); - dma_fence_put(fence); - if (ret) - return ret; + ret = 0; + + spin_lock(&man->eviction_lock); + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { + fence = man->eviction_fences[i]; + if (fence && !dma_fence_is_signaled(fence)) { + dma_fence_get(fence); + spin_unlock(&man->eviction_lock); + ret = dma_fence_wait(fence, false); + dma_fence_put(fence); + if (ret) + return ret; + spin_lock(&man->eviction_lock); + } } + spin_unlock(&man->eviction_lock); - return 0; + return ret; } EXPORT_SYMBOL(ttm_resource_manager_evict_all); diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index 939991fc8233..9bb264091c38 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -9,7 +9,9 @@ vkms-y := \ vkms_writeback.o \ vkms_connector.o \ vkms_config.o \ - vkms_configfs.o + vkms_configfs.o \ + vkms_colorop.o \ + vkms_luts.o obj-$(CONFIG_DRM_VKMS) += vkms.o obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile index 5750f0bd9d40..d4d9ba8d4c54 100644 --- a/drivers/gpu/drm/vkms/tests/Makefile +++ b/drivers/gpu/drm/vkms/tests/Makefile @@ -2,6 +2,7 @@ vkms-kunit-tests-y := \ vkms_config_test.o \ - vkms_format_test.o + vkms_format_test.o \ + vkms_color_test.o obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms-kunit-tests.o diff --git a/drivers/gpu/drm/vkms/tests/vkms_color_test.c b/drivers/gpu/drm/vkms/tests/vkms_color_test.c new file mode 100644 index 000000000000..1a1c7cac2f15 --- /dev/null +++ b/drivers/gpu/drm/vkms/tests/vkms_color_test.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include 
<kunit/test.h> + +#include <drm/drm_fixed.h> +#include <drm/drm_mode.h> +#include "../vkms_composer.h" +#include "../vkms_drv.h" +#include "../vkms_luts.h" + +#define TEST_LUT_SIZE 16 + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static struct drm_color_lut test_linear_array[TEST_LUT_SIZE] = { + { 0x0, 0x0, 0x0, 0 }, + { 0x1111, 0x1111, 0x1111, 0 }, + { 0x2222, 0x2222, 0x2222, 0 }, + { 0x3333, 0x3333, 0x3333, 0 }, + { 0x4444, 0x4444, 0x4444, 0 }, + { 0x5555, 0x5555, 0x5555, 0 }, + { 0x6666, 0x6666, 0x6666, 0 }, + { 0x7777, 0x7777, 0x7777, 0 }, + { 0x8888, 0x8888, 0x8888, 0 }, + { 0x9999, 0x9999, 0x9999, 0 }, + { 0xaaaa, 0xaaaa, 0xaaaa, 0 }, + { 0xbbbb, 0xbbbb, 0xbbbb, 0 }, + { 0xcccc, 0xcccc, 0xcccc, 0 }, + { 0xdddd, 0xdddd, 0xdddd, 0 }, + { 0xeeee, 0xeeee, 0xeeee, 0 }, + { 0xffff, 0xffff, 0xffff, 0 }, +}; + +/* lerp test parameters */ +struct vkms_color_test_lerp_params { + s64 t; + __u16 a; + __u16 b; + __u16 expected; +}; + +/* lerp test cases */ +static const struct vkms_color_test_lerp_params color_test_lerp_cases[] = { + /* Half-way round down */ + { 0x80000000 - 1, 0x0, 0x10, 0x8 }, + { 0x80000000 - 1, 0x1, 0x10, 0x8 }, /* Odd a */ + { 0x80000000 - 1, 0x1, 0xf, 0x8 }, /* Odd b */ + { 0x80000000 - 1, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x80000000 - 1, 0x10, 0x11, 0x10 }, /* b = a + 1*/ + /* Half-way round up */ + { 0x80000000, 0x0, 0x10, 0x8 }, + { 0x80000000, 0x1, 0x10, 0x9 }, /* Odd a */ + { 0x80000000, 0x1, 0xf, 0x8 }, /* Odd b */ + { 0x80000000, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x80000000, 0x10, 0x11, 0x11 }, /* b = a + 1*/ + /* t = 0.0 */ + { 0x0, 0x0, 0x10, 0x0 }, + { 0x0, 0x1, 0x10, 0x1 }, /* Odd a */ + { 0x0, 0x1, 0xf, 0x1 }, /* Odd b */ + { 0x0, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x0, 0x10, 0x11, 0x10 }, /* b = a + 1*/ + /* t = 1.0 */ + { 0x100000000, 0x0, 0x10, 0x10 }, + { 0x100000000, 0x1, 0x10, 0x10 }, /* Odd a */ + { 0x100000000, 0x1, 0xf, 0xf }, /* Odd b */ + { 0x100000000, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x100000000, 0x10, 0x11, 0x11 }, /* b = a + 1*/ + /* t = 0.0 + 1 */ + { 0x0 + 1, 0x0, 0x10, 0x0 }, + { 0x0 + 1, 0x1, 0x10, 0x1 }, /* Odd a */ + { 0x0 + 1, 0x1, 0xf, 0x1 }, /* Odd b */ + { 0x0 + 1, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x0 + 1, 0x10, 0x11, 0x10 }, /* b = a + 1*/ + /* t = 1.0 - 1 */ + { 0x100000000 - 1, 0x0, 0x10, 0x10 }, + { 0x100000000 - 1, 0x1, 0x10, 0x10 }, /* Odd a */ + { 0x100000000 - 1, 0x1, 0xf, 0xf }, /* Odd b */ + { 0x100000000 - 1, 0x10, 0x10, 0x10 }, /* b = a */ + { 0x100000000 - 1, 0x10, 0x11, 0x11 }, /* b = a + 1*/ + /* t chosen to verify the flipping point of result a (or b) to a+1 (or b-1) */ + { 0x80000000 - 1, 0x0, 0x1, 0x0 }, + { 0x80000000, 0x0, 0x1, 0x1 }, +}; + +static const struct vkms_color_lut test_linear_lut = { + .base = test_linear_array, + .lut_length = TEST_LUT_SIZE, + .channel_value2index_ratio = 0xf000fll +}; + +static void vkms_color_test_get_lut_index(struct kunit *test) +{ + s64 lut_index; + int i; + + lut_index = get_lut_index(&test_linear_lut, test_linear_array[0].red); + KUNIT_EXPECT_EQ(test, drm_fixp2int(lut_index), 0); + + for (i = 0; i < TEST_LUT_SIZE; i++) { + lut_index = get_lut_index(&test_linear_lut, test_linear_array[i].red); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(lut_index), i); + } + + KUNIT_EXPECT_EQ(test, drm_fixp2int(get_lut_index(&srgb_eotf, 0x0)), 0x0); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x0)), 0x0); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x101)), 0x1); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 
0x202)), 0x2); + + KUNIT_EXPECT_EQ(test, drm_fixp2int(get_lut_index(&srgb_inv_eotf, 0x0)), 0x0); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x0)), 0x0); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x101)), 0x1); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x202)), 0x2); + + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0xfefe)), 0xfe); + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0xffff)), 0xff); +} + +static void vkms_color_test_lerp(struct kunit *test) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(color_test_lerp_cases); i++) { + const struct vkms_color_test_lerp_params *params = &color_test_lerp_cases[i]; + + KUNIT_EXPECT_EQ(test, lerp_u16(params->a, params->b, params->t), params->expected); + } +} + +static void vkms_color_test_linear(struct kunit *test) +{ + for (int i = 0; i < LUT_SIZE; i++) { + int linear = apply_lut_to_channel_value(&linear_eotf, i * 0x101, LUT_RED); + + KUNIT_EXPECT_EQ(test, DIV_ROUND_CLOSEST(linear, 0x101), i); + } +} + +static void vkms_color_srgb_inv_srgb(struct kunit *test) +{ + u16 srgb, final; + + for (int i = 0; i < LUT_SIZE; i++) { + srgb = apply_lut_to_channel_value(&srgb_eotf, i * 0x101, LUT_RED); + final = apply_lut_to_channel_value(&srgb_inv_eotf, srgb, LUT_RED); + + KUNIT_EXPECT_GE(test, final / 0x101, i - 1); + KUNIT_EXPECT_LE(test, final / 0x101, i + 1); + } +} + +#define FIXPT_HALF (DRM_FIXED_ONE >> 1) +#define FIXPT_QUARTER (DRM_FIXED_ONE >> 2) + +static const struct drm_color_ctm_3x4 test_matrix_3x4_50_desat = { { + FIXPT_HALF, FIXPT_QUARTER, FIXPT_QUARTER, 0, + FIXPT_QUARTER, FIXPT_HALF, FIXPT_QUARTER, 0, + FIXPT_QUARTER, FIXPT_QUARTER, FIXPT_HALF, 0 +} }; + +static void vkms_color_ctm_3x4_50_desat(struct kunit *test) +{ + struct pixel_argb_s32 ref, out; + + /* full white */ + ref.a = 0xffff; + ref.r = 0xffff; + ref.g = 0xffff; + ref.b = 0xffff; + + memcpy(&out, &ref, sizeof(out)); + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); + + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); + + /* full black */ + ref.a = 0xffff; + ref.r = 0x0; + ref.g = 0x0; + ref.b = 0x0; + + memcpy(&out, &ref, sizeof(out)); + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); + + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); + + /* 50% grey */ + ref.a = 0xffff; + ref.r = 0x8000; + ref.g = 0x8000; + ref.b = 0x8000; + + memcpy(&out, &ref, sizeof(out)); + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); + + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); + + /* full red to 50% desat */ + ref.a = 0xffff; + ref.r = 0x8000; + ref.g = 0x4000; + ref.b = 0x4000; + + out.a = 0xffff; + out.r = 0xffff; + out.g = 0x0; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); + + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); +} + +/* + * BT.709 encoding matrix + * + * Values printed from within IGT when converting + * igt_matrix_3x4_bt709_enc to the fixed-point format expected + * by DRM/KMS. 
+ */ +static const struct drm_color_ctm_3x4 test_matrix_3x4_bt709_enc = { { + 0x00000000366cf400ull, 0x00000000b7175900ull, 0x0000000127bb300ull, 0, + 0x800000001993b3a0ull, 0x800000005609fe80ull, 0x000000006f9db200ull, 0, + 0x000000009d70a400ull, 0x800000008f011100ull, 0x800000000e6f9330ull, 0 +} }; + +static void vkms_color_ctm_3x4_bt709(struct kunit *test) +{ + struct pixel_argb_s32 out; + + /* full white to bt709 */ + out.a = 0xffff; + out.r = 0xffff; + out.g = 0xffff; + out.b = 0xffff; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 255 */ + KUNIT_EXPECT_GT(test, out.r, 0xfe00); + KUNIT_EXPECT_LT(test, out.r, 0x10000); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* full black to bt709 */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0x0; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 0 */ + KUNIT_EXPECT_LT(test, out.r, 0x100); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* gray to bt709 */ + out.a = 0xffff; + out.r = 0x7fff; + out.g = 0x7fff; + out.b = 0x7fff; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 127 */ + KUNIT_EXPECT_GT(test, out.r, 0x7e00); + KUNIT_EXPECT_LT(test, out.r, 0x8000); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* == red 255 - bt709 enc == */ + out.a = 0xffff; + out.r = 0xffff; + out.g = 0x0; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 54 */ + KUNIT_EXPECT_GT(test, out.r, 0x3500); + KUNIT_EXPECT_LT(test, out.r, 0x3700); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 157 */ + KUNIT_EXPECT_GT(test, out.b, 0x9C00); + KUNIT_EXPECT_LT(test, out.b, 0x9E00); + + /* == green 255 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0xffff; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 182 */ + KUNIT_EXPECT_GT(test, out.r, 0xB500); + KUNIT_EXPECT_LT(test, out.r, 0xB780); /* laxed by half*/ + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x0100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* == blue 255 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0x0; + out.b = 0xffff; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 18 */ + KUNIT_EXPECT_GT(test, out.r, 0x1100); + KUNIT_EXPECT_LT(test, out.r, 0x1300); + + /* U 111 */ + KUNIT_EXPECT_GT(test, out.g, 0x6E00); + KUNIT_EXPECT_LT(test, out.g, 0x7000); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x0100); + + /* == red 140 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x8c8c; + out.g = 0x0; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 30 */ + KUNIT_EXPECT_GT(test, out.r, 0x1D00); + KUNIT_EXPECT_LT(test, out.r, 0x1F00); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x100); + + /* V 87 */ + KUNIT_EXPECT_GT(test, out.b, 0x5600); + KUNIT_EXPECT_LT(test, out.b, 0x5800); + + /* == green 140 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0x8c8c; + out.b = 0x0; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 30 */ + KUNIT_EXPECT_GT(test, out.r, 0x6400); + KUNIT_EXPECT_LT(test, out.r, 0x6600); + + /* U 0 */ + KUNIT_EXPECT_LT(test, out.g, 0x100); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x100); + + /* == blue 140 - bt709 enc == */ + out.a = 0xffff; + out.r = 0x0; + out.g = 0x0; + out.b = 0x8c8c; + + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); + + /* Y 30 */ + 
KUNIT_EXPECT_GT(test, out.r, 0x900); + KUNIT_EXPECT_LT(test, out.r, 0xB00); + + /* U 61 */ + KUNIT_EXPECT_GT(test, out.g, 0x3C00); + KUNIT_EXPECT_LT(test, out.g, 0x3E00); + + /* V 0 */ + KUNIT_EXPECT_LT(test, out.b, 0x100); +} + +static struct kunit_case vkms_color_test_cases[] = { + KUNIT_CASE(vkms_color_test_get_lut_index), + KUNIT_CASE(vkms_color_test_lerp), + KUNIT_CASE(vkms_color_test_linear), + KUNIT_CASE(vkms_color_srgb_inv_srgb), + KUNIT_CASE(vkms_color_ctm_3x4_50_desat), + KUNIT_CASE(vkms_color_ctm_3x4_bt709), + {} +}; + +static struct kunit_suite vkms_color_test_suite = { + .name = "vkms-color", + .test_cases = vkms_color_test_cases, +}; + +kunit_test_suite(vkms_color_test_suite); + +MODULE_DESCRIPTION("Kunit test for VKMS LUT handling"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index d75a6252e4d2..1e4ea1863420 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -83,6 +83,7 @@ struct default_config_case { bool enable_cursor; bool enable_writeback; bool enable_overlay; + bool enable_plane_pipeline; }; static void vkms_config_test_empty_config(struct kunit *test) @@ -108,14 +109,22 @@ static void vkms_config_test_empty_config(struct kunit *test) } static struct default_config_case default_config_cases[] = { - { false, false, false }, - { true, false, false }, - { true, true, false }, - { true, false, true }, - { false, true, false }, - { false, true, true }, - { false, false, true }, - { true, true, true }, + { false, false, false, false }, + { true, false, false, false }, + { true, true, false, false }, + { true, false, true, false }, + { false, true, false, false }, + { false, true, true, false }, + { false, false, true, false }, + { true, true, true, false }, + { false, false, false, true }, + { true, false, false, true }, + { true, true, false, true }, + { true, false, true, true }, + { false, true, false, true }, + { false, true, true, true }, + { false, false, true, true }, + { true, true, true, true }, }; KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL); @@ -132,11 +141,15 @@ static void vkms_config_test_default_config(struct kunit *test) config = vkms_config_default_create(params->enable_cursor, params->enable_writeback, - params->enable_overlay); + params->enable_overlay, + params->enable_plane_pipeline); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Planes */ vkms_config_for_each_plane(config, plane_cfg) { + KUNIT_EXPECT_EQ(test, + vkms_config_plane_get_default_pipeline(plane_cfg), + params->enable_plane_pipeline); switch (vkms_config_plane_get_type(plane_cfg)) { case DRM_PLANE_TYPE_PRIMARY: n_primaries++; @@ -368,7 +381,7 @@ static void vkms_config_test_invalid_plane_number(struct kunit *test) struct vkms_config_plane *plane_cfg; int n; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Invalid: No planes */ @@ -393,7 +406,7 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) struct vkms_config_encoder *encoder_cfg; int err; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); plane_cfg = get_first_plane(config); @@ -474,7 +487,7 @@ static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test) struct vkms_config_plane *plane_cfg; struct 
vkms_config_crtc *crtc_cfg; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); plane_cfg = get_first_plane(config); @@ -493,7 +506,7 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test) struct vkms_config_crtc *crtc_cfg; int n; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Invalid: No CRTCs */ @@ -516,7 +529,7 @@ static void vkms_config_test_invalid_encoder_number(struct kunit *test) struct vkms_config_encoder *encoder_cfg; int n; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Invalid: No encoders */ @@ -541,7 +554,7 @@ static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test) struct vkms_config_encoder *encoder_cfg; int err; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); crtc_cfg1 = get_first_crtc(config); @@ -587,7 +600,7 @@ static void vkms_config_test_invalid_connector_number(struct kunit *test) struct vkms_config_connector *connector_cfg; int n; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); /* Invalid: No connectors */ @@ -610,7 +623,7 @@ static void vkms_config_test_valid_connector_possible_encoders(struct kunit *tes struct vkms_config_encoder *encoder_cfg; struct vkms_config_connector *connector_cfg; - config = vkms_config_default_create(false, false, false); + config = vkms_config_default_create(false, false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); encoder_cfg = get_first_encoder(config); diff --git a/drivers/gpu/drm/vkms/vkms_colorop.c b/drivers/gpu/drm/vkms/vkms_colorop.c new file mode 100644 index 000000000000..5c3ffc78aea0 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_colorop.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/slab.h> +#include <drm/drm_colorop.h> +#include <drm/drm_print.h> +#include <drm/drm_property.h> +#include <drm/drm_plane.h> + +#include "vkms_drv.h" + +static const u64 supported_tfs = + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | + BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF); + +#define MAX_COLOR_PIPELINE_OPS 4 + +static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) +{ + struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; + struct drm_device *dev = plane->dev; + int ret; + int i = 0, j = 0; + + memset(ops, 0, sizeof(ops)); + + /* 1st op: 1d curve */ + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); + if (!ops[i]) { + drm_err(dev, "KMS: Failed to allocate colorop\n"); + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + list->type = ops[i]->base.id; + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); + + i++; + + /* 2nd op: 3x4 matrix */ + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); + if (!ops[i]) { + drm_err(dev, "KMS: Failed to allocate colorop\n"); + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); 
+ if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + i++; + + /* 3rd op: 3x4 matrix */ + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); + if (!ops[i]) { + drm_err(dev, "KMS: Failed to allocate colorop\n"); + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + i++; + + /* 4th op: 1d curve */ + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); + if (!ops[i]) { + drm_err(dev, "KMS: Failed to allocate colorop\n"); + ret = -ENOMEM; + goto cleanup; + } + + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs, + DRM_COLOROP_FLAG_ALLOW_BYPASS); + if (ret) + goto cleanup; + + drm_colorop_set_next_property(ops[i - 1], ops[i]); + + return 0; + +cleanup: + for (j = 0; j < i; j++) { + if (ops[j]) { + drm_colorop_cleanup(ops[j]); + kfree(ops[j]); + } + } + + return ret; +} + +int vkms_initialize_colorops(struct drm_plane *plane) +{ + struct drm_prop_enum_list pipeline; + int ret; + + /* Add color pipeline */ + ret = vkms_initialize_color_pipeline(plane, &pipeline); + if (ret) + return ret; + + /* Create COLOR_PIPELINE property and attach */ + ret = drm_plane_create_color_pipeline_property(plane, &pipeline, 1); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index c1ab12a44621..3cf3f26e0d8e 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -11,8 +11,10 @@ #include <drm/drm_print.h> #include <drm/drm_vblank.h> #include <linux/minmax.h> +#include <kunit/visibility.h> -#include "vkms_drv.h" +#include "vkms_composer.h" +#include "vkms_luts.h" static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha) { @@ -61,7 +63,7 @@ static void fill_background(const struct pixel_argb_u16 *background_color, } // lerp(a, b, t) = a + (b - a) * t -static u16 lerp_u16(u16 a, u16 b, s64 t) +VISIBLE_IF_KUNIT u16 lerp_u16(u16 a, u16 b, s64 t) { s64 a_fp = drm_int2fixp(a); s64 b_fp = drm_int2fixp(b); @@ -70,27 +72,18 @@ static u16 lerp_u16(u16 a, u16 b, s64 t) return drm_fixp2int_round(a_fp + delta); } +EXPORT_SYMBOL_IF_KUNIT(lerp_u16); -static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value) +VISIBLE_IF_KUNIT s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value) { s64 color_channel_fp = drm_int2fixp(channel_value); return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio); } +EXPORT_SYMBOL_IF_KUNIT(get_lut_index); -/* - * This enum is related to the positions of the variables inside - * `struct drm_color_lut`, so the order of both needs to be the same. 
- */ -enum lut_channel { - LUT_RED = 0, - LUT_GREEN, - LUT_BLUE, - LUT_RESERVED -}; - -static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, - enum lut_channel channel) +VISIBLE_IF_KUNIT u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, + enum lut_channel channel) { s64 lut_index = get_lut_index(lut, channel_value); u16 *floor_lut_value, *ceil_lut_value; @@ -115,6 +108,8 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan return lerp_u16(floor_channel_value, ceil_channel_value, lut_index & DRM_FIXED_DECIMAL_MASK); } +EXPORT_SYMBOL_IF_KUNIT(apply_lut_to_channel_value); + static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer) { @@ -133,6 +128,112 @@ static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buff } } +VISIBLE_IF_KUNIT void apply_3x4_matrix(struct pixel_argb_s32 *pixel, + const struct drm_color_ctm_3x4 *matrix) +{ + s64 rf, gf, bf; + s64 r, g, b; + + r = drm_int2fixp(pixel->r); + g = drm_int2fixp(pixel->g); + b = drm_int2fixp(pixel->b); + + rf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[0]), r) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[1]), g) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[2]), b) + + drm_sm2fixp(matrix->matrix[3]); + + gf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[4]), r) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[5]), g) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[6]), b) + + drm_sm2fixp(matrix->matrix[7]); + + bf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[8]), r) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[9]), g) + + drm_fixp_mul(drm_sm2fixp(matrix->matrix[10]), b) + + drm_sm2fixp(matrix->matrix[11]); + + pixel->r = drm_fixp2int_round(rf); + pixel->g = drm_fixp2int_round(gf); + pixel->b = drm_fixp2int_round(bf); +} +EXPORT_SYMBOL_IF_KUNIT(apply_3x4_matrix); + +static void apply_colorop(struct pixel_argb_s32 *pixel, struct drm_colorop *colorop) +{ + struct drm_colorop_state *colorop_state = colorop->state; + struct drm_device *dev = colorop->dev; + + if (colorop->type == DRM_COLOROP_1D_CURVE) { + switch (colorop_state->curve_1d_type) { + case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: + pixel->r = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->r, LUT_RED); + pixel->g = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->g, LUT_GREEN); + pixel->b = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->b, LUT_BLUE); + break; + case DRM_COLOROP_1D_CURVE_SRGB_EOTF: + pixel->r = apply_lut_to_channel_value(&srgb_eotf, pixel->r, LUT_RED); + pixel->g = apply_lut_to_channel_value(&srgb_eotf, pixel->g, LUT_GREEN); + pixel->b = apply_lut_to_channel_value(&srgb_eotf, pixel->b, LUT_BLUE); + break; + default: + drm_WARN_ONCE(dev, true, + "unknown colorop 1D curve type %d\n", + colorop_state->curve_1d_type); + break; + } + } else if (colorop->type == DRM_COLOROP_CTM_3X4) { + if (colorop_state->data) + apply_3x4_matrix(pixel, + (struct drm_color_ctm_3x4 *)colorop_state->data->data); + } +} + +static void pre_blend_color_transform(const struct vkms_plane_state *plane_state, + struct line_buffer *output_buffer) +{ + struct pixel_argb_s32 pixel; + + for (size_t x = 0; x < output_buffer->n_pixels; x++) { + struct drm_colorop *colorop = plane_state->base.base.color_pipeline; + + /* + * Some operations, such as applying a BT709 encoding matrix, + * followed by a decoding matrix, require that we preserve + * values above 1.0 and below 0.0 until the end of the pipeline. 
+ * + * Pack the 16-bit UNORM values into s32 to give us head-room to + * avoid clipping until we're at the end of the pipeline. Clip + * intentionally at the end of the pipeline before packing + * UNORM values back into u16. + */ + pixel.a = output_buffer->pixels[x].a; + pixel.r = output_buffer->pixels[x].r; + pixel.g = output_buffer->pixels[x].g; + pixel.b = output_buffer->pixels[x].b; + + while (colorop) { + struct drm_colorop_state *colorop_state; + + colorop_state = colorop->state; + + if (!colorop_state) + return; + + if (!colorop_state->bypass) + apply_colorop(&pixel, colorop); + + colorop = colorop->next; + } + + /* clamp values */ + output_buffer->pixels[x].a = clamp_val(pixel.a, 0, 0xffff); + output_buffer->pixels[x].r = clamp_val(pixel.r, 0, 0xffff); + output_buffer->pixels[x].g = clamp_val(pixel.g, 0, 0xffff); + output_buffer->pixels[x].b = clamp_val(pixel.b, 0, 0xffff); + } +} + /** * direction_for_rotation() - Get the correct reading direction for a given rotation * @@ -348,7 +449,7 @@ static void blend_line(struct vkms_plane_state *current_plane, int y, */ current_plane->pixel_read_line(current_plane, src_x_start, src_y_start, direction, pixel_count, &stage_buffer->pixels[dst_x_start]); - + pre_blend_color_transform(current_plane, stage_buffer); pre_mul_alpha_blend(stage_buffer, output_buffer, dst_x_start, pixel_count); } diff --git a/drivers/gpu/drm/vkms/vkms_composer.h b/drivers/gpu/drm/vkms/vkms_composer.h new file mode 100644 index 000000000000..04dd5646f672 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_composer.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _VKMS_COMPOSER_H_ +#define _VKMS_COMPOSER_H_ + +#include <kunit/visibility.h> +#include "vkms_drv.h" + +/* + * This enum is related to the positions of the variables inside + * `struct drm_color_lut`, so the order of both needs to be the same. 
+ */ +enum lut_channel { + LUT_RED = 0, + LUT_GREEN, + LUT_BLUE, + LUT_RESERVED +}; + +#if IS_ENABLED(CONFIG_KUNIT) +u16 lerp_u16(u16 a, u16 b, s64 t); +s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value); +u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, + enum lut_channel channel); +void apply_3x4_matrix(struct pixel_argb_s32 *pixel, const struct drm_color_ctm_3x4 *matrix); +#endif + +#endif /* _VKMS_COMPOSER_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index f8394a063ecf..8788df9edb7c 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -33,7 +33,8 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_create); struct vkms_config *vkms_config_default_create(bool enable_cursor, bool enable_writeback, - bool enable_overlay) + bool enable_overlay, + bool enable_plane_pipeline) { struct vkms_config *config; struct vkms_config_plane *plane_cfg; @@ -58,6 +59,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) goto err_alloc; + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); if (enable_overlay) { for (n = 0; n < NUM_OVERLAY_PLANES; n++) { @@ -67,6 +69,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) goto err_alloc; @@ -79,6 +82,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, goto err_alloc; vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) goto err_alloc; @@ -389,6 +393,7 @@ struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config) return ERR_PTR(-ENOMEM); plane_cfg->config = config; + plane_cfg->default_pipeline = false; vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index 4c8d668e7ef8..8f7f286a4bdd 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -49,6 +49,7 @@ struct vkms_config_plane { enum drm_plane_type type; struct xarray possible_crtcs; + bool default_pipeline; /* Internal usage */ struct vkms_plane *plane; @@ -203,7 +204,8 @@ struct vkms_config *vkms_config_create(const char *dev_name); */ struct vkms_config *vkms_config_default_create(bool enable_cursor, bool enable_writeback, - bool enable_overlay); + bool enable_overlay, + bool enable_plane_pipeline); /** * vkms_config_destroy() - Free a VKMS configuration @@ -289,6 +291,30 @@ vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg, } /** + * vkms_config_plane_get_default_pipeline() - Return if the plane will + * be created with the default pipeline + * @plane_cfg: Plane to get the information from + */ +static inline bool +vkms_config_plane_get_default_pipeline(struct vkms_config_plane *plane_cfg) +{ + return plane_cfg->default_pipeline; +} + +/** + * vkms_config_plane_set_default_pipeline() - Set if the plane will + * be created with the default pipeline + * @plane_cfg: Plane to configure the pipeline + * @default_pipeline: New default pipeline value + */ +static inline void 
+vkms_config_plane_set_default_pipeline(struct vkms_config_plane *plane_cfg, + bool default_pipeline) +{ + plane_cfg->default_pipeline = default_pipeline; +} + +/** * vkms_config_plane_attach_crtc - Attach a plane to a CRTC * @plane_cfg: Plane to attach * @crtc_cfg: CRTC to attach @plane_cfg to diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 1f80b1f126b6..dd1402f43773 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -51,6 +51,10 @@ static bool enable_overlay; module_param_named(enable_overlay, enable_overlay, bool, 0444); MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support"); +static bool enable_plane_pipeline; +module_param_named(enable_plane_pipeline, enable_plane_pipeline, bool, 0444); +MODULE_PARM_DESC(enable_plane_pipeline, "Enable/Disable plane pipeline support"); + static bool create_default_dev = true; module_param_named(create_default_dev, create_default_dev, bool, 0444); MODULE_PARM_DESC(create_default_dev, "Create or not the default VKMS device"); @@ -227,7 +231,8 @@ static int __init vkms_init(void) if (!create_default_dev) return 0; - config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay); + config = vkms_config_default_create(enable_cursor, enable_writeback, + enable_overlay, enable_plane_pipeline); if (IS_ERR(config)) return PTR_ERR(config); @@ -253,6 +258,7 @@ void vkms_destroy(struct vkms_config *config) fdev = config->dev->faux_dev; + drm_colorop_pipeline_destroy(&config->dev->drm); drm_dev_unregister(&config->dev->drm); drm_atomic_helper_shutdown(&config->dev->drm); devres_release_group(&fdev->dev, NULL); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index db260df1d4f6..0933e4ce0ff0 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -45,6 +45,10 @@ struct vkms_frame_info { unsigned int rotation; }; +struct pixel_argb_s32 { + s32 a, r, g, b; +}; + /** * struct pixel_argb_u16 - Internal representation of a pixel color. * @a: Alpha component value, stored in 16 bits, without padding, using @@ -225,6 +229,7 @@ struct vkms_output { }; struct vkms_config; +struct vkms_config_plane; /** * struct vkms_device - Description of a VKMS device @@ -298,10 +303,10 @@ int vkms_output_init(struct vkms_device *vkmsdev); * vkms_plane_init() - Initialize a plane * * @vkmsdev: VKMS device containing the plane - * @type: type of plane to initialize + * @plane_cfg: plane configuration */ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, - enum drm_plane_type type); + struct vkms_config_plane *plane_cfg); /* CRC Support */ const char *const *vkms_get_crc_sources(struct drm_crtc *crtc, @@ -318,4 +323,7 @@ void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer /* Writeback */ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev, struct vkms_output *vkms_out); +/* Colorops */ +int vkms_initialize_colorops(struct drm_plane *plane); + #endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_luts.c b/drivers/gpu/drm/vkms/vkms_luts.c new file mode 100644 index 000000000000..82cb792f10d8 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_luts.c @@ -0,0 +1,811 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <drm/drm_mode.h> + +#include "vkms_drv.h" +#include "vkms_luts.h" + +/* + * These luts were generated with a LUT generated based on + * skia's transfer function code. 
The LUT generator can be + * found at + * https://gitlab.freedesktop.org/hwentland/lutgen + */ + +static struct drm_color_lut linear_array[LUT_SIZE] = { + { 0x0, 0x0, 0x0, 0 }, + { 0x101, 0x101, 0x101, 0 }, + { 0x202, 0x202, 0x202, 0 }, + { 0x303, 0x303, 0x303, 0 }, + { 0x404, 0x404, 0x404, 0 }, + { 0x505, 0x505, 0x505, 0 }, + { 0x606, 0x606, 0x606, 0 }, + { 0x707, 0x707, 0x707, 0 }, + { 0x808, 0x808, 0x808, 0 }, + { 0x909, 0x909, 0x909, 0 }, + { 0xa0a, 0xa0a, 0xa0a, 0 }, + { 0xb0b, 0xb0b, 0xb0b, 0 }, + { 0xc0c, 0xc0c, 0xc0c, 0 }, + { 0xd0d, 0xd0d, 0xd0d, 0 }, + { 0xe0e, 0xe0e, 0xe0e, 0 }, + { 0xf0f, 0xf0f, 0xf0f, 0 }, + { 0x1010, 0x1010, 0x1010, 0 }, + { 0x1111, 0x1111, 0x1111, 0 }, + { 0x1212, 0x1212, 0x1212, 0 }, + { 0x1313, 0x1313, 0x1313, 0 }, + { 0x1414, 0x1414, 0x1414, 0 }, + { 0x1515, 0x1515, 0x1515, 0 }, + { 0x1616, 0x1616, 0x1616, 0 }, + { 0x1717, 0x1717, 0x1717, 0 }, + { 0x1818, 0x1818, 0x1818, 0 }, + { 0x1919, 0x1919, 0x1919, 0 }, + { 0x1a1a, 0x1a1a, 0x1a1a, 0 }, + { 0x1b1b, 0x1b1b, 0x1b1b, 0 }, + { 0x1c1c, 0x1c1c, 0x1c1c, 0 }, + { 0x1d1d, 0x1d1d, 0x1d1d, 0 }, + { 0x1e1e, 0x1e1e, 0x1e1e, 0 }, + { 0x1f1f, 0x1f1f, 0x1f1f, 0 }, + { 0x2020, 0x2020, 0x2020, 0 }, + { 0x2121, 0x2121, 0x2121, 0 }, + { 0x2222, 0x2222, 0x2222, 0 }, + { 0x2323, 0x2323, 0x2323, 0 }, + { 0x2424, 0x2424, 0x2424, 0 }, + { 0x2525, 0x2525, 0x2525, 0 }, + { 0x2626, 0x2626, 0x2626, 0 }, + { 0x2727, 0x2727, 0x2727, 0 }, + { 0x2828, 0x2828, 0x2828, 0 }, + { 0x2929, 0x2929, 0x2929, 0 }, + { 0x2a2a, 0x2a2a, 0x2a2a, 0 }, + { 0x2b2b, 0x2b2b, 0x2b2b, 0 }, + { 0x2c2c, 0x2c2c, 0x2c2c, 0 }, + { 0x2d2d, 0x2d2d, 0x2d2d, 0 }, + { 0x2e2e, 0x2e2e, 0x2e2e, 0 }, + { 0x2f2f, 0x2f2f, 0x2f2f, 0 }, + { 0x3030, 0x3030, 0x3030, 0 }, + { 0x3131, 0x3131, 0x3131, 0 }, + { 0x3232, 0x3232, 0x3232, 0 }, + { 0x3333, 0x3333, 0x3333, 0 }, + { 0x3434, 0x3434, 0x3434, 0 }, + { 0x3535, 0x3535, 0x3535, 0 }, + { 0x3636, 0x3636, 0x3636, 0 }, + { 0x3737, 0x3737, 0x3737, 0 }, + { 0x3838, 0x3838, 0x3838, 0 }, + { 0x3939, 0x3939, 0x3939, 0 }, + { 0x3a3a, 0x3a3a, 0x3a3a, 0 }, + { 0x3b3b, 0x3b3b, 0x3b3b, 0 }, + { 0x3c3c, 0x3c3c, 0x3c3c, 0 }, + { 0x3d3d, 0x3d3d, 0x3d3d, 0 }, + { 0x3e3e, 0x3e3e, 0x3e3e, 0 }, + { 0x3f3f, 0x3f3f, 0x3f3f, 0 }, + { 0x4040, 0x4040, 0x4040, 0 }, + { 0x4141, 0x4141, 0x4141, 0 }, + { 0x4242, 0x4242, 0x4242, 0 }, + { 0x4343, 0x4343, 0x4343, 0 }, + { 0x4444, 0x4444, 0x4444, 0 }, + { 0x4545, 0x4545, 0x4545, 0 }, + { 0x4646, 0x4646, 0x4646, 0 }, + { 0x4747, 0x4747, 0x4747, 0 }, + { 0x4848, 0x4848, 0x4848, 0 }, + { 0x4949, 0x4949, 0x4949, 0 }, + { 0x4a4a, 0x4a4a, 0x4a4a, 0 }, + { 0x4b4b, 0x4b4b, 0x4b4b, 0 }, + { 0x4c4c, 0x4c4c, 0x4c4c, 0 }, + { 0x4d4d, 0x4d4d, 0x4d4d, 0 }, + { 0x4e4e, 0x4e4e, 0x4e4e, 0 }, + { 0x4f4f, 0x4f4f, 0x4f4f, 0 }, + { 0x5050, 0x5050, 0x5050, 0 }, + { 0x5151, 0x5151, 0x5151, 0 }, + { 0x5252, 0x5252, 0x5252, 0 }, + { 0x5353, 0x5353, 0x5353, 0 }, + { 0x5454, 0x5454, 0x5454, 0 }, + { 0x5555, 0x5555, 0x5555, 0 }, + { 0x5656, 0x5656, 0x5656, 0 }, + { 0x5757, 0x5757, 0x5757, 0 }, + { 0x5858, 0x5858, 0x5858, 0 }, + { 0x5959, 0x5959, 0x5959, 0 }, + { 0x5a5a, 0x5a5a, 0x5a5a, 0 }, + { 0x5b5b, 0x5b5b, 0x5b5b, 0 }, + { 0x5c5c, 0x5c5c, 0x5c5c, 0 }, + { 0x5d5d, 0x5d5d, 0x5d5d, 0 }, + { 0x5e5e, 0x5e5e, 0x5e5e, 0 }, + { 0x5f5f, 0x5f5f, 0x5f5f, 0 }, + { 0x6060, 0x6060, 0x6060, 0 }, + { 0x6161, 0x6161, 0x6161, 0 }, + { 0x6262, 0x6262, 0x6262, 0 }, + { 0x6363, 0x6363, 0x6363, 0 }, + { 0x6464, 0x6464, 0x6464, 0 }, + { 0x6565, 0x6565, 0x6565, 0 }, + { 0x6666, 0x6666, 0x6666, 0 }, + { 0x6767, 0x6767, 0x6767, 0 }, + { 0x6868, 0x6868, 
0x6868, 0 }, + { 0x6969, 0x6969, 0x6969, 0 }, + { 0x6a6a, 0x6a6a, 0x6a6a, 0 }, + { 0x6b6b, 0x6b6b, 0x6b6b, 0 }, + { 0x6c6c, 0x6c6c, 0x6c6c, 0 }, + { 0x6d6d, 0x6d6d, 0x6d6d, 0 }, + { 0x6e6e, 0x6e6e, 0x6e6e, 0 }, + { 0x6f6f, 0x6f6f, 0x6f6f, 0 }, + { 0x7070, 0x7070, 0x7070, 0 }, + { 0x7171, 0x7171, 0x7171, 0 }, + { 0x7272, 0x7272, 0x7272, 0 }, + { 0x7373, 0x7373, 0x7373, 0 }, + { 0x7474, 0x7474, 0x7474, 0 }, + { 0x7575, 0x7575, 0x7575, 0 }, + { 0x7676, 0x7676, 0x7676, 0 }, + { 0x7777, 0x7777, 0x7777, 0 }, + { 0x7878, 0x7878, 0x7878, 0 }, + { 0x7979, 0x7979, 0x7979, 0 }, + { 0x7a7a, 0x7a7a, 0x7a7a, 0 }, + { 0x7b7b, 0x7b7b, 0x7b7b, 0 }, + { 0x7c7c, 0x7c7c, 0x7c7c, 0 }, + { 0x7d7d, 0x7d7d, 0x7d7d, 0 }, + { 0x7e7e, 0x7e7e, 0x7e7e, 0 }, + { 0x7f7f, 0x7f7f, 0x7f7f, 0 }, + { 0x8080, 0x8080, 0x8080, 0 }, + { 0x8181, 0x8181, 0x8181, 0 }, + { 0x8282, 0x8282, 0x8282, 0 }, + { 0x8383, 0x8383, 0x8383, 0 }, + { 0x8484, 0x8484, 0x8484, 0 }, + { 0x8585, 0x8585, 0x8585, 0 }, + { 0x8686, 0x8686, 0x8686, 0 }, + { 0x8787, 0x8787, 0x8787, 0 }, + { 0x8888, 0x8888, 0x8888, 0 }, + { 0x8989, 0x8989, 0x8989, 0 }, + { 0x8a8a, 0x8a8a, 0x8a8a, 0 }, + { 0x8b8b, 0x8b8b, 0x8b8b, 0 }, + { 0x8c8c, 0x8c8c, 0x8c8c, 0 }, + { 0x8d8d, 0x8d8d, 0x8d8d, 0 }, + { 0x8e8e, 0x8e8e, 0x8e8e, 0 }, + { 0x8f8f, 0x8f8f, 0x8f8f, 0 }, + { 0x9090, 0x9090, 0x9090, 0 }, + { 0x9191, 0x9191, 0x9191, 0 }, + { 0x9292, 0x9292, 0x9292, 0 }, + { 0x9393, 0x9393, 0x9393, 0 }, + { 0x9494, 0x9494, 0x9494, 0 }, + { 0x9595, 0x9595, 0x9595, 0 }, + { 0x9696, 0x9696, 0x9696, 0 }, + { 0x9797, 0x9797, 0x9797, 0 }, + { 0x9898, 0x9898, 0x9898, 0 }, + { 0x9999, 0x9999, 0x9999, 0 }, + { 0x9a9a, 0x9a9a, 0x9a9a, 0 }, + { 0x9b9b, 0x9b9b, 0x9b9b, 0 }, + { 0x9c9c, 0x9c9c, 0x9c9c, 0 }, + { 0x9d9d, 0x9d9d, 0x9d9d, 0 }, + { 0x9e9e, 0x9e9e, 0x9e9e, 0 }, + { 0x9f9f, 0x9f9f, 0x9f9f, 0 }, + { 0xa0a0, 0xa0a0, 0xa0a0, 0 }, + { 0xa1a1, 0xa1a1, 0xa1a1, 0 }, + { 0xa2a2, 0xa2a2, 0xa2a2, 0 }, + { 0xa3a3, 0xa3a3, 0xa3a3, 0 }, + { 0xa4a4, 0xa4a4, 0xa4a4, 0 }, + { 0xa5a5, 0xa5a5, 0xa5a5, 0 }, + { 0xa6a6, 0xa6a6, 0xa6a6, 0 }, + { 0xa7a7, 0xa7a7, 0xa7a7, 0 }, + { 0xa8a8, 0xa8a8, 0xa8a8, 0 }, + { 0xa9a9, 0xa9a9, 0xa9a9, 0 }, + { 0xaaaa, 0xaaaa, 0xaaaa, 0 }, + { 0xabab, 0xabab, 0xabab, 0 }, + { 0xacac, 0xacac, 0xacac, 0 }, + { 0xadad, 0xadad, 0xadad, 0 }, + { 0xaeae, 0xaeae, 0xaeae, 0 }, + { 0xafaf, 0xafaf, 0xafaf, 0 }, + { 0xb0b0, 0xb0b0, 0xb0b0, 0 }, + { 0xb1b1, 0xb1b1, 0xb1b1, 0 }, + { 0xb2b2, 0xb2b2, 0xb2b2, 0 }, + { 0xb3b3, 0xb3b3, 0xb3b3, 0 }, + { 0xb4b4, 0xb4b4, 0xb4b4, 0 }, + { 0xb5b5, 0xb5b5, 0xb5b5, 0 }, + { 0xb6b6, 0xb6b6, 0xb6b6, 0 }, + { 0xb7b7, 0xb7b7, 0xb7b7, 0 }, + { 0xb8b8, 0xb8b8, 0xb8b8, 0 }, + { 0xb9b9, 0xb9b9, 0xb9b9, 0 }, + { 0xbaba, 0xbaba, 0xbaba, 0 }, + { 0xbbbb, 0xbbbb, 0xbbbb, 0 }, + { 0xbcbc, 0xbcbc, 0xbcbc, 0 }, + { 0xbdbd, 0xbdbd, 0xbdbd, 0 }, + { 0xbebe, 0xbebe, 0xbebe, 0 }, + { 0xbfbf, 0xbfbf, 0xbfbf, 0 }, + { 0xc0c0, 0xc0c0, 0xc0c0, 0 }, + { 0xc1c1, 0xc1c1, 0xc1c1, 0 }, + { 0xc2c2, 0xc2c2, 0xc2c2, 0 }, + { 0xc3c3, 0xc3c3, 0xc3c3, 0 }, + { 0xc4c4, 0xc4c4, 0xc4c4, 0 }, + { 0xc5c5, 0xc5c5, 0xc5c5, 0 }, + { 0xc6c6, 0xc6c6, 0xc6c6, 0 }, + { 0xc7c7, 0xc7c7, 0xc7c7, 0 }, + { 0xc8c8, 0xc8c8, 0xc8c8, 0 }, + { 0xc9c9, 0xc9c9, 0xc9c9, 0 }, + { 0xcaca, 0xcaca, 0xcaca, 0 }, + { 0xcbcb, 0xcbcb, 0xcbcb, 0 }, + { 0xcccc, 0xcccc, 0xcccc, 0 }, + { 0xcdcd, 0xcdcd, 0xcdcd, 0 }, + { 0xcece, 0xcece, 0xcece, 0 }, + { 0xcfcf, 0xcfcf, 0xcfcf, 0 }, + { 0xd0d0, 0xd0d0, 0xd0d0, 0 }, + { 0xd1d1, 0xd1d1, 0xd1d1, 0 }, + { 0xd2d2, 0xd2d2, 0xd2d2, 0 }, + { 0xd3d3, 0xd3d3, 0xd3d3, 0 }, + { 
0xd4d4, 0xd4d4, 0xd4d4, 0 }, + { 0xd5d5, 0xd5d5, 0xd5d5, 0 }, + { 0xd6d6, 0xd6d6, 0xd6d6, 0 }, + { 0xd7d7, 0xd7d7, 0xd7d7, 0 }, + { 0xd8d8, 0xd8d8, 0xd8d8, 0 }, + { 0xd9d9, 0xd9d9, 0xd9d9, 0 }, + { 0xdada, 0xdada, 0xdada, 0 }, + { 0xdbdb, 0xdbdb, 0xdbdb, 0 }, + { 0xdcdc, 0xdcdc, 0xdcdc, 0 }, + { 0xdddd, 0xdddd, 0xdddd, 0 }, + { 0xdede, 0xdede, 0xdede, 0 }, + { 0xdfdf, 0xdfdf, 0xdfdf, 0 }, + { 0xe0e0, 0xe0e0, 0xe0e0, 0 }, + { 0xe1e1, 0xe1e1, 0xe1e1, 0 }, + { 0xe2e2, 0xe2e2, 0xe2e2, 0 }, + { 0xe3e3, 0xe3e3, 0xe3e3, 0 }, + { 0xe4e4, 0xe4e4, 0xe4e4, 0 }, + { 0xe5e5, 0xe5e5, 0xe5e5, 0 }, + { 0xe6e6, 0xe6e6, 0xe6e6, 0 }, + { 0xe7e7, 0xe7e7, 0xe7e7, 0 }, + { 0xe8e8, 0xe8e8, 0xe8e8, 0 }, + { 0xe9e9, 0xe9e9, 0xe9e9, 0 }, + { 0xeaea, 0xeaea, 0xeaea, 0 }, + { 0xebeb, 0xebeb, 0xebeb, 0 }, + { 0xecec, 0xecec, 0xecec, 0 }, + { 0xeded, 0xeded, 0xeded, 0 }, + { 0xeeee, 0xeeee, 0xeeee, 0 }, + { 0xefef, 0xefef, 0xefef, 0 }, + { 0xf0f0, 0xf0f0, 0xf0f0, 0 }, + { 0xf1f1, 0xf1f1, 0xf1f1, 0 }, + { 0xf2f2, 0xf2f2, 0xf2f2, 0 }, + { 0xf3f3, 0xf3f3, 0xf3f3, 0 }, + { 0xf4f4, 0xf4f4, 0xf4f4, 0 }, + { 0xf5f5, 0xf5f5, 0xf5f5, 0 }, + { 0xf6f6, 0xf6f6, 0xf6f6, 0 }, + { 0xf7f7, 0xf7f7, 0xf7f7, 0 }, + { 0xf8f8, 0xf8f8, 0xf8f8, 0 }, + { 0xf9f9, 0xf9f9, 0xf9f9, 0 }, + { 0xfafa, 0xfafa, 0xfafa, 0 }, + { 0xfbfb, 0xfbfb, 0xfbfb, 0 }, + { 0xfcfc, 0xfcfc, 0xfcfc, 0 }, + { 0xfdfd, 0xfdfd, 0xfdfd, 0 }, + { 0xfefe, 0xfefe, 0xfefe, 0 }, + { 0xffff, 0xffff, 0xffff, 0 }, +}; + +const struct vkms_color_lut linear_eotf = { + .base = linear_array, + .lut_length = LUT_SIZE, + .channel_value2index_ratio = 0xff00ffll +}; +EXPORT_SYMBOL(linear_eotf); + +static struct drm_color_lut srgb_array[LUT_SIZE] = { + { 0x0, 0x0, 0x0, 0 }, + { 0x13, 0x13, 0x13, 0 }, + { 0x27, 0x27, 0x27, 0 }, + { 0x3b, 0x3b, 0x3b, 0 }, + { 0x4f, 0x4f, 0x4f, 0 }, + { 0x63, 0x63, 0x63, 0 }, + { 0x77, 0x77, 0x77, 0 }, + { 0x8b, 0x8b, 0x8b, 0 }, + { 0x9f, 0x9f, 0x9f, 0 }, + { 0xb3, 0xb3, 0xb3, 0 }, + { 0xc6, 0xc6, 0xc6, 0 }, + { 0xdb, 0xdb, 0xdb, 0 }, + { 0xf0, 0xf0, 0xf0, 0 }, + { 0x107, 0x107, 0x107, 0 }, + { 0x11f, 0x11f, 0x11f, 0 }, + { 0x139, 0x139, 0x139, 0 }, + { 0x153, 0x153, 0x153, 0 }, + { 0x16f, 0x16f, 0x16f, 0 }, + { 0x18c, 0x18c, 0x18c, 0 }, + { 0x1aa, 0x1aa, 0x1aa, 0 }, + { 0x1ca, 0x1ca, 0x1ca, 0 }, + { 0x1eb, 0x1eb, 0x1eb, 0 }, + { 0x20d, 0x20d, 0x20d, 0 }, + { 0x231, 0x231, 0x231, 0 }, + { 0x256, 0x256, 0x256, 0 }, + { 0x27d, 0x27d, 0x27d, 0 }, + { 0x2a4, 0x2a4, 0x2a4, 0 }, + { 0x2ce, 0x2ce, 0x2ce, 0 }, + { 0x2f9, 0x2f9, 0x2f9, 0 }, + { 0x325, 0x325, 0x325, 0 }, + { 0x352, 0x352, 0x352, 0 }, + { 0x381, 0x381, 0x381, 0 }, + { 0x3b2, 0x3b2, 0x3b2, 0 }, + { 0x3e4, 0x3e4, 0x3e4, 0 }, + { 0x418, 0x418, 0x418, 0 }, + { 0x44d, 0x44d, 0x44d, 0 }, + { 0x484, 0x484, 0x484, 0 }, + { 0x4bc, 0x4bc, 0x4bc, 0 }, + { 0x4f6, 0x4f6, 0x4f6, 0 }, + { 0x531, 0x531, 0x531, 0 }, + { 0x56e, 0x56e, 0x56e, 0 }, + { 0x5ad, 0x5ad, 0x5ad, 0 }, + { 0x5ed, 0x5ed, 0x5ed, 0 }, + { 0x62f, 0x62f, 0x62f, 0 }, + { 0x672, 0x672, 0x672, 0 }, + { 0x6b7, 0x6b7, 0x6b7, 0 }, + { 0x6fe, 0x6fe, 0x6fe, 0 }, + { 0x746, 0x746, 0x746, 0 }, + { 0x791, 0x791, 0x791, 0 }, + { 0x7dc, 0x7dc, 0x7dc, 0 }, + { 0x82a, 0x82a, 0x82a, 0 }, + { 0x879, 0x879, 0x879, 0 }, + { 0x8ca, 0x8ca, 0x8ca, 0 }, + { 0x91d, 0x91d, 0x91d, 0 }, + { 0x971, 0x971, 0x971, 0 }, + { 0x9c7, 0x9c7, 0x9c7, 0 }, + { 0xa1f, 0xa1f, 0xa1f, 0 }, + { 0xa79, 0xa79, 0xa79, 0 }, + { 0xad4, 0xad4, 0xad4, 0 }, + { 0xb32, 0xb32, 0xb32, 0 }, + { 0xb91, 0xb91, 0xb91, 0 }, + { 0xbf2, 0xbf2, 0xbf2, 0 }, + { 0xc54, 0xc54, 0xc54, 0 }, + { 0xcb9, 0xcb9, 0xcb9, 0 
}, + { 0xd1f, 0xd1f, 0xd1f, 0 }, + { 0xd88, 0xd88, 0xd88, 0 }, + { 0xdf2, 0xdf2, 0xdf2, 0 }, + { 0xe5e, 0xe5e, 0xe5e, 0 }, + { 0xecc, 0xecc, 0xecc, 0 }, + { 0xf3c, 0xf3c, 0xf3c, 0 }, + { 0xfad, 0xfad, 0xfad, 0 }, + { 0x1021, 0x1021, 0x1021, 0 }, + { 0x1096, 0x1096, 0x1096, 0 }, + { 0x110e, 0x110e, 0x110e, 0 }, + { 0x1187, 0x1187, 0x1187, 0 }, + { 0x1203, 0x1203, 0x1203, 0 }, + { 0x1280, 0x1280, 0x1280, 0 }, + { 0x12ff, 0x12ff, 0x12ff, 0 }, + { 0x1380, 0x1380, 0x1380, 0 }, + { 0x1404, 0x1404, 0x1404, 0 }, + { 0x1489, 0x1489, 0x1489, 0 }, + { 0x1510, 0x1510, 0x1510, 0 }, + { 0x1599, 0x1599, 0x1599, 0 }, + { 0x1624, 0x1624, 0x1624, 0 }, + { 0x16b2, 0x16b2, 0x16b2, 0 }, + { 0x1741, 0x1741, 0x1741, 0 }, + { 0x17d2, 0x17d2, 0x17d2, 0 }, + { 0x1865, 0x1865, 0x1865, 0 }, + { 0x18fb, 0x18fb, 0x18fb, 0 }, + { 0x1992, 0x1992, 0x1992, 0 }, + { 0x1a2c, 0x1a2c, 0x1a2c, 0 }, + { 0x1ac8, 0x1ac8, 0x1ac8, 0 }, + { 0x1b65, 0x1b65, 0x1b65, 0 }, + { 0x1c05, 0x1c05, 0x1c05, 0 }, + { 0x1ca7, 0x1ca7, 0x1ca7, 0 }, + { 0x1d4b, 0x1d4b, 0x1d4b, 0 }, + { 0x1df1, 0x1df1, 0x1df1, 0 }, + { 0x1e99, 0x1e99, 0x1e99, 0 }, + { 0x1f44, 0x1f44, 0x1f44, 0 }, + { 0x1ff0, 0x1ff0, 0x1ff0, 0 }, + { 0x209f, 0x209f, 0x209f, 0 }, + { 0x2150, 0x2150, 0x2150, 0 }, + { 0x2203, 0x2203, 0x2203, 0 }, + { 0x22b8, 0x22b8, 0x22b8, 0 }, + { 0x2370, 0x2370, 0x2370, 0 }, + { 0x2429, 0x2429, 0x2429, 0 }, + { 0x24e5, 0x24e5, 0x24e5, 0 }, + { 0x25a3, 0x25a3, 0x25a3, 0 }, + { 0x2663, 0x2663, 0x2663, 0 }, + { 0x2726, 0x2726, 0x2726, 0 }, + { 0x27ea, 0x27ea, 0x27ea, 0 }, + { 0x28b1, 0x28b1, 0x28b1, 0 }, + { 0x297a, 0x297a, 0x297a, 0 }, + { 0x2a45, 0x2a45, 0x2a45, 0 }, + { 0x2b13, 0x2b13, 0x2b13, 0 }, + { 0x2be3, 0x2be3, 0x2be3, 0 }, + { 0x2cb5, 0x2cb5, 0x2cb5, 0 }, + { 0x2d89, 0x2d89, 0x2d89, 0 }, + { 0x2e60, 0x2e60, 0x2e60, 0 }, + { 0x2f39, 0x2f39, 0x2f39, 0 }, + { 0x3014, 0x3014, 0x3014, 0 }, + { 0x30f2, 0x30f2, 0x30f2, 0 }, + { 0x31d2, 0x31d2, 0x31d2, 0 }, + { 0x32b4, 0x32b4, 0x32b4, 0 }, + { 0x3398, 0x3398, 0x3398, 0 }, + { 0x347f, 0x347f, 0x347f, 0 }, + { 0x3569, 0x3569, 0x3569, 0 }, + { 0x3654, 0x3654, 0x3654, 0 }, + { 0x3742, 0x3742, 0x3742, 0 }, + { 0x3832, 0x3832, 0x3832, 0 }, + { 0x3925, 0x3925, 0x3925, 0 }, + { 0x3a1a, 0x3a1a, 0x3a1a, 0 }, + { 0x3b11, 0x3b11, 0x3b11, 0 }, + { 0x3c0b, 0x3c0b, 0x3c0b, 0 }, + { 0x3d07, 0x3d07, 0x3d07, 0 }, + { 0x3e05, 0x3e05, 0x3e05, 0 }, + { 0x3f06, 0x3f06, 0x3f06, 0 }, + { 0x400a, 0x400a, 0x400a, 0 }, + { 0x410f, 0x410f, 0x410f, 0 }, + { 0x4218, 0x4218, 0x4218, 0 }, + { 0x4322, 0x4322, 0x4322, 0 }, + { 0x442f, 0x442f, 0x442f, 0 }, + { 0x453f, 0x453f, 0x453f, 0 }, + { 0x4650, 0x4650, 0x4650, 0 }, + { 0x4765, 0x4765, 0x4765, 0 }, + { 0x487c, 0x487c, 0x487c, 0 }, + { 0x4995, 0x4995, 0x4995, 0 }, + { 0x4ab1, 0x4ab1, 0x4ab1, 0 }, + { 0x4bcf, 0x4bcf, 0x4bcf, 0 }, + { 0x4cf0, 0x4cf0, 0x4cf0, 0 }, + { 0x4e13, 0x4e13, 0x4e13, 0 }, + { 0x4f39, 0x4f39, 0x4f39, 0 }, + { 0x5061, 0x5061, 0x5061, 0 }, + { 0x518b, 0x518b, 0x518b, 0 }, + { 0x52b9, 0x52b9, 0x52b9, 0 }, + { 0x53e8, 0x53e8, 0x53e8, 0 }, + { 0x551b, 0x551b, 0x551b, 0 }, + { 0x5650, 0x5650, 0x5650, 0 }, + { 0x5787, 0x5787, 0x5787, 0 }, + { 0x58c1, 0x58c1, 0x58c1, 0 }, + { 0x59fd, 0x59fd, 0x59fd, 0 }, + { 0x5b3c, 0x5b3c, 0x5b3c, 0 }, + { 0x5c7e, 0x5c7e, 0x5c7e, 0 }, + { 0x5dc2, 0x5dc2, 0x5dc2, 0 }, + { 0x5f09, 0x5f09, 0x5f09, 0 }, + { 0x6052, 0x6052, 0x6052, 0 }, + { 0x619e, 0x619e, 0x619e, 0 }, + { 0x62ec, 0x62ec, 0x62ec, 0 }, + { 0x643d, 0x643d, 0x643d, 0 }, + { 0x6591, 0x6591, 0x6591, 0 }, + { 0x66e7, 0x66e7, 0x66e7, 0 }, + { 0x6840, 0x6840, 0x6840, 0 }, + { 
0x699b, 0x699b, 0x699b, 0 }, + { 0x6afa, 0x6afa, 0x6afa, 0 }, + { 0x6c5a, 0x6c5a, 0x6c5a, 0 }, + { 0x6dbe, 0x6dbe, 0x6dbe, 0 }, + { 0x6f24, 0x6f24, 0x6f24, 0 }, + { 0x708c, 0x708c, 0x708c, 0 }, + { 0x71f8, 0x71f8, 0x71f8, 0 }, + { 0x7366, 0x7366, 0x7366, 0 }, + { 0x74d6, 0x74d6, 0x74d6, 0 }, + { 0x764a, 0x764a, 0x764a, 0 }, + { 0x77c0, 0x77c0, 0x77c0, 0 }, + { 0x7938, 0x7938, 0x7938, 0 }, + { 0x7ab4, 0x7ab4, 0x7ab4, 0 }, + { 0x7c32, 0x7c32, 0x7c32, 0 }, + { 0x7db3, 0x7db3, 0x7db3, 0 }, + { 0x7f36, 0x7f36, 0x7f36, 0 }, + { 0x80bc, 0x80bc, 0x80bc, 0 }, + { 0x8245, 0x8245, 0x8245, 0 }, + { 0x83d1, 0x83d1, 0x83d1, 0 }, + { 0x855f, 0x855f, 0x855f, 0 }, + { 0x86f0, 0x86f0, 0x86f0, 0 }, + { 0x8884, 0x8884, 0x8884, 0 }, + { 0x8a1a, 0x8a1a, 0x8a1a, 0 }, + { 0x8bb4, 0x8bb4, 0x8bb4, 0 }, + { 0x8d50, 0x8d50, 0x8d50, 0 }, + { 0x8eee, 0x8eee, 0x8eee, 0 }, + { 0x9090, 0x9090, 0x9090, 0 }, + { 0x9234, 0x9234, 0x9234, 0 }, + { 0x93db, 0x93db, 0x93db, 0 }, + { 0x9585, 0x9585, 0x9585, 0 }, + { 0x9732, 0x9732, 0x9732, 0 }, + { 0x98e1, 0x98e1, 0x98e1, 0 }, + { 0x9a93, 0x9a93, 0x9a93, 0 }, + { 0x9c48, 0x9c48, 0x9c48, 0 }, + { 0x9e00, 0x9e00, 0x9e00, 0 }, + { 0x9fbb, 0x9fbb, 0x9fbb, 0 }, + { 0xa178, 0xa178, 0xa178, 0 }, + { 0xa338, 0xa338, 0xa338, 0 }, + { 0xa4fb, 0xa4fb, 0xa4fb, 0 }, + { 0xa6c1, 0xa6c1, 0xa6c1, 0 }, + { 0xa88a, 0xa88a, 0xa88a, 0 }, + { 0xaa56, 0xaa56, 0xaa56, 0 }, + { 0xac24, 0xac24, 0xac24, 0 }, + { 0xadf5, 0xadf5, 0xadf5, 0 }, + { 0xafc9, 0xafc9, 0xafc9, 0 }, + { 0xb1a0, 0xb1a0, 0xb1a0, 0 }, + { 0xb37a, 0xb37a, 0xb37a, 0 }, + { 0xb557, 0xb557, 0xb557, 0 }, + { 0xb736, 0xb736, 0xb736, 0 }, + { 0xb919, 0xb919, 0xb919, 0 }, + { 0xbafe, 0xbafe, 0xbafe, 0 }, + { 0xbce6, 0xbce6, 0xbce6, 0 }, + { 0xbed2, 0xbed2, 0xbed2, 0 }, + { 0xc0c0, 0xc0c0, 0xc0c0, 0 }, + { 0xc2b0, 0xc2b0, 0xc2b0, 0 }, + { 0xc4a4, 0xc4a4, 0xc4a4, 0 }, + { 0xc69b, 0xc69b, 0xc69b, 0 }, + { 0xc895, 0xc895, 0xc895, 0 }, + { 0xca91, 0xca91, 0xca91, 0 }, + { 0xcc91, 0xcc91, 0xcc91, 0 }, + { 0xce93, 0xce93, 0xce93, 0 }, + { 0xd098, 0xd098, 0xd098, 0 }, + { 0xd2a1, 0xd2a1, 0xd2a1, 0 }, + { 0xd4ac, 0xd4ac, 0xd4ac, 0 }, + { 0xd6ba, 0xd6ba, 0xd6ba, 0 }, + { 0xd8cb, 0xd8cb, 0xd8cb, 0 }, + { 0xdadf, 0xdadf, 0xdadf, 0 }, + { 0xdcf7, 0xdcf7, 0xdcf7, 0 }, + { 0xdf11, 0xdf11, 0xdf11, 0 }, + { 0xe12e, 0xe12e, 0xe12e, 0 }, + { 0xe34e, 0xe34e, 0xe34e, 0 }, + { 0xe571, 0xe571, 0xe571, 0 }, + { 0xe796, 0xe796, 0xe796, 0 }, + { 0xe9bf, 0xe9bf, 0xe9bf, 0 }, + { 0xebeb, 0xebeb, 0xebeb, 0 }, + { 0xee1a, 0xee1a, 0xee1a, 0 }, + { 0xf04c, 0xf04c, 0xf04c, 0 }, + { 0xf281, 0xf281, 0xf281, 0 }, + { 0xf4b9, 0xf4b9, 0xf4b9, 0 }, + { 0xf6f4, 0xf6f4, 0xf6f4, 0 }, + { 0xf932, 0xf932, 0xf932, 0 }, + { 0xfb73, 0xfb73, 0xfb73, 0 }, + { 0xfdb7, 0xfdb7, 0xfdb7, 0 }, + { 0xffff, 0xffff, 0xffff, 0 }, +}; + +const struct vkms_color_lut srgb_eotf = { + .base = srgb_array, + .lut_length = LUT_SIZE, + .channel_value2index_ratio = 0xff00ffll +}; +EXPORT_SYMBOL(srgb_eotf); + +static struct drm_color_lut srgb_inv_array[LUT_SIZE] = { + { 0x0, 0x0, 0x0, 0 }, + { 0xcc2, 0xcc2, 0xcc2, 0 }, + { 0x15be, 0x15be, 0x15be, 0 }, + { 0x1c56, 0x1c56, 0x1c56, 0 }, + { 0x21bd, 0x21bd, 0x21bd, 0 }, + { 0x2666, 0x2666, 0x2666, 0 }, + { 0x2a8a, 0x2a8a, 0x2a8a, 0 }, + { 0x2e4c, 0x2e4c, 0x2e4c, 0 }, + { 0x31c0, 0x31c0, 0x31c0, 0 }, + { 0x34f6, 0x34f6, 0x34f6, 0 }, + { 0x37f9, 0x37f9, 0x37f9, 0 }, + { 0x3acf, 0x3acf, 0x3acf, 0 }, + { 0x3d80, 0x3d80, 0x3d80, 0 }, + { 0x4010, 0x4010, 0x4010, 0 }, + { 0x4284, 0x4284, 0x4284, 0 }, + { 0x44dd, 0x44dd, 0x44dd, 0 }, + { 0x4720, 0x4720, 0x4720, 0 }, + { 
0x494e, 0x494e, 0x494e, 0 }, + { 0x4b69, 0x4b69, 0x4b69, 0 }, + { 0x4d73, 0x4d73, 0x4d73, 0 }, + { 0x4f6e, 0x4f6e, 0x4f6e, 0 }, + { 0x5159, 0x5159, 0x5159, 0 }, + { 0x5337, 0x5337, 0x5337, 0 }, + { 0x5509, 0x5509, 0x5509, 0 }, + { 0x56cf, 0x56cf, 0x56cf, 0 }, + { 0x588a, 0x588a, 0x588a, 0 }, + { 0x5a3b, 0x5a3b, 0x5a3b, 0 }, + { 0x5be2, 0x5be2, 0x5be2, 0 }, + { 0x5d80, 0x5d80, 0x5d80, 0 }, + { 0x5f16, 0x5f16, 0x5f16, 0 }, + { 0x60a4, 0x60a4, 0x60a4, 0 }, + { 0x6229, 0x6229, 0x6229, 0 }, + { 0x63a8, 0x63a8, 0x63a8, 0 }, + { 0x6520, 0x6520, 0x6520, 0 }, + { 0x6691, 0x6691, 0x6691, 0 }, + { 0x67fc, 0x67fc, 0x67fc, 0 }, + { 0x6961, 0x6961, 0x6961, 0 }, + { 0x6ac0, 0x6ac0, 0x6ac0, 0 }, + { 0x6c19, 0x6c19, 0x6c19, 0 }, + { 0x6d6e, 0x6d6e, 0x6d6e, 0 }, + { 0x6ebd, 0x6ebd, 0x6ebd, 0 }, + { 0x7008, 0x7008, 0x7008, 0 }, + { 0x714d, 0x714d, 0x714d, 0 }, + { 0x728f, 0x728f, 0x728f, 0 }, + { 0x73cc, 0x73cc, 0x73cc, 0 }, + { 0x7504, 0x7504, 0x7504, 0 }, + { 0x7639, 0x7639, 0x7639, 0 }, + { 0x776a, 0x776a, 0x776a, 0 }, + { 0x7897, 0x7897, 0x7897, 0 }, + { 0x79c1, 0x79c1, 0x79c1, 0 }, + { 0x7ae7, 0x7ae7, 0x7ae7, 0 }, + { 0x7c09, 0x7c09, 0x7c09, 0 }, + { 0x7d28, 0x7d28, 0x7d28, 0 }, + { 0x7e44, 0x7e44, 0x7e44, 0 }, + { 0x7f5d, 0x7f5d, 0x7f5d, 0 }, + { 0x8073, 0x8073, 0x8073, 0 }, + { 0x8186, 0x8186, 0x8186, 0 }, + { 0x8296, 0x8296, 0x8296, 0 }, + { 0x83a4, 0x83a4, 0x83a4, 0 }, + { 0x84ae, 0x84ae, 0x84ae, 0 }, + { 0x85b6, 0x85b6, 0x85b6, 0 }, + { 0x86bc, 0x86bc, 0x86bc, 0 }, + { 0x87bf, 0x87bf, 0x87bf, 0 }, + { 0x88bf, 0x88bf, 0x88bf, 0 }, + { 0x89be, 0x89be, 0x89be, 0 }, + { 0x8ab9, 0x8ab9, 0x8ab9, 0 }, + { 0x8bb3, 0x8bb3, 0x8bb3, 0 }, + { 0x8cab, 0x8cab, 0x8cab, 0 }, + { 0x8da0, 0x8da0, 0x8da0, 0 }, + { 0x8e93, 0x8e93, 0x8e93, 0 }, + { 0x8f84, 0x8f84, 0x8f84, 0 }, + { 0x9073, 0x9073, 0x9073, 0 }, + { 0x9161, 0x9161, 0x9161, 0 }, + { 0x924c, 0x924c, 0x924c, 0 }, + { 0x9335, 0x9335, 0x9335, 0 }, + { 0x941d, 0x941d, 0x941d, 0 }, + { 0x9503, 0x9503, 0x9503, 0 }, + { 0x95e7, 0x95e7, 0x95e7, 0 }, + { 0x96c9, 0x96c9, 0x96c9, 0 }, + { 0x97aa, 0x97aa, 0x97aa, 0 }, + { 0x9889, 0x9889, 0x9889, 0 }, + { 0x9966, 0x9966, 0x9966, 0 }, + { 0x9a42, 0x9a42, 0x9a42, 0 }, + { 0x9b1c, 0x9b1c, 0x9b1c, 0 }, + { 0x9bf5, 0x9bf5, 0x9bf5, 0 }, + { 0x9ccc, 0x9ccc, 0x9ccc, 0 }, + { 0x9da1, 0x9da1, 0x9da1, 0 }, + { 0x9e76, 0x9e76, 0x9e76, 0 }, + { 0x9f49, 0x9f49, 0x9f49, 0 }, + { 0xa01a, 0xa01a, 0xa01a, 0 }, + { 0xa0ea, 0xa0ea, 0xa0ea, 0 }, + { 0xa1b9, 0xa1b9, 0xa1b9, 0 }, + { 0xa286, 0xa286, 0xa286, 0 }, + { 0xa352, 0xa352, 0xa352, 0 }, + { 0xa41d, 0xa41d, 0xa41d, 0 }, + { 0xa4e7, 0xa4e7, 0xa4e7, 0 }, + { 0xa5af, 0xa5af, 0xa5af, 0 }, + { 0xa676, 0xa676, 0xa676, 0 }, + { 0xa73c, 0xa73c, 0xa73c, 0 }, + { 0xa801, 0xa801, 0xa801, 0 }, + { 0xa8c5, 0xa8c5, 0xa8c5, 0 }, + { 0xa987, 0xa987, 0xa987, 0 }, + { 0xaa48, 0xaa48, 0xaa48, 0 }, + { 0xab09, 0xab09, 0xab09, 0 }, + { 0xabc8, 0xabc8, 0xabc8, 0 }, + { 0xac86, 0xac86, 0xac86, 0 }, + { 0xad43, 0xad43, 0xad43, 0 }, + { 0xadff, 0xadff, 0xadff, 0 }, + { 0xaeba, 0xaeba, 0xaeba, 0 }, + { 0xaf74, 0xaf74, 0xaf74, 0 }, + { 0xb02d, 0xb02d, 0xb02d, 0 }, + { 0xb0e5, 0xb0e5, 0xb0e5, 0 }, + { 0xb19c, 0xb19c, 0xb19c, 0 }, + { 0xb252, 0xb252, 0xb252, 0 }, + { 0xb307, 0xb307, 0xb307, 0 }, + { 0xb3bb, 0xb3bb, 0xb3bb, 0 }, + { 0xb46f, 0xb46f, 0xb46f, 0 }, + { 0xb521, 0xb521, 0xb521, 0 }, + { 0xb5d3, 0xb5d3, 0xb5d3, 0 }, + { 0xb683, 0xb683, 0xb683, 0 }, + { 0xb733, 0xb733, 0xb733, 0 }, + { 0xb7e2, 0xb7e2, 0xb7e2, 0 }, + { 0xb890, 0xb890, 0xb890, 0 }, + { 0xb93d, 0xb93d, 0xb93d, 0 }, + { 0xb9ea, 0xb9ea, 0xb9ea, 
0 }, + { 0xba96, 0xba96, 0xba96, 0 }, + { 0xbb40, 0xbb40, 0xbb40, 0 }, + { 0xbbea, 0xbbea, 0xbbea, 0 }, + { 0xbc94, 0xbc94, 0xbc94, 0 }, + { 0xbd3c, 0xbd3c, 0xbd3c, 0 }, + { 0xbde4, 0xbde4, 0xbde4, 0 }, + { 0xbe8b, 0xbe8b, 0xbe8b, 0 }, + { 0xbf31, 0xbf31, 0xbf31, 0 }, + { 0xbfd7, 0xbfd7, 0xbfd7, 0 }, + { 0xc07b, 0xc07b, 0xc07b, 0 }, + { 0xc120, 0xc120, 0xc120, 0 }, + { 0xc1c3, 0xc1c3, 0xc1c3, 0 }, + { 0xc266, 0xc266, 0xc266, 0 }, + { 0xc308, 0xc308, 0xc308, 0 }, + { 0xc3a9, 0xc3a9, 0xc3a9, 0 }, + { 0xc449, 0xc449, 0xc449, 0 }, + { 0xc4e9, 0xc4e9, 0xc4e9, 0 }, + { 0xc589, 0xc589, 0xc589, 0 }, + { 0xc627, 0xc627, 0xc627, 0 }, + { 0xc6c5, 0xc6c5, 0xc6c5, 0 }, + { 0xc763, 0xc763, 0xc763, 0 }, + { 0xc7ff, 0xc7ff, 0xc7ff, 0 }, + { 0xc89b, 0xc89b, 0xc89b, 0 }, + { 0xc937, 0xc937, 0xc937, 0 }, + { 0xc9d2, 0xc9d2, 0xc9d2, 0 }, + { 0xca6c, 0xca6c, 0xca6c, 0 }, + { 0xcb06, 0xcb06, 0xcb06, 0 }, + { 0xcb9f, 0xcb9f, 0xcb9f, 0 }, + { 0xcc37, 0xcc37, 0xcc37, 0 }, + { 0xcccf, 0xcccf, 0xcccf, 0 }, + { 0xcd66, 0xcd66, 0xcd66, 0 }, + { 0xcdfd, 0xcdfd, 0xcdfd, 0 }, + { 0xce93, 0xce93, 0xce93, 0 }, + { 0xcf29, 0xcf29, 0xcf29, 0 }, + { 0xcfbe, 0xcfbe, 0xcfbe, 0 }, + { 0xd053, 0xd053, 0xd053, 0 }, + { 0xd0e7, 0xd0e7, 0xd0e7, 0 }, + { 0xd17a, 0xd17a, 0xd17a, 0 }, + { 0xd20d, 0xd20d, 0xd20d, 0 }, + { 0xd2a0, 0xd2a0, 0xd2a0, 0 }, + { 0xd331, 0xd331, 0xd331, 0 }, + { 0xd3c3, 0xd3c3, 0xd3c3, 0 }, + { 0xd454, 0xd454, 0xd454, 0 }, + { 0xd4e4, 0xd4e4, 0xd4e4, 0 }, + { 0xd574, 0xd574, 0xd574, 0 }, + { 0xd603, 0xd603, 0xd603, 0 }, + { 0xd692, 0xd692, 0xd692, 0 }, + { 0xd720, 0xd720, 0xd720, 0 }, + { 0xd7ae, 0xd7ae, 0xd7ae, 0 }, + { 0xd83c, 0xd83c, 0xd83c, 0 }, + { 0xd8c9, 0xd8c9, 0xd8c9, 0 }, + { 0xd955, 0xd955, 0xd955, 0 }, + { 0xd9e1, 0xd9e1, 0xd9e1, 0 }, + { 0xda6d, 0xda6d, 0xda6d, 0 }, + { 0xdaf8, 0xdaf8, 0xdaf8, 0 }, + { 0xdb83, 0xdb83, 0xdb83, 0 }, + { 0xdc0d, 0xdc0d, 0xdc0d, 0 }, + { 0xdc97, 0xdc97, 0xdc97, 0 }, + { 0xdd20, 0xdd20, 0xdd20, 0 }, + { 0xdda9, 0xdda9, 0xdda9, 0 }, + { 0xde31, 0xde31, 0xde31, 0 }, + { 0xdeb9, 0xdeb9, 0xdeb9, 0 }, + { 0xdf41, 0xdf41, 0xdf41, 0 }, + { 0xdfc8, 0xdfc8, 0xdfc8, 0 }, + { 0xe04f, 0xe04f, 0xe04f, 0 }, + { 0xe0d5, 0xe0d5, 0xe0d5, 0 }, + { 0xe15b, 0xe15b, 0xe15b, 0 }, + { 0xe1e0, 0xe1e0, 0xe1e0, 0 }, + { 0xe266, 0xe266, 0xe266, 0 }, + { 0xe2ea, 0xe2ea, 0xe2ea, 0 }, + { 0xe36f, 0xe36f, 0xe36f, 0 }, + { 0xe3f3, 0xe3f3, 0xe3f3, 0 }, + { 0xe476, 0xe476, 0xe476, 0 }, + { 0xe4f9, 0xe4f9, 0xe4f9, 0 }, + { 0xe57c, 0xe57c, 0xe57c, 0 }, + { 0xe5fe, 0xe5fe, 0xe5fe, 0 }, + { 0xe680, 0xe680, 0xe680, 0 }, + { 0xe702, 0xe702, 0xe702, 0 }, + { 0xe783, 0xe783, 0xe783, 0 }, + { 0xe804, 0xe804, 0xe804, 0 }, + { 0xe884, 0xe884, 0xe884, 0 }, + { 0xe905, 0xe905, 0xe905, 0 }, + { 0xe984, 0xe984, 0xe984, 0 }, + { 0xea04, 0xea04, 0xea04, 0 }, + { 0xea83, 0xea83, 0xea83, 0 }, + { 0xeb02, 0xeb02, 0xeb02, 0 }, + { 0xeb80, 0xeb80, 0xeb80, 0 }, + { 0xebfe, 0xebfe, 0xebfe, 0 }, + { 0xec7b, 0xec7b, 0xec7b, 0 }, + { 0xecf9, 0xecf9, 0xecf9, 0 }, + { 0xed76, 0xed76, 0xed76, 0 }, + { 0xedf2, 0xedf2, 0xedf2, 0 }, + { 0xee6f, 0xee6f, 0xee6f, 0 }, + { 0xeeeb, 0xeeeb, 0xeeeb, 0 }, + { 0xef66, 0xef66, 0xef66, 0 }, + { 0xefe2, 0xefe2, 0xefe2, 0 }, + { 0xf05d, 0xf05d, 0xf05d, 0 }, + { 0xf0d7, 0xf0d7, 0xf0d7, 0 }, + { 0xf152, 0xf152, 0xf152, 0 }, + { 0xf1cc, 0xf1cc, 0xf1cc, 0 }, + { 0xf245, 0xf245, 0xf245, 0 }, + { 0xf2bf, 0xf2bf, 0xf2bf, 0 }, + { 0xf338, 0xf338, 0xf338, 0 }, + { 0xf3b0, 0xf3b0, 0xf3b0, 0 }, + { 0xf429, 0xf429, 0xf429, 0 }, + { 0xf4a1, 0xf4a1, 0xf4a1, 0 }, + { 0xf519, 0xf519, 0xf519, 0 }, + { 0xf590, 
0xf590, 0xf590, 0 },
+	{ 0xf608, 0xf608, 0xf608, 0 },
+	{ 0xf67e, 0xf67e, 0xf67e, 0 },
+	{ 0xf6f5, 0xf6f5, 0xf6f5, 0 },
+	{ 0xf76b, 0xf76b, 0xf76b, 0 },
+	{ 0xf7e1, 0xf7e1, 0xf7e1, 0 },
+	{ 0xf857, 0xf857, 0xf857, 0 },
+	{ 0xf8cd, 0xf8cd, 0xf8cd, 0 },
+	{ 0xf942, 0xf942, 0xf942, 0 },
+	{ 0xf9b7, 0xf9b7, 0xf9b7, 0 },
+	{ 0xfa2b, 0xfa2b, 0xfa2b, 0 },
+	{ 0xfaa0, 0xfaa0, 0xfaa0, 0 },
+	{ 0xfb14, 0xfb14, 0xfb14, 0 },
+	{ 0xfb88, 0xfb88, 0xfb88, 0 },
+	{ 0xfbfb, 0xfbfb, 0xfbfb, 0 },
+	{ 0xfc6e, 0xfc6e, 0xfc6e, 0 },
+	{ 0xfce1, 0xfce1, 0xfce1, 0 },
+	{ 0xfd54, 0xfd54, 0xfd54, 0 },
+	{ 0xfdc6, 0xfdc6, 0xfdc6, 0 },
+	{ 0xfe39, 0xfe39, 0xfe39, 0 },
+	{ 0xfeaa, 0xfeaa, 0xfeaa, 0 },
+	{ 0xff1c, 0xff1c, 0xff1c, 0 },
+	{ 0xff8d, 0xff8d, 0xff8d, 0 },
+	{ 0xffff, 0xffff, 0xffff, 0 },
+};
+
+const struct vkms_color_lut srgb_inv_eotf = {
+	.base = srgb_inv_array,
+	.lut_length = LUT_SIZE,
+	.channel_value2index_ratio = 0xff00ffll
+};
+EXPORT_SYMBOL(srgb_inv_eotf);
diff --git a/drivers/gpu/drm/vkms/vkms_luts.h b/drivers/gpu/drm/vkms/vkms_luts.h
new file mode 100644
index 000000000000..925a4a7b84e2
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_luts.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_LUTS_H_
+#define _VKMS_LUTS_H_
+
+#define LUT_SIZE 256
+
+extern const struct vkms_color_lut linear_eotf;
+extern const struct vkms_color_lut srgb_eotf;
+extern const struct vkms_color_lut srgb_inv_eotf;
+
+#endif /* _VKMS_LUTS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 81d17b7dc0eb..86ce07a617f5 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -20,11 +20,7 @@ int vkms_output_init(struct vkms_device *vkmsdev)
 		return -EINVAL;
 
 	vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
-		enum drm_plane_type type;
-
-		type = vkms_config_plane_get_type(plane_cfg);
-
-		plane_cfg->plane = vkms_plane_init(vkmsdev, type);
+		plane_cfg->plane = vkms_plane_init(vkmsdev, plane_cfg);
 		if (IS_ERR(plane_cfg->plane)) {
 			DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
 			return PTR_ERR(plane_cfg->plane);
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 89a9fc91e059..19fe6acad306 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0+
 
+#include "vkms_config.h"
 #include <linux/iosys-map.h>
 
 #include <drm/drm_atomic.h>
@@ -218,7 +219,7 @@ static const struct drm_plane_helper_funcs vkms_plane_helper_funcs = {
 };
 
 struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
-				   enum drm_plane_type type)
+				   struct vkms_config_plane *plane_cfg)
 {
 	struct drm_device *dev = &vkmsdev->drm;
 	struct vkms_plane *plane;
@@ -226,7 +227,8 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
 	plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 0,
 					   &vkms_plane_funcs,
 					   vkms_formats, ARRAY_SIZE(vkms_formats),
-					   NULL, type, NULL);
+					   NULL, vkms_config_plane_get_type(plane_cfg),
+					   NULL);
 	if (IS_ERR(plane))
 		return plane;
 
@@ -244,5 +246,8 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
 				     DRM_COLOR_YCBCR_BT601,
 				     DRM_COLOR_YCBCR_FULL_RANGE);
 
+	if (vkms_config_plane_get_default_pipeline(plane_cfg))
+		vkms_initialize_colorops(&plane->base);
+
 	return plane;
 }
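All three tables above are 256-entry LUTs exported with channel_value2index_ratio = 0xff00ff, which is roughly (LUT_SIZE - 1) / 0xffff written as a 32-bit binary fraction. The code that consumes these LUTs is not part of the hunks shown here, so the following standalone sketch is only a hypothetical illustration of how such a ratio can turn a 16-bit channel value into a LUT index plus an interpolation weight; apply_lut_channel() and struct lut_entry are made-up names, not vkms symbols.

```c
#include <stdint.h>
#include <stdio.h>

#define LUT_SIZE 256

struct lut_entry {
	uint16_t red, green, blue, reserved;
};

/*
 * Hypothetical helper (not a vkms function): treat 'ratio' as a 32.32
 * fixed-point factor of (LUT_SIZE - 1) / 0xffff, use the integer part as the
 * LUT index and the fractional part to interpolate towards the next entry.
 * Assumes a non-decreasing LUT; only the red channel is sampled for brevity.
 */
static uint16_t apply_lut_channel(const struct lut_entry *lut, uint64_t ratio,
				  uint16_t value)
{
	uint64_t fixed = (uint64_t)value * ratio;	/* 32.32 fixed point */
	uint32_t index = (uint32_t)(fixed >> 32);	/* integer part */
	uint64_t frac = fixed & 0xffffffffull;		/* fractional part */
	uint32_t lo, hi;

	if (index >= LUT_SIZE - 1)
		return lut[LUT_SIZE - 1].red;

	lo = lut[index].red;
	hi = lut[index + 1].red;

	/* Linear interpolation between the two neighbouring entries. */
	return (uint16_t)(lo + (((uint64_t)(hi - lo) * frac) >> 32));
}

int main(void)
{
	struct lut_entry identity[LUT_SIZE];

	for (int i = 0; i < LUT_SIZE; i++) {
		uint16_t v = (uint16_t)(i * 0xffff / (LUT_SIZE - 1));
		identity[i] = (struct lut_entry){ v, v, v, 0 };
	}

	/* With an identity table the output tracks the input within one LSB. */
	printf("0x8000 -> 0x%04x\n",
	       (unsigned)apply_lut_channel(identity, 0xff00ffull, 0x8000));
	return 0;
}
```

Swapping the identity table for data shaped like srgb_eotf or srgb_inv_eotf would apply the corresponding transfer function instead; the mechanics of the lookup stay the same.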
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index e365df6af353..3f475f0e6545 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -71,6 +71,15 @@ u32 host1x_sync_readl(struct host1x *host1x, u32 r)
 	return readl(sync_regs + r);
 }
 
+#ifdef CONFIG_64BIT
+u64 host1x_sync_readq(struct host1x *host1x, u32 r)
+{
+	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
+
+	return readq(sync_regs + r);
+}
+#endif
+
 void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
 {
 	writel(v, ch->regs + r);
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index d3855a1c6b47..ef44618ed88a 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -179,6 +179,9 @@ void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r);
 u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r);
 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
 u32 host1x_sync_readl(struct host1x *host1x, u32 r);
+#ifdef CONFIG_64BIT
+u64 host1x_sync_readq(struct host1x *host1x, u32 r);
+#endif
 void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
 u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 415f8d7e4202..bd5b5ef62f35 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -11,26 +11,64 @@
 #include "../intr.h"
 #include "../dev.h"
 
+static void process_32_syncpts(struct host1x *host, unsigned long val, u32 reg_offset)
+{
+	unsigned int id;
+
+	if (!val)
+		return;
+
+	host1x_sync_writel(host, val, HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(reg_offset));
+	host1x_sync_writel(host, val, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(reg_offset));
+
+	for_each_set_bit(id, &val, 32)
+		host1x_intr_handle_interrupt(host, reg_offset * 32 + id);
+}
+
 static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
 {
 	struct host1x_intr_irq_data *irq_data = dev_id;
 	struct host1x *host = irq_data->host;
 	unsigned long reg;
-	unsigned int i, id;
+	unsigned int i;
 
+#if !defined(CONFIG_64BIT)
 	for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 32);
 	     i += host->num_syncpt_irqs) {
 		reg = host1x_sync_readl(host,
 			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
-		host1x_sync_writel(host, reg,
-			HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
-		host1x_sync_writel(host, reg,
+		process_32_syncpts(host, reg, i);
+	}
+#elif HOST1X_HW == 6 || HOST1X_HW == 7
+	/*
+	 * Tegra186 and Tegra194 have the first INT_STATUS register not 64-bit aligned,
+	 * and only have one interrupt line.
+	 */
+	reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(0));
+	process_32_syncpts(host, reg, 0);
+
+	for (i = 1; i < (host->info->nb_pts / 32) - 1; i += 2) {
+		reg = host1x_sync_readq(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
-		for_each_set_bit(id, &reg, 32)
-			host1x_intr_handle_interrupt(host, i * 32 + id);
+		process_32_syncpts(host, lower_32_bits(reg), i);
+		process_32_syncpts(host, upper_32_bits(reg), i + 1);
+	}
+
+	reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+	process_32_syncpts(host, reg, i);
+#else
+	/* All 64-bit capable SoCs have number of syncpoints divisible by 64 */
+	for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 64);
+	     i += host->num_syncpt_irqs) {
+		reg = host1x_sync_readq(host,
+			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i * 2));
+
+		process_32_syncpts(host, lower_32_bits(reg), i * 2 + 0);
+		process_32_syncpts(host, upper_32_bits(reg), i * 2 + 1);
 	}
+#endif
 
 	return IRQ_HANDLED;
 }
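The reworked syncpt_thresh_isr() above reads the threshold status registers in 64-bit chunks wherever the register layout allows it. As a rough standalone illustration (not host1x code; 576 syncpoints is only an example count, and the round-robin striding across several interrupt lines is left out), the sketch below prints the read pattern for the unaligned Tegra186/Tegra194 layout and for the aligned case:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative only: which 32-syncpoint INT_STATUS registers a single
 * interrupt line would read under the two 64-bit strategies shown above.
 */
static void show_reads(unsigned int nb_pts, bool first_reg_unaligned)
{
	unsigned int nb_regs = nb_pts / 32;	/* one status register per 32 syncpoints */
	unsigned int i;

	if (first_reg_unaligned) {
		/* Tegra186/Tegra194 style: register 0 alone, 64-bit pairs, last register alone. */
		printf("readl  reg %u\n", 0u);
		for (i = 1; i < nb_regs - 1; i += 2)
			printf("readq  regs %u-%u\n", i, i + 1);
		printf("readl  reg %u\n", i);
	} else {
		/* Aligned layout, syncpoint count divisible by 64: only 64-bit reads. */
		for (i = 0; i < nb_regs; i += 2)
			printf("readq  regs %u-%u\n", i, i + 1);
	}
}

int main(void)
{
	show_reads(576, true);
	show_reads(576, false);
	return 0;
}
```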
@@ -68,12 +106,12 @@ host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
 
 	/*
 	 * Program threshold interrupt destination among 8 lines per VM,
-	 * per syncpoint. For each group of 32 syncpoints (corresponding to one
-	 * interrupt status register), direct to one interrupt line, going
+	 * per syncpoint. For each group of 64 syncpoints (corresponding to two
+	 * interrupt status registers), direct to one interrupt line, going
 	 * around in a round robin fashion.
 	 */
 	for (id = 0; id < host->info->nb_pts; id++) {
-		u32 reg_offset = id / 32;
+		u32 reg_offset = id / 64;
 		u32 irq_index = reg_offset % host->num_syncpt_irqs;
 
 		host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id));
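The hunk above steers threshold interrupts per group of 64 syncpoints (two status registers) instead of 32, matching the 64-bit status reads in the ISR. A small standalone illustration of the resulting round-robin mapping, using made-up counts (8 interrupt lines, 704 syncpoints) rather than values from any particular SoC:

```c
#include <stdio.h>

int main(void)
{
	/* Example values only: 8 interrupt lines, 704 syncpoints. */
	unsigned int num_syncpt_irqs = 8, nb_pts = 704;

	/* Mirrors irq_index = (id / 64) % num_syncpt_irqs from the hunk above. */
	for (unsigned int id = 0; id < nb_pts; id += 64)
		printf("syncpts %3u-%3u -> irq line %u\n",
		       id, id + 63, (id / 64) % num_syncpt_irqs);

	return 0;
}
```

With these counts the eleven groups wrap around once, so lines 0-2 each service two groups while the remaining lines service one; the per-line ISR then only walks the status register pairs whose groups were steered to it.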