author     Simona Vetter <simona.vetter@ffwll.ch>    2025-10-24 13:25:19 +0200
committer  Simona Vetter <simona.vetter@ffwll.ch>    2025-10-24 13:25:20 +0200
commit     098456f3141bf9e0c0d8973695ca38a03465ccd6 (patch)
tree       07d1d0013bd58f689f0687b0c64b0cfcc0fad5c1 /drivers/gpu/drm
parent     6200442de089468ff283becb81382d6ac23c25e9 (diff)
parent     7ea0468380216c10b73633b976d33efa8c12d375 (diff)
Merge tag 'drm-misc-next-2025-10-21' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for v6.19:
UAPI Changes:
amdxdna:
- Support reading last hardware error
Cross-subsystem Changes:
dma-buf:
- heaps: Create heap per CMA reserved location; Improve user-space documentation
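For context on the per-CMA heaps, here is a minimal user-space sketch of allocating from a named DMA-BUF heap via the existing dma-heap UAPI. The heap name "reserved-cma" and the error handling are placeholders; with per-CMA-region heaps, each reserved-memory region is expected to show up as its own node under /dev/dma_heap/.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

/* Returns a dma-buf fd on success, -1 on failure. */
static int alloc_from_heap(const char *heap_name, size_t len)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,	/* flags for the returned dma-buf fd */
	};
	char path[128];
	int heap_fd, ret;

	snprintf(path, sizeof(path), "/dev/dma_heap/%s", heap_name);
	heap_fd = open(path, O_RDWR | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -1;

	return data.fd;	/* dma-buf fd, usable for mmap() or import elsewhere */
}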
Core Changes:
atomic:
- Clean up and improve state-handling interfaces, update drivers
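Part of this work converts callers from drm_atomic_get_existing_crtc_state() to the drm_atomic_get_new_*() accessors (see the malidp, armada and atmel-hlcdc hunks below). A minimal sketch of the resulting pattern in a plane ->atomic_check(); the function name and scaling limits are placeholders, not part of this merge.

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane.h>

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	/* Valid because the bound CRTC is already part of this atomic state. */
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   true, false);
}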
bridge:
- Improve ref counting
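The ref-counting work caches bridge pointers only with a reference held and relies on scope-based cleanup to drop them (see the drm_bridge_connector and drm_bridge hunks below). A minimal sketch of that pattern, with a placeholder function name:

#include <linux/cleanup.h>
#include <linux/list.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

static void foo_use_last_bridge(struct drm_encoder *encoder)
{
	/* Take a reference; __free(drm_bridge_put) drops it when the scope ends. */
	struct drm_bridge *bridge __free(drm_bridge_put) =
		drm_bridge_get(list_last_entry(&encoder->bridge_chain,
					       struct drm_bridge, chain_node));

	/* ... use bridge; no explicit drm_bridge_put() needed on any return path */
}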
buddy:
- Optimize block management
Driver Changes:
amdxdna:
- Fix runtime power management
- Support firmware debug output
ast:
- Set quirks for each chip model
atmel-hlcdc:
- Set LCDC_ATTRE register in plane disable
- Set correct values for plane scaler
bochs:
- Use vblank timer
bridge:
- synopsys: Support CEC; Init timer with correct frequency
cirrus-qemu:
- Use vblank timer
imx:
- Clean up
ivpu:
- Update JSM API to 3.33.0
- Reset engine on more job errors
- Return correct error codes for jobs
komeda:
- Use drm_ logging functions
panel:
- edp: Support AUO B116XAN02.0
panfrost:
- Embed struct drm_driver in Panfrost device
- Improve error handling
- Clean up job handling
panthor:
- Support custom ASN_HASH for mt8196
renesas:
- rz-du: Fix dependencies
rockchip:
- dsi: Add support for RK3368
- Fix LUT size for RK3386
sitronix:
- Fix output position when clearing screens
qaic:
- Support dma-buf exports
- Support new firmware's READ_DATA implementation
- Replace kcalloc with memdup
- Replace snprintf() with sysfs_emit()
- Avoid overflows in arithmetic
- Clean up
- Fixes
qxl:
- Use vblank timer
rockchip:
- Clean up mode-setting code
vgem:
- Fix fence timer deadlock
virtgpu:
- Use vblank timer
Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://lore.kernel.org/r/20251021111837.GA40643@linux.fritz.box
Diffstat (limited to 'drivers/gpu/drm')
100 files changed, 1552 insertions, 781 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3d032c4e2dce..aa3736de238d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5219,7 +5219,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) dev_warn(adev->dev, "smart shift update failed\n"); if (notify_clients) - drm_client_dev_suspend(adev_to_drm(adev), false); + drm_client_dev_suspend(adev_to_drm(adev)); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -5353,7 +5353,7 @@ exit: flush_delayed_work(&adev->delayed_init_work); if (notify_clients) - drm_client_dev_resume(adev_to_drm(adev), false); + drm_client_dev_resume(adev_to_drm(adev)); amdgpu_ras_resume(adev); @@ -5958,7 +5958,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) if (r) goto out; - drm_client_dev_resume(adev_to_drm(tmp_adev), false); + drm_client_dev_resume(adev_to_drm(tmp_adev)); /* * The GPU enters bad state once faulty pages @@ -6293,7 +6293,7 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - drm_client_dev_suspend(adev_to_drm(tmp_adev), false); + drm_client_dev_suspend(adev_to_drm(tmp_adev)); /* disable ras on ALL IPs */ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) && diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6597475e245d..32f7850abc61 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -12446,7 +12446,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, int j = state->num_private_objs-1; dm_atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); /* If i is not at the end of the array then the * last element needs to be moved to where i was @@ -12457,7 +12457,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, state->private_objs[j]; state->private_objs[j].ptr = NULL; - state->private_objs[j].state = NULL; + state->private_objs[j].state_to_destroy = NULL; state->private_objs[j].old_state = NULL; state->private_objs[j].new_state = NULL; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 2ad33559a33a..5a66948ffd24 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -111,6 +111,7 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc, static int komeda_crtc_prepare(struct komeda_crtc *kcrtc) { + struct drm_device *drm = kcrtc->base.dev; struct komeda_dev *mdev = kcrtc->base.dev->dev_private; struct komeda_pipeline *master = kcrtc->master; struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state); @@ -128,8 +129,8 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) err = mdev->funcs->change_opmode(mdev, new_mode); if (err) { - DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,", - mdev->dpmode, new_mode); + drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,", + mdev->dpmode, new_mode); goto unlock; } @@ -142,18 +143,18 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) if (new_mode != KOMEDA_MODE_DUAL_DISP) { err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st)); if (err) - DRM_ERROR("failed to set aclk.\n"); + drm_err(drm, "failed to set aclk.\n"); err = clk_prepare_enable(mdev->aclk); if (err) - DRM_ERROR("failed to enable aclk.\n"); + drm_err(drm, 
"failed to enable aclk.\n"); } err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000); if (err) - DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id); + drm_err(drm, "failed to set pxlclk for pipe%d\n", master->id); err = clk_prepare_enable(master->pxlclk); if (err) - DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id); + drm_err(drm, "failed to enable pxl clk for pipe%d.\n", master->id); unlock: mutex_unlock(&mdev->lock); @@ -164,6 +165,7 @@ unlock: static int komeda_crtc_unprepare(struct komeda_crtc *kcrtc) { + struct drm_device *drm = kcrtc->base.dev; struct komeda_dev *mdev = kcrtc->base.dev->dev_private; struct komeda_pipeline *master = kcrtc->master; u32 new_mode; @@ -180,8 +182,8 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc) err = mdev->funcs->change_opmode(mdev, new_mode); if (err) { - DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,", - mdev->dpmode, new_mode); + drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,", + mdev->dpmode, new_mode); goto unlock; } @@ -200,6 +202,7 @@ unlock: void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, struct komeda_events *evts) { + struct drm_device *drm = kcrtc->base.dev; struct drm_crtc *crtc = &kcrtc->base; u32 events = evts->pipes[kcrtc->master->id]; @@ -212,7 +215,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, if (wb_conn) drm_writeback_signal_completion(&wb_conn->base, 0); else - DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n", + drm_warn(drm, "CRTC[%d]: EOW happen but no wb_connector.\n", drm_crtc_index(&kcrtc->base)); } /* will handle it together with the write back support */ @@ -236,7 +239,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, crtc->state->event = NULL; drm_crtc_send_vblank_event(crtc, event); } else { - DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n", + drm_warn(drm, "CRTC[%d]: FLIP happened but no pending commit.\n", drm_crtc_index(&kcrtc->base)); } spin_unlock_irqrestore(&crtc->dev->event_lock, flags); @@ -309,7 +312,7 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, /* wait the flip take affect.*/ if (wait_for_completion_timeout(flip_done, HZ) == 0) { - DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id); + drm_err(drm, "wait pipe%d flip done timeout\n", kcrtc->master->id); if (!input_flip_done) { unsigned long flags; @@ -562,6 +565,7 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = { int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev) { + struct drm_device *drm = &kms->base; struct komeda_crtc *crtc; struct komeda_pipeline *master; char str[16]; @@ -581,7 +585,7 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, else sprintf(str, "None"); - DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n", + drm_info(drm, "CRTC-%d: master(pipe-%d) slave(%s).\n", kms->n_crtcs, master->id, str); kms->n_crtcs++; @@ -613,6 +617,7 @@ static int komeda_attach_bridge(struct device *dev, struct komeda_pipeline *pipe, struct drm_encoder *encoder) { + struct drm_device *drm = encoder->dev; struct drm_bridge *bridge; int err; @@ -624,7 +629,7 @@ static int komeda_attach_bridge(struct device *dev, err = drm_bridge_attach(encoder, bridge, NULL, 0); if (err) - dev_err(dev, "bridge_attach() failed for pipe: %s\n", + drm_err(drm, "bridge_attach() failed for pipe: %s\n", of_node_full_name(pipe->of_node)); return err; diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 87f2e5ee8790..f1a5014bcfa1 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ 
b/drivers/gpu/drm/arm/malidp_planes.c @@ -263,7 +263,7 @@ static int malidp_se_check_scaling(struct malidp_plane *mp, struct drm_plane_state *state) { struct drm_crtc_state *crtc_state = - drm_atomic_get_existing_crtc_state(state->state, state->crtc); + drm_atomic_get_new_crtc_state(state->state, state->crtc); struct malidp_crtc_state *mc; u32 src_w, src_h; int ret; diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c index cc47c032dbc1..dae81ebafdb4 100644 --- a/drivers/gpu/drm/armada/armada_plane.c +++ b/drivers/gpu/drm/armada/armada_plane.c @@ -94,12 +94,7 @@ int armada_drm_plane_atomic_check(struct drm_plane *plane, return 0; } - if (state) - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); - else - crtc_state = crtc->state; - + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 0, INT_MAX, true, false); diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c index 03b0dcea43d1..fa3bc23ce098 100644 --- a/drivers/gpu/drm/ast/ast_2000.c +++ b/drivers/gpu/drm/ast/ast_2000.c @@ -211,6 +211,11 @@ void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post) __ast_device_set_tx_chip(ast, tx_chip); } +static const struct ast_device_quirks ast_2000_device_quirks = { + .crtc_mem_req_threshold_low = 31, + .crtc_mem_req_threshold_high = 47, +}; + struct drm_device *ast_2000_device_create(struct pci_dev *pdev, const struct drm_driver *drv, enum ast_chip chip, @@ -228,7 +233,9 @@ struct drm_device *ast_2000_device_create(struct pci_dev *pdev, return ERR_CAST(ast); dev = &ast->base; - ast_device_init(ast, chip, config_mode, regs, ioregs); + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2000_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; ast_2000_detect_tx_chip(ast, need_post); diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c index 540972daec52..05aeb0624d41 100644 --- a/drivers/gpu/drm/ast/ast_2100.c +++ b/drivers/gpu/drm/ast/ast_2100.c @@ -432,6 +432,11 @@ static void ast_2100_detect_widescreen(struct ast_device *ast) ast->support_wuxga = true; } +static const struct ast_device_quirks ast_2100_device_quirks = { + .crtc_mem_req_threshold_low = 47, + .crtc_mem_req_threshold_high = 63, +}; + struct drm_device *ast_2100_device_create(struct pci_dev *pdev, const struct drm_driver *drv, enum ast_chip chip, @@ -449,7 +454,9 @@ struct drm_device *ast_2100_device_create(struct pci_dev *pdev, return ERR_CAST(ast); dev = &ast->base; - ast_device_init(ast, chip, config_mode, regs, ioregs); + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2100_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; ast_2000_detect_tx_chip(ast, need_post); diff --git a/drivers/gpu/drm/ast/ast_2200.c b/drivers/gpu/drm/ast/ast_2200.c index 4795966dc2a7..b64345d11ffa 100644 --- a/drivers/gpu/drm/ast/ast_2200.c +++ b/drivers/gpu/drm/ast/ast_2200.c @@ -43,6 +43,11 @@ static void ast_2200_detect_widescreen(struct ast_device *ast) ast->support_wuxga = true; } +static const struct ast_device_quirks ast_2200_device_quirks = { + .crtc_mem_req_threshold_low = 47, + .crtc_mem_req_threshold_high = 63, +}; + struct drm_device *ast_2200_device_create(struct pci_dev *pdev, const struct drm_driver *drv, enum ast_chip chip, @@ -60,7 +65,9 @@ struct drm_device *ast_2200_device_create(struct pci_dev *pdev, return ERR_CAST(ast); dev = &ast->base; - ast_device_init(ast, chip, config_mode, regs, ioregs); + ast_device_init(ast, 
chip, config_mode, regs, ioregs, &ast_2200_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; ast_2000_detect_tx_chip(ast, need_post); diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c index d1d63e58f3d6..5f50d9f91ffd 100644 --- a/drivers/gpu/drm/ast/ast_2300.c +++ b/drivers/gpu/drm/ast/ast_2300.c @@ -1407,6 +1407,11 @@ static void ast_2300_detect_widescreen(struct ast_device *ast) ast->support_wuxga = true; } +static const struct ast_device_quirks ast_2300_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, +}; + struct drm_device *ast_2300_device_create(struct pci_dev *pdev, const struct drm_driver *drv, enum ast_chip chip, @@ -1424,7 +1429,9 @@ struct drm_device *ast_2300_device_create(struct pci_dev *pdev, return ERR_CAST(ast); dev = &ast->base; - ast_device_init(ast, chip, config_mode, regs, ioregs); + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2300_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; ast_2300_detect_tx_chip(ast); diff --git a/drivers/gpu/drm/ast/ast_2400.c b/drivers/gpu/drm/ast/ast_2400.c index 596338ea22f4..2e6befd24f91 100644 --- a/drivers/gpu/drm/ast/ast_2400.c +++ b/drivers/gpu/drm/ast/ast_2400.c @@ -44,6 +44,11 @@ static void ast_2400_detect_widescreen(struct ast_device *ast) ast->support_wuxga = true; } +static const struct ast_device_quirks ast_2400_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, +}; + struct drm_device *ast_2400_device_create(struct pci_dev *pdev, const struct drm_driver *drv, enum ast_chip chip, @@ -61,7 +66,9 @@ struct drm_device *ast_2400_device_create(struct pci_dev *pdev, return ERR_CAST(ast); dev = &ast->base; - ast_device_init(ast, chip, config_mode, regs, ioregs); + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2400_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; ast_2300_detect_tx_chip(ast); diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c index 2c56db644f06..2a52af0ded56 100644 --- a/drivers/gpu/drm/ast/ast_2500.c +++ b/drivers/gpu/drm/ast/ast_2500.c @@ -618,6 +618,12 @@ static void ast_2500_detect_widescreen(struct ast_device *ast) ast->support_wuxga = true; } +static const struct ast_device_quirks ast_2500_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, + .crtc_hsync_precatch_needed = true, +}; + struct drm_device *ast_2500_device_create(struct pci_dev *pdev, const struct drm_driver *drv, enum ast_chip chip, @@ -635,7 +641,9 @@ struct drm_device *ast_2500_device_create(struct pci_dev *pdev, return ERR_CAST(ast); dev = &ast->base; - ast_device_init(ast, chip, config_mode, regs, ioregs); + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2500_device_quirks); + + ast->dclk_table = ast_2500_dclk_table; ast_2300_detect_tx_chip(ast); diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c index 30490c473797..dee78fd5b022 100644 --- a/drivers/gpu/drm/ast/ast_2600.c +++ b/drivers/gpu/drm/ast/ast_2600.c @@ -59,6 +59,13 @@ static void ast_2600_detect_widescreen(struct ast_device *ast) ast->support_wuxga = true; } +static const struct ast_device_quirks ast_2600_device_quirks = { + .crtc_mem_req_threshold_low = 160, + .crtc_mem_req_threshold_high = 224, + .crtc_hsync_precatch_needed = true, + .crtc_hsync_add4_needed = true, +}; + struct drm_device *ast_2600_device_create(struct pci_dev *pdev, const struct drm_driver *drv, enum ast_chip chip, @@ -76,7 +83,9 @@ struct drm_device 
*ast_2600_device_create(struct pci_dev *pdev, return ERR_CAST(ast); dev = &ast->base; - ast_device_init(ast, chip, config_mode, regs, ioregs); + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2600_device_quirks); + + ast->dclk_table = ast_2500_dclk_table; ast_2300_detect_tx_chip(ast); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index a89735c6a462..b9a9b050b546 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -51,8 +51,10 @@ void ast_device_init(struct ast_device *ast, enum ast_chip chip, enum ast_config_mode config_mode, void __iomem *regs, - void __iomem *ioregs) + void __iomem *ioregs, + const struct ast_device_quirks *quirks) { + ast->quirks = quirks; ast->chip = chip; ast->config_mode = config_mode; ast->regs = regs; diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 35c476c85b9a..7be36a358e74 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -164,9 +164,31 @@ to_ast_connector(struct drm_connector *connector) * Device */ +struct ast_device_quirks { + /* + * CRTC memory request threshold + */ + unsigned char crtc_mem_req_threshold_low; + unsigned char crtc_mem_req_threshold_high; + + /* + * Adjust hsync values to load next scanline early. Signalled + * by AST2500PreCatchCRT in VBIOS mode flags. + */ + bool crtc_hsync_precatch_needed; + + /* + * Workaround for modes with HSync Time that is not a multiple + * of 8 (e.g., 1920x1080@60Hz, HSync +44 pixels). + */ + bool crtc_hsync_add4_needed; +}; + struct ast_device { struct drm_device base; + const struct ast_device_quirks *quirks; + void __iomem *regs; void __iomem *ioregs; void __iomem *dp501_fw_buf; @@ -174,6 +196,8 @@ struct ast_device { enum ast_config_mode config_mode; enum ast_chip chip; + const struct ast_vbios_dclk_info *dclk_table; + void __iomem *vram; unsigned long vram_base; unsigned long vram_size; @@ -412,7 +436,8 @@ void ast_device_init(struct ast_device *ast, enum ast_chip chip, enum ast_config_mode config_mode, void __iomem *regs, - void __iomem *ioregs); + void __iomem *ioregs, + const struct ast_device_quirks *quirks); void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip); /* ast_2000.c */ diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 9b6a7c54fbb5..9ce874dba69c 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -241,16 +241,15 @@ static void ast_set_std_reg(struct ast_device *ast, ast_set_index_reg(ast, AST_IO_VGAGRI, i, stdtable->gr[i]); } -static void ast_set_crtc_reg(struct ast_device *ast, - struct drm_display_mode *mode, +static void ast_set_crtc_reg(struct ast_device *ast, struct drm_display_mode *mode, const struct ast_vbios_enhtable *vmode) { u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; - u16 temp, precache = 0; + u16 temp; + unsigned char crtc_hsync_precatch = 0; - if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) && - (vmode->flags & AST2500PreCatchCRT)) - precache = 40; + if (ast->quirks->crtc_hsync_precatch_needed && (vmode->flags & AST2500PreCatchCRT)) + crtc_hsync_precatch = 40; ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00); @@ -276,12 +275,12 @@ static void ast_set_crtc_reg(struct ast_device *ast, jregAD |= 0x01; /* HBE D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x03, 0xE0, (temp & 0x1f)); - temp = ((mode->crtc_hsync_start-precache) >> 3) - 1; + temp = ((mode->crtc_hsync_start - crtc_hsync_precatch) >> 3) - 1; if (temp & 0x100) 
jregAC |= 0x40; /* HRS D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x04, 0x00, temp); - temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f; + temp = (((mode->crtc_hsync_end - crtc_hsync_precatch) >> 3) - 1) & 0x3f; if (temp & 0x20) jregAD |= 0x04; /* HRE D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05)); @@ -289,8 +288,7 @@ static void ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAC, 0x00, jregAC); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAD, 0x00, jregAD); - // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels); - if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080)) + if (ast->quirks->crtc_hsync_add4_needed && mode->crtc_vdisplay == 1080) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x02); else ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x00); @@ -348,7 +346,7 @@ static void ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x09, 0xdf, jreg09); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAE, 0x00, (jregAE | 0x80)); - if (precache) + if (crtc_hsync_precatch) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x80); else ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x00); @@ -370,12 +368,7 @@ static void ast_set_dclk_reg(struct ast_device *ast, struct drm_display_mode *mode, const struct ast_vbios_enhtable *vmode) { - const struct ast_vbios_dclk_info *clk_info; - - if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) - clk_info = &ast_2500_dclk_table[vmode->dclk_index]; - else - clk_info = &ast_2000_dclk_table[vmode->dclk_index]; + const struct ast_vbios_dclk_info *clk_info = &ast->dclk_table[vmode->dclk_index]; ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2); @@ -415,20 +408,11 @@ static void ast_set_color_reg(struct ast_device *ast, static void ast_set_crtthd_reg(struct ast_device *ast) { - /* Set Threshold */ - if (IS_AST_GEN7(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0xe0); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0xa0); - } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x78); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x60); - } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x3f); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x2f); - } else { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x2f); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x1f); - } + u8 vgacra6 = ast->quirks->crtc_mem_req_threshold_low; + u8 vgacra7 = ast->quirks->crtc_mem_req_threshold_high; + + ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, vgacra7); + ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, vgacra6); } static void ast_set_sync_reg(struct ast_device *ast, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 0f7ffb3ced20..e0efc7309b1b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -20,6 +20,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -215,32 +216,32 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c, if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_XLCDC_CM), 10, 
1000)) - dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_SD); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_XLCDC_SD, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n"); } regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_DISP), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_SYNC), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_PIXEL_CLK), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n"); clk_disable_unprepare(crtc->dc->hlcdc->sys_clk); pinctrl_pm_select_sleep_state(dev->dev); @@ -269,32 +270,32 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c, if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_PIXEL_CLK, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_SYNC, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_DISP, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n"); if (crtc->dc->desc->is_xlcdc) { regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_CM); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_XLCDC_CM, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_SD); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_XLCDC_SD), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n"); } pm_runtime_put_sync(dev->dev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index fa8ad94e431a..acb017a2486b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -724,19 +724,19 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev) ret = atmel_hlcdc_create_outputs(dev); if (ret) { - dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret); + drm_err(dev, "failed to create HLCDC outputs: %d\n", ret); return ret; } ret = atmel_hlcdc_create_planes(dev); if (ret) { - dev_err(dev->dev, "failed to create planes: 
%d\n", ret); + drm_err(dev, "failed to create planes: %d\n", ret); return ret; } ret = atmel_hlcdc_crtc_create(dev); if (ret) { - dev_err(dev->dev, "failed to create crtc\n"); + drm_err(dev, "failed to create crtc\n"); return ret; } @@ -778,7 +778,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = clk_prepare_enable(dc->hlcdc->periph_clk); if (ret) { - dev_err(dev->dev, "failed to enable periph_clk\n"); + drm_err(dev, "failed to enable periph_clk\n"); return ret; } @@ -786,13 +786,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = drm_vblank_init(dev, 1); if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); + drm_err(dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); + drm_err(dev, "failed to initialize mode setting\n"); goto err_periph_clk_disable; } @@ -802,7 +802,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); if (ret < 0) { - dev_err(dev->dev, "failed to install IRQ handler\n"); + drm_err(dev, "failed to install IRQ handler\n"); goto err_periph_clk_disable; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h index e1a0bb24b511..53d47f01db0b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h @@ -378,7 +378,8 @@ struct atmel_lcdc_dc_ops { void (*lcdc_update_buffers)(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state, u32 sr, int i); - void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane); + void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc); void (*lcdc_update_general_settings)(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state); void (*lcdc_atomic_update)(struct atmel_hlcdc_plane *plane, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 50fee6a93964..0b8a86afb096 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -15,6 +15,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_encoder.h> #include <drm/drm_of.h> +#include <drm/drm_print.h> #include <drm/drm_simple_kms_helper.h> #include "atmel_hlcdc_dc.h" @@ -92,7 +93,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint) output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep); of_node_put(ep); if (output->bus_fmt < 0) { - dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint); + drm_err(dev, "endpoint %d: invalid bus width\n", endpoint); return -EINVAL; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 4a7ba0918eca..38f60befd7d7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -365,13 +365,34 @@ void atmel_xlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane, xfactor); /* - * With YCbCr 4:2:2 and YCbYcr 4:2:0 window resampling, configuration - * register LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT is half + * With YCbCr 4:2:0 window resampling, configuration register + * LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT values are half * the value of yfactor and xfactor. 
+ * + * On the other hand, with YCbCr 4:2:2 window resampling, only the + * configuration register LCDC_HEOCFG27.HXSCFACT value is half the value + * of the xfactor; the value of LCDC_HEOCFG25.VXSCFACT is yfactor (no + * division by 2). */ - if (state->base.fb->format->format == DRM_FORMAT_YUV420) { + switch (state->base.fb->format->format) { + /* YCbCr 4:2:2 */ + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_NV61: + xfactor /= 2; + break; + + /* YCbCr 4:2:0 */ + case DRM_FORMAT_YUV420: + case DRM_FORMAT_NV21: yfactor /= 2; xfactor /= 2; + break; + default: + break; } atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 2, @@ -714,7 +735,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, if (!hstate->base.crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, s->crtc); mode = &crtc_state->adjusted_mode; ret = drm_atomic_helper_check_plane_state(s, crtc_state, @@ -816,7 +837,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, return 0; } -static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) +static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc) { /* Disable interrupts */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR, @@ -832,7 +854,8 @@ static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR); } -static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) +static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc) { /* Disable interrupts */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IDR, @@ -842,6 +865,15 @@ static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_ENR, 0); + /* + * Updating XLCDC_xxxCFGx, XLCDC_xxxFBA and XLCDC_xxxEN, + * (where xxx indicates each layer) requires writing one to the + * Update Attribute field for each layer in LCDC_ATTRE register for SAM9X7. 
+ */ + regmap_write(dc->hlcdc->regmap, ATMEL_XLCDC_ATTRE, ATMEL_XLCDC_BASE_UPDATE | + ATMEL_XLCDC_OVR1_UPDATE | ATMEL_XLCDC_OVR3_UPDATE | + ATMEL_XLCDC_HEO_UPDATE); + /* Clear all pending interrupts */ atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR); } @@ -852,7 +884,7 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p, struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private; - dc->desc->ops->lcdc_atomic_disable(plane); + dc->desc->ops->lcdc_atomic_disable(plane, dc); } static void atmel_hlcdc_atomic_update(struct atmel_hlcdc_plane *plane, @@ -1034,7 +1066,7 @@ static void atmel_hlcdc_irq_dbg(struct atmel_hlcdc_plane *plane, if (isr & (ATMEL_HLCDC_LAYER_OVR_IRQ(0) | ATMEL_HLCDC_LAYER_OVR_IRQ(1) | ATMEL_HLCDC_LAYER_OVR_IRQ(2))) - dev_dbg(plane->base.dev->dev, "overrun on plane %s\n", + drm_dbg(plane->base.dev, "overrun on plane %s\n", desc->name); } @@ -1051,7 +1083,7 @@ static void atmel_xlcdc_irq_dbg(struct atmel_hlcdc_plane *plane, if (isr & (ATMEL_XLCDC_LAYER_OVR_IRQ(0) | ATMEL_XLCDC_LAYER_OVR_IRQ(1) | ATMEL_XLCDC_LAYER_OVR_IRQ(2))) - dev_dbg(plane->base.dev->dev, "overrun on plane %s\n", + drm_dbg(plane->base.dev, "overrun on plane %s\n", desc->name); } @@ -1140,7 +1172,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p) if (state) { if (atmel_hlcdc_plane_alloc_dscrs(p, state)) { kfree(state); - dev_err(p->dev->dev, + drm_err(p->dev, "Failed to allocate initial plane state\n"); return; } diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 2c5e532410de..a46df7583bcf 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -61,6 +61,14 @@ config DRM_DW_HDMI_QP select DRM_KMS_HELPER select REGMAP_MMIO +config DRM_DW_HDMI_QP_CEC + bool "Synopsis Designware QP CEC interface" + depends on DRM_DW_HDMI_QP + select DRM_DISPLAY_HDMI_CEC_HELPER + help + Support the CEC interface which is part of the Synopsys + Designware HDMI QP block. 
+ config DRM_DW_MIPI_DSI tristate select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 39332c57f2c5..4ba7b339eff6 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -18,6 +18,7 @@ #include <drm/bridge/dw_hdmi_qp.h> #include <drm/display/drm_hdmi_helper.h> +#include <drm/display/drm_hdmi_cec_helper.h> #include <drm/display/drm_hdmi_state_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -26,6 +27,8 @@ #include <drm/drm_edid.h> #include <drm/drm_modes.h> +#include <media/cec.h> + #include <sound/hdmi-codec.h> #include "dw-hdmi-qp.h" @@ -131,17 +134,34 @@ struct dw_hdmi_qp_i2c { bool is_segment; }; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC +struct dw_hdmi_qp_cec { + struct drm_connector *connector; + int irq; + u32 addresses; + struct cec_msg rx_msg; + u8 tx_status; + bool tx_done; + bool rx_done; +}; +#endif + struct dw_hdmi_qp { struct drm_bridge bridge; struct device *dev; struct dw_hdmi_qp_i2c *i2c; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC + struct dw_hdmi_qp_cec *cec; +#endif + struct { const struct dw_hdmi_qp_phy_ops *ops; void *data; } phy; + unsigned long ref_clk_rate; struct regmap *regm; unsigned long tmds_char_rate; @@ -965,6 +985,179 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge, } } +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC +static irqreturn_t dw_hdmi_qp_cec_hardirq(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_cec *cec = hdmi->cec; + irqreturn_t ret = IRQ_HANDLED; + u32 stat; + + stat = dw_hdmi_qp_read(hdmi, CEC_INT_STATUS); + if (stat == 0) + return IRQ_NONE; + + dw_hdmi_qp_write(hdmi, stat, CEC_INT_CLEAR); + + if (stat & CEC_STAT_LINE_ERR) { + cec->tx_status = CEC_TX_STATUS_ERROR; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } else if (stat & CEC_STAT_DONE) { + cec->tx_status = CEC_TX_STATUS_OK; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } else if (stat & CEC_STAT_NACK) { + cec->tx_status = CEC_TX_STATUS_NACK; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } + + if (stat & CEC_STAT_EOM) { + unsigned int len, i, val; + + val = dw_hdmi_qp_read(hdmi, CEC_RX_COUNT_STATUS); + len = (val & 0xf) + 1; + + if (len > sizeof(cec->rx_msg.msg)) + len = sizeof(cec->rx_msg.msg); + + for (i = 0; i < 4; i++) { + val = dw_hdmi_qp_read(hdmi, CEC_RX_DATA3_0 + i * 4); + cec->rx_msg.msg[i * 4] = val & 0xff; + cec->rx_msg.msg[i * 4 + 1] = (val >> 8) & 0xff; + cec->rx_msg.msg[i * 4 + 2] = (val >> 16) & 0xff; + cec->rx_msg.msg[i * 4 + 3] = (val >> 24) & 0xff; + } + + dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL); + + cec->rx_msg.len = len; + cec->rx_done = true; + + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +static irqreturn_t dw_hdmi_qp_cec_thread(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + if (cec->tx_done) { + cec->tx_done = false; + drm_connector_hdmi_cec_transmit_attempt_done(cec->connector, + cec->tx_status); + } + + if (cec->rx_done) { + cec->rx_done = false; + drm_connector_hdmi_cec_received_msg(cec->connector, &cec->rx_msg); + } + + return IRQ_HANDLED; +} + +static int dw_hdmi_qp_cec_init(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + cec->connector = connector; + + dw_hdmi_qp_write(hdmi, 0, CEC_TX_COUNT); + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, 0, 
CEC_INT_MASK_N); + + return devm_request_threaded_irq(hdmi->dev, cec->irq, + dw_hdmi_qp_cec_hardirq, + dw_hdmi_qp_cec_thread, IRQF_SHARED, + dev_name(hdmi->dev), hdmi); +} + +static int dw_hdmi_qp_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + if (logical_addr == CEC_LOG_ADDR_INVALID) + cec->addresses = 0; + else + cec->addresses |= BIT(logical_addr) | CEC_ADDR_BROADCAST; + + dw_hdmi_qp_write(hdmi, cec->addresses, CEC_ADDR); + + return 0; +} + +static int dw_hdmi_qp_cec_enable(struct drm_bridge *bridge, bool enable) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + unsigned int irqs; + u32 swdisable; + + if (!enable) { + dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N); + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + + swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE); + swdisable = swdisable | CEC_SWDISABLE; + dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE); + } else { + swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE); + swdisable = swdisable & ~CEC_SWDISABLE; + dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE); + + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL); + + dw_hdmi_qp_cec_log_addr(bridge, CEC_LOG_ADDR_INVALID); + + irqs = CEC_STAT_LINE_ERR | CEC_STAT_NACK | CEC_STAT_EOM | + CEC_STAT_DONE; + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, irqs, CEC_INT_MASK_N); + } + + return 0; +} + +static int dw_hdmi_qp_cec_transmit(struct drm_bridge *bridge, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + unsigned int i; + u32 val; + + for (i = 0; i < msg->len; i++) { + if (!(i % 4)) + val = msg->msg[i]; + if ((i % 4) == 1) + val |= msg->msg[i] << 8; + if ((i % 4) == 2) + val |= msg->msg[i] << 16; + if ((i % 4) == 3) + val |= msg->msg[i] << 24; + + if (i == (msg->len - 1) || (i % 4) == 3) + dw_hdmi_qp_write(hdmi, val, CEC_TX_DATA3_0 + (i / 4) * 4); + } + + dw_hdmi_qp_write(hdmi, msg->len - 1, CEC_TX_COUNT); + dw_hdmi_qp_write(hdmi, CEC_CTRL_START, CEC_TX_CONTROL); + + return 0; +} +#else +#define dw_hdmi_qp_cec_init NULL +#define dw_hdmi_qp_cec_enable NULL +#define dw_hdmi_qp_cec_log_addr NULL +#define dw_hdmi_qp_cec_transmit NULL +#endif /* CONFIG_DRM_DW_HDMI_QP_CEC */ + static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -979,6 +1172,10 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .hdmi_audio_startup = dw_hdmi_qp_audio_enable, .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable, .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare, + .hdmi_cec_init = dw_hdmi_qp_cec_init, + .hdmi_cec_enable = dw_hdmi_qp_cec_enable, + .hdmi_cec_log_addr = dw_hdmi_qp_cec_log_addr, + .hdmi_cec_transmit = dw_hdmi_qp_cec_transmit, }; static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) @@ -1014,13 +1211,11 @@ static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi) { dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N); dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N); - dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0); + dw_hdmi_qp_write(hdmi, hdmi->ref_clk_rate, TIMER_BASE_CONFIG0); /* Software reset */ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); - dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0); - dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0); /* 
Clear DONE and ERROR interrupts */ @@ -1066,6 +1261,13 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->phy.ops = plat_data->phy_ops; hdmi->phy.data = plat_data->phy_data; + if (plat_data->ref_clk_rate) { + hdmi->ref_clk_rate = plat_data->ref_clk_rate; + } else { + hdmi->ref_clk_rate = 428571429; + dev_warn(dev, "Set ref_clk_rate to vendor default\n"); + } + dw_hdmi_qp_init_hw(hdmi); ret = devm_request_threaded_irq(dev, plat_data->main_irq, @@ -1093,6 +1295,22 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->bridge.hdmi_audio_dev = dev; hdmi->bridge.hdmi_audio_dai_port = 1; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC + if (plat_data->cec_irq) { + hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER; + hdmi->bridge.hdmi_cec_dev = dev; + hdmi->bridge.hdmi_cec_adapter_name = dev_name(dev); + + hdmi->cec = devm_kzalloc(hdmi->dev, sizeof(*hdmi->cec), GFP_KERNEL); + if (!hdmi->cec) + return ERR_PTR(-ENOMEM); + + hdmi->cec->irq = plat_data->cec_irq; + } else { + dev_warn(dev, "Disabled CEC support due to missing IRQ\n"); + } +#endif + ret = devm_drm_bridge_add(dev, &hdmi->bridge); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h index 72987e6c4689..91a15f82e32a 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -488,9 +488,23 @@ #define AUDPKT_VBIT_OVR0 0xf24 /* CEC Registers */ #define CEC_TX_CONTROL 0x1000 +#define CEC_CTRL_CLEAR BIT(0) +#define CEC_CTRL_START BIT(0) #define CEC_STATUS 0x1004 +#define CEC_STAT_DONE BIT(0) +#define CEC_STAT_NACK BIT(1) +#define CEC_STAT_ARBLOST BIT(2) +#define CEC_STAT_LINE_ERR BIT(3) +#define CEC_STAT_RETRANS_FAIL BIT(4) +#define CEC_STAT_DISCARD BIT(5) +#define CEC_STAT_TX_BUSY BIT(8) +#define CEC_STAT_RX_BUSY BIT(9) +#define CEC_STAT_DRIVE_ERR BIT(10) +#define CEC_STAT_EOM BIT(11) +#define CEC_STAT_NOTIFY_ERR BIT(12) #define CEC_CONFIG 0x1008 #define CEC_ADDR 0x100c +#define CEC_ADDR_BROADCAST BIT(15) #define CEC_TX_COUNT 0x1020 #define CEC_TX_DATA3_0 0x1024 #define CEC_TX_DATA7_4 0x1028 diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c index f894ba52bdb5..ec5ab9f30547 100644 --- a/drivers/gpu/drm/clients/drm_fbdev_client.c +++ b/drivers/gpu/drm/clients/drm_fbdev_client.c @@ -62,26 +62,20 @@ err_drm_err: return ret; } -static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +static int drm_fbdev_client_suspend(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - if (holds_console_lock) - drm_fb_helper_set_suspend(fb_helper, true); - else - drm_fb_helper_set_suspend_unlocked(fb_helper, true); + drm_fb_helper_set_suspend_unlocked(fb_helper, true); return 0; } -static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock) +static int drm_fbdev_client_resume(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - if (holds_console_lock) - drm_fb_helper_set_suspend(fb_helper, false); - else - drm_fb_helper_set_suspend_unlocked(fb_helper, false); + drm_fb_helper_set_suspend_unlocked(fb_helper, false); return 0; } diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c index d239f1e3c456..fd8556dd58ed 100644 --- a/drivers/gpu/drm/clients/drm_log.c +++ b/drivers/gpu/drm/clients/drm_log.c @@ -319,7 +319,7 @@ static int drm_log_client_hotplug(struct 
drm_client_dev *client) return 0; } -static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock) +static int drm_log_client_suspend(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); @@ -328,7 +328,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l return 0; } -static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock) +static int drm_log_client_resume(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index a5bdd6c10643..7b18be3ff9a3 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -618,6 +618,20 @@ static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_f * Bridge Connector Initialisation */ +static void drm_bridge_connector_put_bridges(struct drm_device *dev, void *data) +{ + struct drm_bridge_connector *bridge_connector = (struct drm_bridge_connector *)data; + + drm_bridge_put(bridge_connector->bridge_edid); + drm_bridge_put(bridge_connector->bridge_hpd); + drm_bridge_put(bridge_connector->bridge_detect); + drm_bridge_put(bridge_connector->bridge_modes); + drm_bridge_put(bridge_connector->bridge_hdmi); + drm_bridge_put(bridge_connector->bridge_hdmi_audio); + drm_bridge_put(bridge_connector->bridge_dp_audio); + drm_bridge_put(bridge_connector->bridge_hdmi_cec); +} + /** * drm_bridge_connector_init - Initialise a connector for a chain of bridges * @drm: the DRM device @@ -638,7 +652,15 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, struct drm_bridge_connector *bridge_connector; struct drm_connector *connector; struct i2c_adapter *ddc = NULL; - struct drm_bridge *panel_bridge = NULL; + struct drm_bridge *panel_bridge __free(drm_bridge_put) = NULL; + struct drm_bridge *bridge_edid __free(drm_bridge_put) = NULL; + struct drm_bridge *bridge_hpd __free(drm_bridge_put) = NULL; + struct drm_bridge *bridge_detect __free(drm_bridge_put) = NULL; + struct drm_bridge *bridge_modes __free(drm_bridge_put) = NULL; + struct drm_bridge *bridge_hdmi __free(drm_bridge_put) = NULL; + struct drm_bridge *bridge_hdmi_audio __free(drm_bridge_put) = NULL; + struct drm_bridge *bridge_dp_audio __free(drm_bridge_put) = NULL; + struct drm_bridge *bridge_hdmi_cec __free(drm_bridge_put) = NULL; unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); unsigned int max_bpc = 8; bool support_hdcp = false; @@ -649,6 +671,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (!bridge_connector) return ERR_PTR(-ENOMEM); + ret = drmm_add_action(drm, drm_bridge_connector_put_bridges, bridge_connector); + if (ret) + return ERR_PTR(ret); + bridge_connector->encoder = encoder; /* @@ -672,22 +698,30 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (!bridge->ycbcr_420_allowed) connector->ycbcr_420_allowed = false; - if (bridge->ops & DRM_BRIDGE_OP_EDID) - bridge_connector->bridge_edid = bridge; - if (bridge->ops & DRM_BRIDGE_OP_HPD) - bridge_connector->bridge_hpd = bridge; - if (bridge->ops & DRM_BRIDGE_OP_DETECT) - bridge_connector->bridge_detect = bridge; - if (bridge->ops & DRM_BRIDGE_OP_MODES) - bridge_connector->bridge_modes = bridge; + if (bridge->ops & DRM_BRIDGE_OP_EDID) { + drm_bridge_put(bridge_edid); + bridge_edid = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_HPD) { + 
drm_bridge_put(bridge_hpd); + bridge_hpd = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_DETECT) { + drm_bridge_put(bridge_detect); + bridge_detect = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_MODES) { + drm_bridge_put(bridge_modes); + bridge_modes = drm_bridge_get(bridge); + } if (bridge->ops & DRM_BRIDGE_OP_HDMI) { - if (bridge_connector->bridge_hdmi) + if (bridge_hdmi) return ERR_PTR(-EBUSY); if (!bridge->funcs->hdmi_write_infoframe || !bridge->funcs->hdmi_clear_infoframe) return ERR_PTR(-EINVAL); - bridge_connector->bridge_hdmi = bridge; + bridge_hdmi = drm_bridge_get(bridge); if (bridge->supported_formats) supported_formats = bridge->supported_formats; @@ -696,10 +730,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, } if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) { - if (bridge_connector->bridge_hdmi_audio) + if (bridge_hdmi_audio) return ERR_PTR(-EBUSY); - if (bridge_connector->bridge_dp_audio) + if (bridge_dp_audio) return ERR_PTR(-EBUSY); if (!bridge->hdmi_audio_max_i2s_playback_channels && @@ -710,14 +744,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->hdmi_audio_shutdown) return ERR_PTR(-EINVAL); - bridge_connector->bridge_hdmi_audio = bridge; + bridge_hdmi_audio = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) { - if (bridge_connector->bridge_dp_audio) + if (bridge_dp_audio) return ERR_PTR(-EBUSY); - if (bridge_connector->bridge_hdmi_audio) + if (bridge_hdmi_audio) return ERR_PTR(-EBUSY); if (!bridge->hdmi_audio_max_i2s_playback_channels && @@ -728,7 +762,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->dp_audio_shutdown) return ERR_PTR(-EINVAL); - bridge_connector->bridge_dp_audio = bridge; + bridge_dp_audio = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { @@ -739,10 +773,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, } if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { - if (bridge_connector->bridge_hdmi_cec) + if (bridge_hdmi_cec) return ERR_PTR(-EBUSY); - bridge_connector->bridge_hdmi_cec = bridge; + bridge_hdmi_cec = drm_bridge_get(bridge); if (!bridge->funcs->hdmi_cec_enable || !bridge->funcs->hdmi_cec_log_addr || @@ -762,7 +796,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, ddc = bridge->ddc; if (drm_bridge_is_panel(bridge)) - panel_bridge = bridge; + panel_bridge = drm_bridge_get(bridge); if (bridge->support_hdcp) support_hdcp = true; @@ -771,13 +805,13 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (connector_type == DRM_MODE_CONNECTOR_Unknown) return ERR_PTR(-EINVAL); - if (bridge_connector->bridge_hdmi) { + if (bridge_hdmi) { if (!connector->ycbcr_420_allowed) supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420); ret = drmm_connector_hdmi_init(drm, connector, - bridge_connector->bridge_hdmi->vendor, - bridge_connector->bridge_hdmi->product, + bridge_hdmi->vendor, + bridge_hdmi->product, &drm_bridge_connector_funcs, &drm_bridge_connector_hdmi_funcs, connector_type, ddc, @@ -793,15 +827,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, return ERR_PTR(ret); } - if (bridge_connector->bridge_hdmi_audio || - bridge_connector->bridge_dp_audio) { + if (bridge_hdmi_audio || bridge_dp_audio) { struct device *dev; struct drm_bridge *bridge; - if (bridge_connector->bridge_hdmi_audio) - bridge = bridge_connector->bridge_hdmi_audio; + if (bridge_hdmi_audio) + 
bridge = bridge_hdmi_audio; else - bridge = bridge_connector->bridge_dp_audio; + bridge = bridge_dp_audio; dev = bridge->hdmi_audio_dev; @@ -815,9 +848,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, return ERR_PTR(ret); } - if (bridge_connector->bridge_hdmi_cec && - bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { - struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; + if (bridge_hdmi_cec && + bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { + struct drm_bridge *bridge = bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_notifier_register(connector, NULL, @@ -826,9 +859,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, return ERR_PTR(ret); } - if (bridge_connector->bridge_hdmi_cec && - bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { - struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; + if (bridge_hdmi_cec && + bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { + struct drm_bridge *bridge = bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_register(connector, &drm_bridge_connector_hdmi_cec_funcs, @@ -841,9 +874,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs); - if (bridge_connector->bridge_hpd) + if (bridge_hpd) connector->polled = DRM_CONNECTOR_POLL_HPD; - else if (bridge_connector->bridge_detect) + else if (bridge_detect) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; @@ -854,6 +887,15 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER)) drm_connector_attach_content_protection_property(connector, true); + bridge_connector->bridge_edid = drm_bridge_get(bridge_edid); + bridge_connector->bridge_hpd = drm_bridge_get(bridge_hpd); + bridge_connector->bridge_detect = drm_bridge_get(bridge_detect); + bridge_connector->bridge_modes = drm_bridge_get(bridge_modes); + bridge_connector->bridge_hdmi = drm_bridge_get(bridge_hdmi); + bridge_connector->bridge_hdmi_audio = drm_bridge_get(bridge_hdmi_audio); + bridge_connector->bridge_dp_audio = drm_bridge_get(bridge_dp_audio); + bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge_hdmi_cec); + return connector; } EXPORT_SYMBOL_GPL(drm_bridge_connector_init); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index ed5359a71f7e..be2cb6e43cb0 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -207,9 +207,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; connector->funcs->atomic_destroy_state(connector, - state->connectors[i].state); + state->connectors[i].state_to_destroy); state->connectors[i].ptr = NULL; - state->connectors[i].state = NULL; + state->connectors[i].state_to_destroy = NULL; state->connectors[i].old_state = NULL; state->connectors[i].new_state = NULL; drm_connector_put(connector); @@ -222,10 +222,10 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; crtc->funcs->atomic_destroy_state(crtc, - state->crtcs[i].state); + state->crtcs[i].state_to_destroy); state->crtcs[i].ptr = NULL; - state->crtcs[i].state = NULL; + state->crtcs[i].state_to_destroy = NULL; state->crtcs[i].old_state = NULL; state->crtcs[i].new_state = NULL; @@ -242,9 +242,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; plane->funcs->atomic_destroy_state(plane, - state->planes[i].state); + 
state->planes[i].state_to_destroy); state->planes[i].ptr = NULL; - state->planes[i].state = NULL; + state->planes[i].state_to_destroy = NULL; state->planes[i].old_state = NULL; state->planes[i].new_state = NULL; } @@ -253,9 +253,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) struct drm_private_obj *obj = state->private_objs[i].ptr; obj->funcs->atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); state->private_objs[i].ptr = NULL; - state->private_objs[i].state = NULL; + state->private_objs[i].state_to_destroy = NULL; state->private_objs[i].old_state = NULL; state->private_objs[i].new_state = NULL; } @@ -349,7 +349,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, WARN_ON(!state->acquire_ctx); - crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (crtc_state) return crtc_state; @@ -361,7 +361,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, if (!crtc_state) return ERR_PTR(-ENOMEM); - state->crtcs[index].state = crtc_state; + state->crtcs[index].state_to_destroy = crtc_state; state->crtcs[index].old_state = crtc->state; state->crtcs[index].new_state = crtc_state; state->crtcs[index].ptr = crtc; @@ -480,8 +480,8 @@ static int drm_atomic_connector_check(struct drm_connector *connector, } if (state->crtc) - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, + state->crtc); if (writeback_job->fb && !crtc_state->active) { drm_dbg_atomic(connector->dev, @@ -534,7 +534,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, WARN_ON(plane->old_fb); WARN_ON(plane->crtc); - plane_state = drm_atomic_get_existing_plane_state(state, plane); + plane_state = drm_atomic_get_new_plane_state(state, plane); if (plane_state) return plane_state; @@ -546,7 +546,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, if (!plane_state) return ERR_PTR(-ENOMEM); - state->planes[index].state = plane_state; + state->planes[index].state_to_destroy = plane_state; state->planes[index].ptr = plane; state->planes[index].old_state = plane->state; state->planes[index].new_state = plane_state; @@ -831,14 +831,14 @@ struct drm_private_state * drm_atomic_get_private_obj_state(struct drm_atomic_state *state, struct drm_private_obj *obj) { - int index, num_objs, i, ret; + int index, num_objs, ret; size_t size; struct __drm_private_objs_state *arr; struct drm_private_state *obj_state; - for (i = 0; i < state->num_private_objs; i++) - if (obj == state->private_objs[i].ptr) - return state->private_objs[i].state; + obj_state = drm_atomic_get_new_private_obj_state(state, obj); + if (obj_state) + return obj_state; ret = drm_modeset_lock(&obj->lock, state->acquire_ctx); if (ret) @@ -858,7 +858,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, if (!obj_state) return ERR_PTR(-ENOMEM); - state->private_objs[index].state = obj_state; + state->private_objs[index].state_to_destroy = obj_state; state->private_objs[index].old_state = obj->state; state->private_objs[index].new_state = obj_state; state->private_objs[index].ptr = obj; @@ -1152,15 +1152,16 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, state->num_connector = alloc; } - if (state->connectors[index].state) - return state->connectors[index].state; + connector_state = drm_atomic_get_new_connector_state(state, connector); + if (connector_state) + return connector_state; 
connector_state = connector->funcs->atomic_duplicate_state(connector); if (!connector_state) return ERR_PTR(-ENOMEM); drm_connector_get(connector); - state->connectors[index].state = connector_state; + state->connectors[index].state_to_destroy = connector_state; state->connectors[index].old_state = connector->state; state->connectors[index].new_state = connector_state; state->connectors[index].ptr = connector; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index d5ebe6ea0acb..5a473a274ff0 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3236,7 +3236,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_conn_state->state = state; new_conn_state->state = NULL; - state->connectors[i].state = old_conn_state; + state->connectors[i].state_to_destroy = old_conn_state; connector->state = new_conn_state; } @@ -3246,7 +3246,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_crtc_state->state = state; new_crtc_state->state = NULL; - state->crtcs[i].state = old_crtc_state; + state->crtcs[i].state_to_destroy = old_crtc_state; crtc->state = new_crtc_state; if (new_crtc_state->commit) { @@ -3266,7 +3266,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_plane_state->state = state; new_plane_state->state = NULL; - state->planes[i].state = old_plane_state; + state->planes[i].state_to_destroy = old_plane_state; plane->state = new_plane_state; } drm_panic_unlock(state->dev, flags); @@ -3277,7 +3277,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_obj_state->state = state; new_obj_state->state = NULL; - state->private_objs[i].state = old_obj_state; + state->private_objs[i].state_to_destroy = old_obj_state; obj->state = new_obj_state; } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 630b5e6594e0..53e7ece36dd9 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1086,12 +1086,12 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, struct drm_encoder *encoder = bridge->encoder; struct drm_bridge_state *last_bridge_state; unsigned int i, num_out_bus_fmts = 0; - struct drm_bridge *last_bridge; u32 *out_bus_fmts; int ret = 0; - last_bridge = list_last_entry(&encoder->bridge_chain, - struct drm_bridge, chain_node); + struct drm_bridge *last_bridge __free(drm_bridge_put) = + drm_bridge_get(list_last_entry(&encoder->bridge_chain, + struct drm_bridge, chain_node)); last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, last_bridge); diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index a94061f373de..f2c92902e4a3 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -12,8 +12,17 @@ #include <drm/drm_buddy.h> +enum drm_buddy_free_tree { + DRM_BUDDY_CLEAR_TREE = 0, + DRM_BUDDY_DIRTY_TREE, + DRM_BUDDY_MAX_FREE_TREES, +}; + static struct kmem_cache *slab_blocks; +#define for_each_free_tree(tree) \ + for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++) + static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, struct drm_buddy_block *parent, unsigned int order, @@ -31,6 +40,8 @@ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, block->header |= order; block->parent = parent; + RB_CLEAR_NODE(&block->rb); + BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED); return block; } @@ -41,23 +52,64 @@ static void drm_block_free(struct drm_buddy *mm, kmem_cache_free(slab_blocks, block); } 
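From this point the buddy allocator's bookkeeping changes shape: instead of one offset-sorted free list per order, it keeps two red-black trees per order, one for cleared and one for dirty blocks, reached as mm->free_trees[tree][order] and ordered by block offset. As a reading aid for the helpers that follow, the indexing boils down to the following (illustrative sketch, not code from the patch):

/*
 * Illustrative restatement of the free-tree indexing introduced here: a
 * free block lives in the clear or dirty tree for its order, keyed by its
 * offset.  rbtree_insert()/rbtree_remove() below implement exactly this.
 */
static struct rb_root *block_free_root(struct drm_buddy *mm,
				       struct drm_buddy_block *block)
{
	enum drm_buddy_free_tree tree = drm_buddy_block_is_clear(block) ?
					DRM_BUDDY_CLEAR_TREE :
					DRM_BUDDY_DIRTY_TREE;

	return &mm->free_trees[tree][drm_buddy_block_order(block)];
}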
-static void list_insert_sorted(struct drm_buddy *mm, - struct drm_buddy_block *block) +static enum drm_buddy_free_tree +get_block_tree(struct drm_buddy_block *block) { - struct drm_buddy_block *node; - struct list_head *head; + return drm_buddy_block_is_clear(block) ? + DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; +} - head = &mm->free_list[drm_buddy_block_order(block)]; - if (list_empty(head)) { - list_add(&block->link, head); - return; - } +static struct drm_buddy_block * +rbtree_get_free_block(const struct rb_node *node) +{ + return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL; +} - list_for_each_entry(node, head, link) - if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node)) - break; +static struct drm_buddy_block * +rbtree_last_free_block(struct rb_root *root) +{ + return rbtree_get_free_block(rb_last(root)); +} - __list_add(&block->link, node->link.prev, &node->link); +static bool rbtree_is_empty(struct rb_root *root) +{ + return RB_EMPTY_ROOT(root); +} + +static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block, + const struct drm_buddy_block *node) +{ + return drm_buddy_block_offset(block) < drm_buddy_block_offset(node); +} + +static bool rbtree_block_offset_less(struct rb_node *block, + const struct rb_node *node) +{ + return drm_buddy_block_offset_less(rbtree_get_free_block(block), + rbtree_get_free_block(node)); +} + +static void rbtree_insert(struct drm_buddy *mm, + struct drm_buddy_block *block, + enum drm_buddy_free_tree tree) +{ + rb_add(&block->rb, + &mm->free_trees[tree][drm_buddy_block_order(block)], + rbtree_block_offset_less); +} + +static void rbtree_remove(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + unsigned int order = drm_buddy_block_order(block); + enum drm_buddy_free_tree tree; + struct rb_root *root; + + tree = get_block_tree(block); + root = &mm->free_trees[tree][order]; + + rb_erase(&block->rb, root); + RB_CLEAR_NODE(&block->rb); } static void clear_reset(struct drm_buddy_block *block) @@ -70,29 +122,34 @@ static void mark_cleared(struct drm_buddy_block *block) block->header |= DRM_BUDDY_HEADER_CLEAR; } -static void mark_allocated(struct drm_buddy_block *block) +static void mark_allocated(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_ALLOCATED; - list_del(&block->link); + rbtree_remove(mm, block); } static void mark_free(struct drm_buddy *mm, struct drm_buddy_block *block) { + enum drm_buddy_free_tree tree; + block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_FREE; - list_insert_sorted(mm, block); + tree = get_block_tree(block); + rbtree_insert(mm, block, tree); } -static void mark_split(struct drm_buddy_block *block) +static void mark_split(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_SPLIT; - list_del(&block->link); + rbtree_remove(mm, block); } static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) @@ -148,7 +205,7 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm, mark_cleared(parent); } - list_del(&buddy->link); + rbtree_remove(mm, buddy); if (force_merge && drm_buddy_block_is_clear(buddy)) mm->clear_avail -= drm_buddy_block_size(mm, buddy); @@ -169,7 +226,7 @@ static int __force_merge(struct drm_buddy *mm, u64 end, unsigned int min_order) { - unsigned int order; + unsigned int tree, order; int i; if (!min_order) @@ -178,44 +235,48 @@ static int __force_merge(struct drm_buddy *mm, if (min_order > 
mm->max_order) return -EINVAL; - for (i = min_order - 1; i >= 0; i--) { - struct drm_buddy_block *block, *prev; + for_each_free_tree(tree) { + for (i = min_order - 1; i >= 0; i--) { + struct rb_node *iter = rb_last(&mm->free_trees[tree][i]); - list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) { - struct drm_buddy_block *buddy; - u64 block_start, block_end; + while (iter) { + struct drm_buddy_block *block, *buddy; + u64 block_start, block_end; - if (!block->parent) - continue; + block = rbtree_get_free_block(iter); + iter = rb_prev(iter); - block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block) - 1; + if (!block || !block->parent) + continue; - if (!contains(start, end, block_start, block_end)) - continue; + block_start = drm_buddy_block_offset(block); + block_end = block_start + drm_buddy_block_size(mm, block) - 1; - buddy = __get_buddy(block); - if (!drm_buddy_block_is_free(buddy)) - continue; + if (!contains(start, end, block_start, block_end)) + continue; - WARN_ON(drm_buddy_block_is_clear(block) == - drm_buddy_block_is_clear(buddy)); + buddy = __get_buddy(block); + if (!drm_buddy_block_is_free(buddy)) + continue; - /* - * If the prev block is same as buddy, don't access the - * block in the next iteration as we would free the - * buddy block as part of the free function. - */ - if (prev == buddy) - prev = list_prev_entry(prev, link); + WARN_ON(drm_buddy_block_is_clear(block) == + drm_buddy_block_is_clear(buddy)); - list_del(&block->link); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + /* + * Advance to the next node when the current node is the buddy, + * as freeing the block will also remove its buddy from the tree. + */ + if (iter == &buddy->rb) + iter = rb_prev(iter); - order = __drm_buddy_free(mm, block, true); - if (order >= min_order) - return 0; + rbtree_remove(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); + + order = __drm_buddy_free(mm, block, true); + if (order >= min_order) + return 0; + } } } @@ -236,8 +297,8 @@ static int __force_merge(struct drm_buddy *mm, */ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) { - unsigned int i; - u64 offset; + unsigned int i, j, root_count = 0; + u64 offset = 0; if (size < chunk_size) return -EINVAL; @@ -258,14 +319,22 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); - mm->free_list = kmalloc_array(mm->max_order + 1, - sizeof(struct list_head), - GFP_KERNEL); - if (!mm->free_list) + mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES, + sizeof(*mm->free_trees), + GFP_KERNEL); + if (!mm->free_trees) return -ENOMEM; - for (i = 0; i <= mm->max_order; ++i) - INIT_LIST_HEAD(&mm->free_list[i]); + for_each_free_tree(i) { + mm->free_trees[i] = kmalloc_array(mm->max_order + 1, + sizeof(struct rb_root), + GFP_KERNEL); + if (!mm->free_trees[i]) + goto out_free_tree; + + for (j = 0; j <= mm->max_order; ++j) + mm->free_trees[i][j] = RB_ROOT; + } mm->n_roots = hweight64(size); @@ -273,10 +342,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) sizeof(struct drm_buddy_block *), GFP_KERNEL); if (!mm->roots) - goto out_free_list; - - offset = 0; - i = 0; + goto out_free_tree; /* * Split into power-of-two blocks, in case we are given a size that is @@ -296,24 +362,26 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) mark_free(mm, root); - BUG_ON(i > 
mm->max_order); + BUG_ON(root_count > mm->max_order); BUG_ON(drm_buddy_block_size(mm, root) < chunk_size); - mm->roots[i] = root; + mm->roots[root_count] = root; offset += root_size; size -= root_size; - i++; + root_count++; } while (size); return 0; out_free_roots: - while (i--) - drm_block_free(mm, mm->roots[i]); + while (root_count--) + drm_block_free(mm, mm->roots[root_count]); kfree(mm->roots); -out_free_list: - kfree(mm->free_list); +out_free_tree: + while (i--) + kfree(mm->free_trees[i]); + kfree(mm->free_trees); return -ENOMEM; } EXPORT_SYMBOL(drm_buddy_init); @@ -323,7 +391,7 @@ EXPORT_SYMBOL(drm_buddy_init); * * @mm: DRM buddy manager to free * - * Cleanup memory manager resources and the freelist + * Cleanup memory manager resources and the freetree */ void drm_buddy_fini(struct drm_buddy *mm) { @@ -349,8 +417,9 @@ void drm_buddy_fini(struct drm_buddy *mm) WARN_ON(mm->avail != mm->size); + for_each_free_tree(i) + kfree(mm->free_trees[i]); kfree(mm->roots); - kfree(mm->free_list); } EXPORT_SYMBOL(drm_buddy_fini); @@ -374,8 +443,7 @@ static int split_block(struct drm_buddy *mm, return -ENOMEM; } - mark_free(mm, block->left); - mark_free(mm, block->right); + mark_split(mm, block); if (drm_buddy_block_is_clear(block)) { mark_cleared(block->left); @@ -383,7 +451,8 @@ static int split_block(struct drm_buddy *mm, clear_reset(block); } - mark_split(block); + mark_free(mm, block->left); + mark_free(mm, block->right); return 0; } @@ -412,10 +481,11 @@ EXPORT_SYMBOL(drm_get_buddy); * @is_clear: blocks clear state * * Reset the clear state based on @is_clear value for each block - * in the freelist. + * in the freetree. */ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) { + enum drm_buddy_free_tree src_tree, dst_tree; u64 root_size, size, start; unsigned int order; int i; @@ -430,19 +500,24 @@ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) size -= root_size; } + src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + dst_tree = is_clear ? 
DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + for (i = 0; i <= mm->max_order; ++i) { - struct drm_buddy_block *block; - - list_for_each_entry_reverse(block, &mm->free_list[i], link) { - if (is_clear != drm_buddy_block_is_clear(block)) { - if (is_clear) { - mark_cleared(block); - mm->clear_avail += drm_buddy_block_size(mm, block); - } else { - clear_reset(block); - mm->clear_avail -= drm_buddy_block_size(mm, block); - } + struct rb_root *root = &mm->free_trees[src_tree][i]; + struct drm_buddy_block *block, *tmp; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + rbtree_remove(mm, block); + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); } + + rbtree_insert(mm, block, dst_tree); } } } @@ -632,23 +707,17 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm, } static struct drm_buddy_block * -get_maxblock(struct drm_buddy *mm, unsigned int order, - unsigned long flags) +get_maxblock(struct drm_buddy *mm, + unsigned int order, + enum drm_buddy_free_tree tree) { struct drm_buddy_block *max_block = NULL, *block = NULL; + struct rb_root *root; unsigned int i; for (i = order; i <= mm->max_order; ++i) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + root = &mm->free_trees[tree][i]; + block = rbtree_last_free_block(root); if (!block) continue; @@ -667,46 +736,44 @@ get_maxblock(struct drm_buddy *mm, unsigned int order, } static struct drm_buddy_block * -alloc_from_freelist(struct drm_buddy *mm, +alloc_from_freetree(struct drm_buddy *mm, unsigned int order, unsigned long flags) { struct drm_buddy_block *block = NULL; + struct rb_root *root; + enum drm_buddy_free_tree tree; unsigned int tmp; int err; + tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ? + DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { - block = get_maxblock(mm, order, flags); + block = get_maxblock(mm, order, tree); if (block) /* Store the obtained block order */ tmp = drm_buddy_block_order(block); } else { for (tmp = order; tmp <= mm->max_order; ++tmp) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + /* Get RB tree root for this order and tree */ + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); if (block) break; } } if (!block) { - /* Fallback method */ + /* Try allocating from the other tree */ + tree = (tree == DRM_BUDDY_CLEAR_TREE) ? 
+ DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + for (tmp = order; tmp <= mm->max_order; ++tmp) { - if (!list_empty(&mm->free_list[tmp])) { - block = list_last_entry(&mm->free_list[tmp], - struct drm_buddy_block, - link); - if (block) - break; - } + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); + if (block) + break; } if (!block) @@ -771,7 +838,7 @@ static int __alloc_range(struct drm_buddy *mm, if (contains(start, end, block_start, block_end)) { if (drm_buddy_block_is_free(block)) { - mark_allocated(block); + mark_allocated(mm, block); total_allocated += drm_buddy_block_size(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) @@ -849,10 +916,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, { u64 rhs_offset, lhs_offset, lhs_size, filled; struct drm_buddy_block *block; - struct list_head *list; + unsigned int tree, order; LIST_HEAD(blocks_lhs); unsigned long pages; - unsigned int order; u64 modify_size; int err; @@ -862,35 +928,45 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, if (order == 0) return -ENOSPC; - list = &mm->free_list[order]; - if (list_empty(list)) - return -ENOSPC; + for_each_free_tree(tree) { + struct rb_root *root; + struct rb_node *iter; + + root = &mm->free_trees[tree][order]; + if (rbtree_is_empty(root)) + continue; - list_for_each_entry_reverse(block, list, link) { - /* Allocate blocks traversing RHS */ - rhs_offset = drm_buddy_block_offset(block); - err = __drm_buddy_alloc_range(mm, rhs_offset, size, - &filled, blocks); - if (!err || err != -ENOSPC) - return err; - - lhs_size = max((size - filled), min_block_size); - if (!IS_ALIGNED(lhs_size, min_block_size)) - lhs_size = round_up(lhs_size, min_block_size); - - /* Allocate blocks traversing LHS */ - lhs_offset = drm_buddy_block_offset(block) - lhs_size; - err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, - NULL, &blocks_lhs); - if (!err) { - list_splice(&blocks_lhs, blocks); - return 0; - } else if (err != -ENOSPC) { + iter = rb_last(root); + while (iter) { + block = rbtree_get_free_block(iter); + + /* Allocate blocks traversing RHS */ + rhs_offset = drm_buddy_block_offset(block); + err = __drm_buddy_alloc_range(mm, rhs_offset, size, + &filled, blocks); + if (!err || err != -ENOSPC) + return err; + + lhs_size = max((size - filled), min_block_size); + if (!IS_ALIGNED(lhs_size, min_block_size)) + lhs_size = round_up(lhs_size, min_block_size); + + /* Allocate blocks traversing LHS */ + lhs_offset = drm_buddy_block_offset(block) - lhs_size; + err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, + NULL, &blocks_lhs); + if (!err) { + list_splice(&blocks_lhs, blocks); + return 0; + } else if (err != -ENOSPC) { + drm_buddy_free_list_internal(mm, blocks); + return err; + } + /* Free blocks for the next iteration */ drm_buddy_free_list_internal(mm, blocks); - return err; + + iter = rb_prev(iter); } - /* Free blocks for the next iteration */ - drm_buddy_free_list_internal(mm, blocks); } return -ENOSPC; @@ -976,7 +1052,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm, list_add(&block->tmp_link, &dfs); err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); if (err) { - mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -999,8 +1075,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, return __drm_buddy_alloc_range_bias(mm, start, end, order, flags); else - /* Allocate from 
freelist */ - return alloc_from_freelist(mm, order, flags); + /* Allocate from freetree */ + return alloc_from_freetree(mm, order, flags); } /** @@ -1017,8 +1093,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, * alloc_range_bias() called on range limitations, which traverses * the tree and returns the desired block. * - * alloc_from_freelist() called when *no* range restrictions - * are enforced, which picks the block from the freelist. + * alloc_from_freetree() called when *no* range restrictions + * are enforced, which picks the block from the freetree. * * Returns: * 0 on success, error code on failure. @@ -1120,7 +1196,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, } } while (1); - mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -1201,12 +1277,18 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); for (order = mm->max_order; order >= 0; order--) { - struct drm_buddy_block *block; + struct drm_buddy_block *block, *tmp; + struct rb_root *root; u64 count = 0, free; + unsigned int tree; - list_for_each_entry(block, &mm->free_list[order], link) { - BUG_ON(!drm_buddy_block_is_free(block)); - count++; + for_each_free_tree(tree) { + root = &mm->free_trees[tree][order]; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + BUG_ON(!drm_buddy_block_is_free(block)); + count++; + } } drm_printf(p, "order-%2d ", order); diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c index c83196ad8b59..c3baeb4d4e6b 100644 --- a/drivers/gpu/drm/drm_client_event.c +++ b/drivers/gpu/drm/drm_client_event.c @@ -122,7 +122,7 @@ void drm_client_dev_restore(struct drm_device *dev) mutex_unlock(&dev->clientlist_mutex); } -static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +static int drm_client_suspend(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret = 0; @@ -131,7 +131,7 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_ return 0; if (client->funcs && client->funcs->suspend) - ret = client->funcs->suspend(client, holds_console_lock); + ret = client->funcs->suspend(client); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); client->suspended = true; @@ -139,20 +139,20 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_ return ret; } -void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock) +void drm_client_dev_suspend(struct drm_device *dev) { struct drm_client_dev *client; mutex_lock(&dev->clientlist_mutex); list_for_each_entry(client, &dev->clientlist, list) { if (!client->suspended) - drm_client_suspend(client, holds_console_lock); + drm_client_suspend(client); } mutex_unlock(&dev->clientlist_mutex); } EXPORT_SYMBOL(drm_client_dev_suspend); -static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock) +static int drm_client_resume(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret = 0; @@ -161,7 +161,7 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l return 0; if (client->funcs && client->funcs->resume) - ret = client->funcs->resume(client, holds_console_lock); + ret = client->funcs->resume(client); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); client->suspended = false; @@ -172,14 
+172,14 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l return ret; } -void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock) +void drm_client_dev_resume(struct drm_device *dev) { struct drm_client_dev *client; mutex_lock(&dev->clientlist_mutex); list_for_each_entry(client, &dev->clientlist, list) { if (client->suspended) - drm_client_resume(client, holds_console_lock); + drm_client_resume(client); } mutex_unlock(&dev->clientlist_mutex); } diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index adbb73f00d68..18e753ade001 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -1048,7 +1048,7 @@ retry: plane_state->crtc->base.id, plane_state->crtc->name, fb->base.id); - crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); ret = drm_atomic_add_affected_connectors(state, plane_state->crtc); if (ret) diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 90760d0ca071..0bec6f66682b 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -967,7 +967,7 @@ drm_vram_helper_mode_valid_internal(struct drm_device *dev, max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT; - fbsize = mode->hdisplay * mode->vdisplay * max_bpp; + fbsize = (u32)mode->hdisplay * mode->vdisplay * max_bpp; fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE); if (fbpages > max_fbpages) diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index e33c78fc8fbd..b488c91c20a5 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -691,7 +691,7 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev, const struct drm_simple_display_pipe_funcs *funcs, const struct drm_display_mode *mode, unsigned int rotation) { - size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16); + size_t bufsize = (u32)mode->vdisplay * mode->hdisplay * sizeof(u16); dbidev->drm.mode_config.preferred_depth = 16; diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index 988735560570..a57f6a10ada4 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -203,10 +203,10 @@ int drm_mode_config_helper_suspend(struct drm_device *dev) if (dev->mode_config.poll_enabled) drm_kms_helper_poll_disable(dev); - drm_client_dev_suspend(dev, false); + drm_client_dev_suspend(dev); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); /* * Don't enable polling if it was never initialized @@ -252,7 +252,7 @@ int drm_mode_config_helper_resume(struct drm_device *dev) DRM_ERROR("Failed to resume (%d)\n", ret); dev->mode_config.suspend_state = NULL; - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); /* * Don't enable polling if it is not initialized diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 7c3aa77186d3..6400070a4c9b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -58,7 +58,7 @@ static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state) struct drm_plane_state *state = &exynos_state->base; struct drm_crtc *crtc = state->crtc; struct drm_crtc_state *crtc_state = - drm_atomic_get_existing_crtc_state(state->state, crtc); + 
drm_atomic_get_new_crtc_state(state->state, crtc); struct drm_display_mode *mode = &crtc_state->adjusted_mode; int crtc_x, crtc_y; unsigned int crtc_w, crtc_h; diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index 3a208e956dff..76d77a736d84 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -69,7 +69,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format height = drm_rect_height(rect); len = drm_format_info_min_pitch(format, 0, width) * height; - buf = kmalloc(width * height, GFP_KERNEL); + buf = kmalloc_array(height, width, GFP_KERNEL); if (!buf) return 0; diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index a28c3710c4d5..89be8da79d3b 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -978,7 +978,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_runtime_pm_disable(&i915->runtime_pm); intel_power_domains_disable(display); - drm_client_dev_suspend(&i915->drm, false); + drm_client_dev_suspend(&i915->drm); if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(&i915->drm); intel_display_driver_disable_user_access(display); @@ -1061,7 +1061,7 @@ static int i915_drm_suspend(struct drm_device *dev) /* We do a lot of poking in a lot of registers, make sure they work * properly. */ intel_power_domains_disable(display); - drm_client_dev_suspend(dev, false); + drm_client_dev_suspend(dev); if (intel_display_device_present(display)) { drm_kms_helper_poll_disable(dev); intel_display_driver_disable_user_access(display); @@ -1245,7 +1245,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_opregion_resume(display); - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); intel_power_domains_enable(display); diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c index 86ecc22d0a55..d42f33d6f3fc 100644 --- a/drivers/gpu/drm/imx/dc/dc-ed.c +++ b/drivers/gpu/drm/imx/dc/dc-ed.c @@ -15,12 +15,12 @@ #include "dc-pe.h" #define PIXENGCFG_STATIC 0x8 -#define POWERDOWN BIT(4) -#define SYNC_MODE BIT(8) -#define SINGLE 0 #define DIV_MASK GENMASK(23, 16) #define DIV(x) FIELD_PREP(DIV_MASK, (x)) #define DIV_RESET 0x80 +#define SYNC_MODE BIT(8) +#define SINGLE 0 +#define POWERDOWN BIT(4) #define PIXENGCFG_DYNAMIC 0xc @@ -28,9 +28,9 @@ #define SYNC_TRIGGER BIT(0) #define STATICCONTROL 0x8 +#define PERFCOUNTMODE BIT(12) #define KICK_MODE BIT(8) #define EXTERNAL BIT(8) -#define PERFCOUNTMODE BIT(12) #define CONTROL 0xc #define GAMMAAPPLYENABLE BIT(0) diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c index 7f6c1852bf72..28f372be9247 100644 --- a/drivers/gpu/drm/imx/dc/dc-fg.c +++ b/drivers/gpu/drm/imx/dc/dc-fg.c @@ -56,9 +56,9 @@ #define FGINCTRL 0x5c #define FGINCTRLPANIC 0x60 -#define FGDM_MASK GENMASK(2, 0) -#define ENPRIMALPHA BIT(3) #define ENSECALPHA BIT(4) +#define ENPRIMALPHA BIT(3) +#define FGDM_MASK GENMASK(2, 0) #define FGCCR 0x64 #define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x)) diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c index f94c591c8158..1d8f74babef8 100644 --- a/drivers/gpu/drm/imx/dc/dc-fu.c +++ b/drivers/gpu/drm/imx/dc/dc-fu.c @@ -18,11 +18,11 @@ #define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x)) /* BURSTBUFFERMANAGEMENT */ +#define LINEMODE_MASK BIT(31) #define SETBURSTLENGTH_MASK GENMASK(12, 8) #define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x)) #define 
SETNUMBUFFERS_MASK GENMASK(7, 0) #define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x)) -#define LINEMODE_MASK BIT(31) /* SOURCEBUFFERATTRIBUTES */ #define BITSPERPIXEL_MASK GENMASK(21, 16) @@ -31,20 +31,20 @@ #define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1) /* SOURCEBUFFERDIMENSION */ -#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x)) #define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x)) +#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x)) /* LAYEROFFSET */ -#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x)) #define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x)) +#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x)) /* CLIPWINDOWOFFSET */ -#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x)) #define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x)) +#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x)) /* CLIPWINDOWDIMENSIONS */ -#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1) #define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1) +#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1) enum dc_linemode { /* diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h index e016e1ea5b4e..f678de3ca8c0 100644 --- a/drivers/gpu/drm/imx/dc/dc-fu.h +++ b/drivers/gpu/drm/imx/dc/dc-fu.h @@ -33,13 +33,13 @@ #define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x)) /* LAYERPROPERTY */ +#define SOURCEBUFFERENABLE BIT(31) #define YUVCONVERSIONMODE_MASK GENMASK(18, 17) #define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x)) -#define SOURCEBUFFERENABLE BIT(31) /* FRAMEDIMENSIONS */ -#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x)) #define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x)) +#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x)) /* CONTROL */ #define INPUTSELECT_MASK GENMASK(4, 3) diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c index 38f966625d38..ca1d714c8d6e 100644 --- a/drivers/gpu/drm/imx/dc/dc-lb.c +++ b/drivers/gpu/drm/imx/dc/dc-lb.c @@ -17,12 +17,12 @@ #include "dc-pe.h" #define PIXENGCFG_DYNAMIC 0x8 -#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0) -#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \ - FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x)) #define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8) #define PIXENGCFG_DYNAMIC_SEC_SEL(x) \ FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x)) +#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0) +#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \ + FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x)) #define STATICCONTROL 0x8 #define SHDTOKSEL_MASK GENMASK(4, 3) @@ -37,24 +37,24 @@ #define BLENDCONTROL 0x10 #define ALPHA_MASK GENMASK(23, 16) #define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x)) -#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0) -#define PRIM_C_BLD_FUNC(x) \ - FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x)) -#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4) -#define SEC_C_BLD_FUNC(x) \ - FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x)) -#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8) -#define PRIM_A_BLD_FUNC(x) \ - FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x)) #define SEC_A_BLD_FUNC_MASK GENMASK(14, 12) #define SEC_A_BLD_FUNC(x) \ FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x)) +#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8) +#define PRIM_A_BLD_FUNC(x) \ + FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x)) +#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4) +#define SEC_C_BLD_FUNC(x) \ + FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x)) +#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0) +#define PRIM_C_BLD_FUNC(x) \ + FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x)) #define POSITION 0x14 -#define XPOS_MASK GENMASK(15, 0) -#define 
XPOS(x) FIELD_PREP(XPOS_MASK, (x)) #define YPOS_MASK GENMASK(31, 16) #define YPOS(x) FIELD_PREP(YPOS_MASK, (x)) +#define XPOS_MASK GENMASK(15, 0) +#define XPOS(x) FIELD_PREP(XPOS_MASK, (x)) enum dc_lb_blend_func { DC_LAYERBLEND_BLEND_ZERO, diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c index d8b946fb90de..e40d5d66c5c1 100644 --- a/drivers/gpu/drm/imx/dc/dc-plane.c +++ b/drivers/gpu/drm/imx/dc/dc-plane.c @@ -106,7 +106,7 @@ dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) } crtc_state = - drm_atomic_get_existing_crtc_state(state, plane_state->crtc); + drm_atomic_get_new_crtc_state(state, plane_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c index ab6d32bad756..3a063a53c8df 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-plane.c +++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c @@ -159,8 +159,8 @@ static int dcss_plane_atomic_check(struct drm_plane *plane, dma_obj = drm_fb_dma_get_gem_obj(fb, 0); WARN_ON(!dma_obj); - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); hdisplay = crtc_state->adjusted_mode.hdisplay; vdisplay = crtc_state->adjusted_mode.vdisplay; diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c index 704c549750f9..df19560e41b4 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c @@ -386,8 +386,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, return -EINVAL; crtc_state = - drm_atomic_get_existing_crtc_state(state, - new_state->crtc); + drm_atomic_get_new_crtc_state(state, new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 9db1ceaed518..d3213fbf22be 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -247,8 +247,8 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct ingenic_drm_private_state *priv_state; unsigned int next_id; - priv_state = ingenic_drm_get_priv_state(priv, state); - if (WARN_ON(IS_ERR(priv_state))) + priv_state = ingenic_drm_get_new_priv_state(priv, state); + if (WARN_ON(!priv_state)) return; /* Set addresses of our DMA descriptor chains */ @@ -340,6 +340,7 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, crtc); struct ingenic_drm *priv = drm_crtc_get_priv(crtc); struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL; + struct ingenic_drm_private_state *priv_state; if (crtc_state->gamma_lut && drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) { @@ -347,6 +348,11 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + /* We will need the state in atomic_enable, so let's make sure it's part of the state */ + priv_state = ingenic_drm_get_priv_state(priv, state); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) { f1_state = drm_atomic_get_plane_state(crtc_state->state, &priv->f1); @@ -471,8 +477,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, if (priv->soc_info->plane_f0_not_working && plane == &priv->f0) return -EINVAL; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = 
drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index 26ebf424d63e..32638a713241 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -580,7 +580,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -705,7 +705,7 @@ ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane, ipu->sharpness = val; if (state->crtc) { - crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c index 9e0562aa2bcb..9562fe6711ff 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -129,8 +129,7 @@ static int kmb_plane_atomic_check(struct drm_plane *plane, } can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); crtc_state = - drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); + drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); return drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_NO_SCALING, diff --git a/drivers/gpu/drm/logicvc/logicvc_layer.c b/drivers/gpu/drm/logicvc/logicvc_layer.c index 464000aea765..eab4d773f92b 100644 --- a/drivers/gpu/drm/logicvc/logicvc_layer.c +++ b/drivers/gpu/drm/logicvc/logicvc_layer.c @@ -96,8 +96,8 @@ static int logicvc_plane_atomic_check(struct drm_plane *drm_plane, if (!new_state->crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(new_state->state, - new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c index aa9a97f9c4dc..2967a5cca069 100644 --- a/drivers/gpu/drm/loongson/lsdc_plane.c +++ b/drivers/gpu/drm/loongson/lsdc_plane.c @@ -196,7 +196,7 @@ static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane, return -EINVAL; } - crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); if (!crtc_state->active) return -EINVAL; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 02349bd44001..1b5667ddbb03 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -122,7 +122,8 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane, if (ret) return ret; - crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); return drm_atomic_helper_check_plane_state(plane->state, crtc_state, DRM_PLANE_NO_SCALING, diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 7c790406d533..4ca183fb61a9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -336,8 +336,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = 
drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -373,8 +372,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane, int min_scale, max_scale; int ret; - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 54aed3656a4c..00515623a2cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -765,7 +765,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) { struct nouveau_display *disp = nouveau_display(dev); - drm_client_dev_suspend(dev, false); + drm_client_dev_suspend(dev); if (drm_drv_uses_atomic_modeset(dev)) { if (!runtime) { @@ -796,7 +796,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime) } } - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); } int diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 24a2ded08b45..d74ef6694c10 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -229,7 +229,7 @@ static int omap_plane_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); /* we should have a crtc state if the plane is attached to a crtc */ if (WARN_ON(!crtc_state)) return 0; diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index ad6b03b59b52..944c7c70de55 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1888,6 +1888,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x203d, &delay_200_500_e50, "B140HTN02.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x205c, &delay_200_500_e50, "B116XAN02.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x208d, &delay_200_500_e50, "B140HTN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"), diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 5d0dce10336b..ac05df2a54fe 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -74,7 +74,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev, spin_unlock_irqrestore(&pfdevfreq->lock, irqflags); - dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", + dev_dbg(pfdev->base.dev, "busy %lu total %lu %lu %% freq %lu MHz\n", status->busy_time, status->total_time, status->busy_time / (status->total_time / 100), status->current_frequency / 1000 / 1000); @@ -119,7 +119,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) int ret; struct dev_pm_opp *opp; unsigned long cur_freq; - struct device *dev = &pfdev->pdev->dev; + struct device *dev = pfdev->base.dev; struct devfreq *devfreq; struct thermal_cooling_device *cooling; struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq; diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index 04bec27449cb..c61b97af120c 100644 --- 
a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -20,9 +20,9 @@ static int panfrost_reset_init(struct panfrost_device *pfdev) { - pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev); + pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->base.dev); if (IS_ERR(pfdev->rstc)) { - dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc)); + dev_err(pfdev->base.dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc)); return PTR_ERR(pfdev->rstc); } @@ -39,22 +39,22 @@ static int panfrost_clk_init(struct panfrost_device *pfdev) int err; unsigned long rate; - pfdev->clock = devm_clk_get(pfdev->dev, NULL); + pfdev->clock = devm_clk_get(pfdev->base.dev, NULL); if (IS_ERR(pfdev->clock)) { - dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock)); + dev_err(pfdev->base.dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock)); return PTR_ERR(pfdev->clock); } rate = clk_get_rate(pfdev->clock); - dev_info(pfdev->dev, "clock rate = %lu\n", rate); + dev_info(pfdev->base.dev, "clock rate = %lu\n", rate); err = clk_prepare_enable(pfdev->clock); if (err) return err; - pfdev->bus_clock = devm_clk_get_optional(pfdev->dev, "bus"); + pfdev->bus_clock = devm_clk_get_optional(pfdev->base.dev, "bus"); if (IS_ERR(pfdev->bus_clock)) { - dev_err(pfdev->dev, "get bus_clock failed %ld\n", + dev_err(pfdev->base.dev, "get bus_clock failed %ld\n", PTR_ERR(pfdev->bus_clock)); err = PTR_ERR(pfdev->bus_clock); goto disable_clock; @@ -62,7 +62,7 @@ static int panfrost_clk_init(struct panfrost_device *pfdev) if (pfdev->bus_clock) { rate = clk_get_rate(pfdev->bus_clock); - dev_info(pfdev->dev, "bus_clock rate = %lu\n", rate); + dev_info(pfdev->base.dev, "bus_clock rate = %lu\n", rate); err = clk_prepare_enable(pfdev->bus_clock); if (err) @@ -87,7 +87,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) { int ret, i; - pfdev->regulators = devm_kcalloc(pfdev->dev, pfdev->comp->num_supplies, + pfdev->regulators = devm_kcalloc(pfdev->base.dev, pfdev->comp->num_supplies, sizeof(*pfdev->regulators), GFP_KERNEL); if (!pfdev->regulators) @@ -96,12 +96,12 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) for (i = 0; i < pfdev->comp->num_supplies; i++) pfdev->regulators[i].supply = pfdev->comp->supply_names[i]; - ret = devm_regulator_bulk_get(pfdev->dev, + ret = devm_regulator_bulk_get(pfdev->base.dev, pfdev->comp->num_supplies, pfdev->regulators); if (ret < 0) { if (ret != -EPROBE_DEFER) - dev_err(pfdev->dev, "failed to get regulators: %d\n", + dev_err(pfdev->base.dev, "failed to get regulators: %d\n", ret); return ret; } @@ -109,7 +109,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) ret = regulator_bulk_enable(pfdev->comp->num_supplies, pfdev->regulators); if (ret < 0) { - dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret); + dev_err(pfdev->base.dev, "failed to enable regulators: %d\n", ret); return ret; } @@ -144,7 +144,7 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev) int err; int i, num_domains; - num_domains = of_count_phandle_with_args(pfdev->dev->of_node, + num_domains = of_count_phandle_with_args(pfdev->base.dev->of_node, "power-domains", "#power-domain-cells"); @@ -156,7 +156,7 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev) return 0; if (num_domains != pfdev->comp->num_pm_domains) { - dev_err(pfdev->dev, + dev_err(pfdev->base.dev, "Incorrect number of power domains: %d provided, %d needed\n", num_domains, 
pfdev->comp->num_pm_domains); return -EINVAL; @@ -168,20 +168,21 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev) for (i = 0; i < num_domains; i++) { pfdev->pm_domain_devs[i] = - dev_pm_domain_attach_by_name(pfdev->dev, - pfdev->comp->pm_domain_names[i]); + dev_pm_domain_attach_by_name(pfdev->base.dev, + pfdev->comp->pm_domain_names[i]); if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) { err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA; pfdev->pm_domain_devs[i] = NULL; - dev_err(pfdev->dev, + dev_err(pfdev->base.dev, "failed to get pm-domain %s(%d): %d\n", pfdev->comp->pm_domain_names[i], i, err); goto err; } - pfdev->pm_domain_links[i] = device_link_add(pfdev->dev, - pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME | - DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE); + pfdev->pm_domain_links[i] = + device_link_add(pfdev->base.dev, + pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME | + DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE); if (!pfdev->pm_domain_links[i]) { dev_err(pfdev->pm_domain_devs[i], "adding device link failed!\n"); @@ -220,20 +221,20 @@ int panfrost_device_init(struct panfrost_device *pfdev) err = panfrost_reset_init(pfdev); if (err) { - dev_err(pfdev->dev, "reset init failed %d\n", err); + dev_err(pfdev->base.dev, "reset init failed %d\n", err); goto out_pm_domain; } err = panfrost_clk_init(pfdev); if (err) { - dev_err(pfdev->dev, "clk init failed %d\n", err); + dev_err(pfdev->base.dev, "clk init failed %d\n", err); goto out_reset; } err = panfrost_devfreq_init(pfdev); if (err) { if (err != -EPROBE_DEFER) - dev_err(pfdev->dev, "devfreq init failed %d\n", err); + dev_err(pfdev->base.dev, "devfreq init failed %d\n", err); goto out_clk; } @@ -244,7 +245,7 @@ int panfrost_device_init(struct panfrost_device *pfdev) goto out_devfreq; } - pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0); + pfdev->iomem = devm_platform_ioremap_resource(to_platform_device(pfdev->base.dev), 0); if (IS_ERR(pfdev->iomem)) { err = PTR_ERR(pfdev->iomem); goto out_regulator; @@ -258,7 +259,7 @@ int panfrost_device_init(struct panfrost_device *pfdev) if (err) goto out_gpu; - err = panfrost_job_init(pfdev); + err = panfrost_jm_init(pfdev); if (err) goto out_mmu; @@ -268,7 +269,7 @@ int panfrost_device_init(struct panfrost_device *pfdev) return 0; out_job: - panfrost_job_fini(pfdev); + panfrost_jm_fini(pfdev); out_mmu: panfrost_mmu_fini(pfdev); out_gpu: @@ -289,7 +290,7 @@ out_pm_domain: void panfrost_device_fini(struct panfrost_device *pfdev) { panfrost_perfcnt_fini(pfdev); - panfrost_job_fini(pfdev); + panfrost_jm_fini(pfdev); panfrost_mmu_fini(pfdev); panfrost_gpu_fini(pfdev); panfrost_devfreq_fini(pfdev); @@ -399,13 +400,16 @@ bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev, return false; } -void panfrost_device_reset(struct panfrost_device *pfdev) +void panfrost_device_reset(struct panfrost_device *pfdev, bool enable_job_int) { panfrost_gpu_soft_reset(pfdev); panfrost_gpu_power_on(pfdev); panfrost_mmu_reset(pfdev); - panfrost_job_enable_interrupts(pfdev); + + panfrost_jm_reset_interrupts(pfdev); + if (enable_job_int) + panfrost_jm_enable_interrupts(pfdev); } static int panfrost_device_runtime_resume(struct device *dev) @@ -429,7 +433,7 @@ static int panfrost_device_runtime_resume(struct device *dev) } } - panfrost_device_reset(pfdev); + panfrost_device_reset(pfdev, true); panfrost_devfreq_resume(pfdev); return 0; @@ -447,11 +451,11 @@ static int panfrost_device_runtime_suspend(struct device *dev) { struct panfrost_device *pfdev = dev_get_drvdata(dev); - if 
(!panfrost_job_is_idle(pfdev)) + if (!panfrost_jm_is_idle(pfdev)) return -EBUSY; panfrost_devfreq_suspend(pfdev); - panfrost_job_suspend_irq(pfdev); + panfrost_jm_suspend_irq(pfdev); panfrost_mmu_suspend_irq(pfdev); panfrost_gpu_suspend_irq(pfdev); panfrost_gpu_power_off(pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 1e73efad02a8..e61c4329fd07 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -26,6 +26,10 @@ struct panfrost_perfcnt; #define MAX_PM_DOMAINS 5 +#define ALL_JS_INT_MASK \ + (GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | \ + GENMASK(NUM_JOB_SLOTS - 1, 0)) + enum panfrost_drv_comp_bits { PANFROST_COMP_BIT_GPU, PANFROST_COMP_BIT_JOB, @@ -124,9 +128,7 @@ struct panfrost_device_debugfs { }; struct panfrost_device { - struct device *dev; - struct drm_device *ddev; - struct platform_device *pdev; + struct drm_device base; int gpu_irq; int mmu_irq; @@ -145,7 +147,6 @@ struct panfrost_device { DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX); spinlock_t as_lock; - unsigned long as_in_use_mask; unsigned long as_alloc_mask; unsigned long as_faulty_mask; struct list_head as_lru_list; @@ -222,7 +223,7 @@ static inline bool panfrost_high_prio_allowed(struct drm_file *file) static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev) { - return ddev->dev_private; + return container_of(ddev, struct panfrost_device, base); } static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id) @@ -248,7 +249,7 @@ int panfrost_unstable_ioctl_check(void); int panfrost_device_init(struct panfrost_device *pfdev); void panfrost_device_fini(struct panfrost_device *pfdev); -void panfrost_device_reset(struct panfrost_device *pfdev); +void panfrost_device_reset(struct panfrost_device *pfdev, bool enable_job_int); extern const struct dev_pm_ops panfrost_pm_ops; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 22350ce8a08f..1c3c574cd64a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -36,7 +36,7 @@ static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev, { int ret; - ret = pm_runtime_resume_and_get(pfdev->dev); + ret = pm_runtime_resume_and_get(pfdev->base.dev); if (ret) return ret; @@ -44,14 +44,14 @@ static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev, *arg = panfrost_timestamp_read(pfdev); panfrost_cycle_counter_put(pfdev); - pm_runtime_put(pfdev->dev); + pm_runtime_put(pfdev->base.dev); return 0; } static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file) { struct drm_panfrost_get_param *param = data; - struct panfrost_device *pfdev = ddev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(ddev); int ret; if (param->pad != 0) @@ -283,7 +283,7 @@ fail: static int panfrost_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_file_priv *file_priv = file->driver_priv; struct drm_panfrost_submit *args = data; struct drm_syncobj *sync_out = NULL; @@ -457,7 +457,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, { struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_madvise *args = data; - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = 
to_panfrost_device(dev); struct drm_gem_object *gem_obj; struct panfrost_gem_object *bo; int ret = 0; @@ -590,7 +590,7 @@ static int panfrost_open(struct drm_device *dev, struct drm_file *file) { int ret; - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_file_priv *panfrost_priv; panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL); @@ -606,7 +606,7 @@ panfrost_open(struct drm_device *dev, struct drm_file *file) goto err_free; } - ret = panfrost_job_open(file); + ret = panfrost_jm_open(file); if (ret) goto err_job; @@ -625,7 +625,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file) struct panfrost_file_priv *panfrost_priv = file->driver_priv; panfrost_perfcnt_close(file); - panfrost_job_close(file); + panfrost_jm_close(file); panfrost_mmu_ctx_put(panfrost_priv->mmu); kfree(panfrost_priv); @@ -668,30 +668,25 @@ static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev, * job spent on the GPU. */ - static const char * const engine_names[] = { - "fragment", "vertex-tiler", "compute-only" - }; - - BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS); - for (i = 0; i < NUM_JOB_SLOTS - 1; i++) { if (pfdev->profile_mode) { drm_printf(p, "drm-engine-%s:\t%llu ns\n", - engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]); + panfrost_engine_names[i], + panfrost_priv->engine_usage.elapsed_ns[i]); drm_printf(p, "drm-cycles-%s:\t%llu\n", - engine_names[i], panfrost_priv->engine_usage.cycles[i]); + panfrost_engine_names[i], + panfrost_priv->engine_usage.cycles[i]); } drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n", - engine_names[i], pfdev->pfdevfreq.fast_rate); + panfrost_engine_names[i], pfdev->pfdevfreq.fast_rate); drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n", - engine_names[i], pfdev->pfdevfreq.current_frequency); + panfrost_engine_names[i], pfdev->pfdevfreq.current_frequency); } } static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file) { - struct drm_device *dev = file->minor->dev; - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(file->minor->dev); panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p); @@ -708,8 +703,7 @@ static const struct file_operations panfrost_drm_driver_fops = { static int panthor_gems_show(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(node->minor->dev); panfrost_gem_debugfs_print_bos(pfdev, m); @@ -758,7 +752,8 @@ static int show_file_jm_ctxs(struct panfrost_file_priv *pfile, } static struct drm_info_list panthor_debugfs_list[] = { - {"gems", panthor_gems_show, 0, NULL}, + {"gems", + panthor_gems_show, 0, NULL}, }; static int panthor_gems_debugfs_init(struct drm_minor *minor) @@ -865,15 +860,12 @@ static const struct drm_driver panfrost_drm_driver = { static int panfrost_probe(struct platform_device *pdev) { struct panfrost_device *pfdev; - struct drm_device *ddev; int err; - pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL); - if (!pfdev) - return -ENOMEM; - - pfdev->pdev = pdev; - pfdev->dev = &pdev->dev; + pfdev = devm_drm_dev_alloc(&pdev->dev, &panfrost_drm_driver, + struct panfrost_device, base); + if (IS_ERR(pfdev)) + return PTR_ERR(pfdev); platform_set_drvdata(pdev, pfdev); @@ -883,14 +875,6 @@ static int panfrost_probe(struct platform_device *pdev) pfdev->coherent = 
device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT; - /* Allocate and initialize the DRM device. */ - ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev); - if (IS_ERR(ddev)) - return PTR_ERR(ddev); - - ddev->dev_private = pfdev; - pfdev->ddev = ddev; - mutex_init(&pfdev->shrinker_lock); INIT_LIST_HEAD(&pfdev->shrinker_list); @@ -901,51 +885,47 @@ static int panfrost_probe(struct platform_device *pdev) goto err_out0; } - pm_runtime_set_active(pfdev->dev); - pm_runtime_mark_last_busy(pfdev->dev); - pm_runtime_enable(pfdev->dev); - pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */ - pm_runtime_use_autosuspend(pfdev->dev); + pm_runtime_set_active(pfdev->base.dev); + pm_runtime_mark_last_busy(pfdev->base.dev); + pm_runtime_enable(pfdev->base.dev); + pm_runtime_set_autosuspend_delay(pfdev->base.dev, 50); /* ~3 frames */ + pm_runtime_use_autosuspend(pfdev->base.dev); /* * Register the DRM device with the core and the connectors with * sysfs */ - err = drm_dev_register(ddev, 0); + err = drm_dev_register(&pfdev->base, 0); if (err < 0) goto err_out1; - err = panfrost_gem_shrinker_init(ddev); + err = panfrost_gem_shrinker_init(&pfdev->base); if (err) goto err_out2; return 0; err_out2: - drm_dev_unregister(ddev); + drm_dev_unregister(&pfdev->base); err_out1: - pm_runtime_disable(pfdev->dev); + pm_runtime_disable(pfdev->base.dev); panfrost_device_fini(pfdev); - pm_runtime_set_suspended(pfdev->dev); + pm_runtime_set_suspended(pfdev->base.dev); err_out0: - drm_dev_put(ddev); return err; } static void panfrost_remove(struct platform_device *pdev) { struct panfrost_device *pfdev = platform_get_drvdata(pdev); - struct drm_device *ddev = pfdev->ddev; - drm_dev_unregister(ddev); - panfrost_gem_shrinker_cleanup(ddev); + drm_dev_unregister(&pfdev->base); + panfrost_gem_shrinker_cleanup(&pfdev->base); - pm_runtime_get_sync(pfdev->dev); - pm_runtime_disable(pfdev->dev); + pm_runtime_get_sync(pfdev->base.dev); + pm_runtime_disable(pfdev->base.dev); panfrost_device_fini(pfdev); - pm_runtime_set_suspended(pfdev->dev); - - drm_dev_put(ddev); + pm_runtime_set_suspended(pfdev->base.dev); } static ssize_t profiling_show(struct device *dev, diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c index 4042afe2fbf4..3ed6c902d0a1 100644 --- a/drivers/gpu/drm/panfrost/panfrost_dump.c +++ b/drivers/gpu/drm/panfrost/panfrost_dump.c @@ -163,7 +163,7 @@ void panfrost_core_dump(struct panfrost_job *job) iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!iter.start) { - dev_warn(pfdev->dev, "failed to allocate devcoredump file\n"); + dev_warn(pfdev->base.dev, "failed to allocate devcoredump file\n"); return; } @@ -204,14 +204,14 @@ void panfrost_core_dump(struct panfrost_job *job) mapping = job->mappings[i]; if (!bo->base.sgt) { - dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n"); + dev_err(pfdev->base.dev, "Panfrost Dump: BO has no sgt, cannot dump\n"); iter.hdr->bomap.valid = 0; goto dump_header; } ret = drm_gem_vmap(&bo->base.base, &map); if (ret) { - dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n"); + dev_err(pfdev->base.dev, "Panfrost Dump: couldn't map Buffer Object\n"); iter.hdr->bomap.valid = 0; goto dump_header; } @@ -237,5 +237,5 @@ dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data + } panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data); - dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL); + dev_coredumpv(pfdev->base.dev, 
iter.start, iter.data - iter.start, GFP_KERNEL); } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 85d6289a6eda..0528de674a4f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -26,7 +26,7 @@ static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev, static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) { - struct panfrost_device *pfdev = bo->base.base.dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(bo->base.base.dev); if (list_empty(&bo->debugfs.node)) return; @@ -48,7 +48,7 @@ static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {} static void panfrost_gem_free_object(struct drm_gem_object *obj) { struct panfrost_gem_object *bo = to_panfrost_bo(obj); - struct panfrost_device *pfdev = obj->dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(obj->dev); /* * Make sure the BO is no longer inserted in the shrinker list before @@ -76,7 +76,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) for (i = 0; i < n_sgt; i++) { if (bo->sgts[i].sgl) { - dma_unmap_sgtable(pfdev->dev, &bo->sgts[i], + dma_unmap_sgtable(pfdev->base.dev, &bo->sgts[i], DMA_BIDIRECTIONAL, 0); sg_free_table(&bo->sgts[i]); } @@ -284,7 +284,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { */ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_gem_object *obj; obj = kzalloc(sizeof(*obj), GFP_KERNEL); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 02b60ea1433a..2fe967a90bcb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -97,7 +97,7 @@ panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) */ int panfrost_gem_shrinker_init(struct drm_device *dev) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); pfdev->shrinker = shrinker_alloc(0, "drm-panfrost"); if (!pfdev->shrinker) @@ -120,7 +120,7 @@ int panfrost_gem_shrinker_init(struct drm_device *dev) */ void panfrost_gem_shrinker_cleanup(struct drm_device *dev) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); if (pfdev->shrinker) shrinker_free(pfdev->shrinker); diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 174e190ba40f..8d049a07d393 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -36,12 +36,12 @@ static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data) u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32; address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO); - dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n", + dev_warn(pfdev->base.dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n", fault_status, panfrost_exception_name(fault_status & 0xFF), address); if (state & GPU_IRQ_MULTIPLE_FAULT) - dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n"); + dev_warn(pfdev->base.dev, "There were multiple GPU faults - some have not been reported\n"); gpu_write(pfdev, GPU_INT_MASK, 0); } @@ -72,13 +72,13 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) 
val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000); if (ret) { - dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n"); + dev_err(pfdev->base.dev, "gpu soft reset timed out, attempting hard reset\n"); gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET); ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000); if (ret) { - dev_err(pfdev->dev, "gpu hard reset timed out\n"); + dev_err(pfdev->base.dev, "gpu hard reset timed out\n"); return ret; } } @@ -95,7 +95,7 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) * All in-flight jobs should have released their cycle * counter references upon reset, but let us make sure */ - if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0)) + if (drm_WARN_ON(&pfdev->base, atomic_read(&pfdev->cycle_counter.use_count) != 0)) atomic_set(&pfdev->cycle_counter.use_count, 0); return 0; @@ -240,9 +240,10 @@ static const struct panfrost_model gpu_models[] = { /* MediaTek MT8188 Mali-G57 MC3 */ GPU_MODEL(g57, 0x9093, GPU_REV(g57, 0, 0)), + {0}, }; -static void panfrost_gpu_init_features(struct panfrost_device *pfdev) +static int panfrost_gpu_init_features(struct panfrost_device *pfdev) { u32 gpu_id, num_js, major, minor, status, rev; const char *name = "unknown"; @@ -327,16 +328,22 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) break; } + if (!model->name) { + dev_err(pfdev->base.dev, "GPU model not found: mali-%s id rev %#x %#x\n", + name, gpu_id, rev); + return -ENODEV; + } + bitmap_from_u64(pfdev->features.hw_features, hw_feat); bitmap_from_u64(pfdev->features.hw_issues, hw_issues); - dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x", + dev_info(pfdev->base.dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x", name, gpu_id, major, minor, status); - dev_info(pfdev->dev, "features: %64pb, issues: %64pb", + dev_info(pfdev->base.dev, "features: %64pb, issues: %64pb", pfdev->features.hw_features, pfdev->features.hw_issues); - dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x", + dev_info(pfdev->base.dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x", pfdev->features.l2_features, pfdev->features.core_features, pfdev->features.tiler_features, @@ -345,8 +352,10 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) pfdev->features.as_present, pfdev->features.js_present); - dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx", + dev_info(pfdev->base.dev, "shader_present=0x%0llx l2_present=0x%0llx", pfdev->features.shader_present, pfdev->features.l2_present); + + return 0; } void panfrost_cycle_counter_get(struct panfrost_device *pfdev) @@ -411,7 +420,7 @@ static u64 panfrost_get_core_mask(struct panfrost_device *pfdev) */ core_mask = ~(pfdev->features.l2_present - 1) & (pfdev->features.l2_present - 2); - dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n", + dev_info_once(pfdev->base.dev, "using only 1st core group (%lu cores from %lu)\n", hweight64(core_mask), hweight64(pfdev->features.shader_present)); @@ -432,7 +441,7 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) val, val == (pfdev->features.l2_present & core_mask), 10, 20000); if (ret) - dev_err(pfdev->dev, "error powering up gpu L2"); + dev_err(pfdev->base.dev, "error powering up gpu L2"); gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present & core_mask); @@ -440,13 +449,13 @@ void 
panfrost_gpu_power_on(struct panfrost_device *pfdev) val, val == (pfdev->features.shader_present & core_mask), 10, 20000); if (ret) - dev_err(pfdev->dev, "error powering up gpu shader"); + dev_err(pfdev->base.dev, "error powering up gpu shader"); gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO, val, val == pfdev->features.tiler_present, 10, 1000); if (ret) - dev_err(pfdev->dev, "error powering up gpu tiler"); + dev_err(pfdev->base.dev, "error powering up gpu tiler"); } void panfrost_gpu_power_off(struct panfrost_device *pfdev) @@ -458,19 +467,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev) ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO, val, !val, 1, 2000); if (ret) - dev_err(pfdev->dev, "shader power transition timeout"); + dev_err(pfdev->base.dev, "shader power transition timeout"); gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO, val, !val, 1, 2000); if (ret) - dev_err(pfdev->dev, "tiler power transition timeout"); + dev_err(pfdev->base.dev, "tiler power transition timeout"); gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present); ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO, val, !val, 0, 2000); if (ret) - dev_err(pfdev->dev, "l2 power transition timeout"); + dev_err(pfdev->base.dev, "l2 power transition timeout"); } void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev) @@ -489,23 +498,26 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) if (err) return err; - panfrost_gpu_init_features(pfdev); + err = panfrost_gpu_init_features(pfdev); + if (err) + return err; - err = dma_set_mask_and_coherent(pfdev->dev, - DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); + err = dma_set_mask_and_coherent(pfdev->base.dev, + DMA_BIT_MASK(FIELD_GET(0xff00, + pfdev->features.mmu_features))); if (err) return err; - dma_set_max_seg_size(pfdev->dev, UINT_MAX); + dma_set_max_seg_size(pfdev->base.dev, UINT_MAX); - pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); + pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "gpu"); if (pfdev->gpu_irq < 0) return pfdev->gpu_irq; - err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler, + err = devm_request_irq(pfdev->base.dev, pfdev->gpu_irq, panfrost_gpu_irq_handler, IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev); if (err) { - dev_err(pfdev->dev, "failed to request gpu irq"); + dev_err(pfdev->base.dev, "failed to request gpu irq"); return err; } @@ -525,9 +537,9 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev) if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) { /* Flush reduction only makes sense when the GPU is kept powered on between jobs */ - if (pm_runtime_get_if_in_use(pfdev->dev)) { + if (pm_runtime_get_if_in_use(pfdev->base.dev)) { flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID); - pm_runtime_put(pfdev->dev); + pm_runtime_put(pfdev->base.dev); return flush_id; } } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index c47d14eabbae..11894a6b9fcc 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -28,6 +28,10 @@ #define job_write(dev, reg, data) writel(data, dev->iomem + (reg)) #define job_read(dev, reg) readl(dev->iomem + (reg)) +const char * const panfrost_engine_names[] = { + "fragment", "vertex-tiler", "compute-only" 
+}; + struct panfrost_queue_state { struct drm_gpu_scheduler sched; u64 fence_context; @@ -95,7 +99,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in if (!fence) return ERR_PTR(-ENOMEM); - fence->dev = pfdev->ddev; + fence->dev = &pfdev->base; fence->queue = js_num; fence->seqno = ++js->queue[js_num].emit_seqno; dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock, @@ -196,7 +200,7 @@ panfrost_enqueue_job(struct panfrost_device *pfdev, int slot, return 1; } -static void panfrost_job_hw_submit(struct panfrost_job *job, int js) +static int panfrost_job_hw_submit(struct panfrost_job *job, int js) { struct panfrost_device *pfdev = job->pfdev; unsigned int subslot; @@ -204,17 +208,22 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) u64 jc_head = job->jc; int ret; - panfrost_devfreq_record_busy(&pfdev->pfdevfreq); - - ret = pm_runtime_get_sync(pfdev->dev); + ret = pm_runtime_get_sync(pfdev->base.dev); if (ret < 0) - return; + goto err_hwsubmit; if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) { - return; + ret = -EINVAL; + goto err_hwsubmit; } - cfg = panfrost_mmu_as_get(pfdev, job->mmu); + ret = panfrost_mmu_as_get(pfdev, job->mmu); + if (ret < 0) + goto err_hwsubmit; + + cfg = ret; + + panfrost_devfreq_record_busy(&pfdev->pfdevfreq); job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head)); job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head)); @@ -257,11 +266,17 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) } job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START); - dev_dbg(pfdev->dev, + dev_dbg(pfdev->base.dev, "JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d", job, js, subslot, jc_head, cfg & 0xf); } spin_unlock(&pfdev->js->job_lock); + + return 0; + +err_hwsubmit: + pm_runtime_put_autosuspend(pfdev->base.dev); + return ret; } static int panfrost_acquire_object_fences(struct drm_gem_object **bos, @@ -384,6 +399,7 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) struct panfrost_device *pfdev = job->pfdev; int slot = panfrost_job_get_slot(job); struct dma_fence *fence = NULL; + int ret; if (job->ctx->destroyed) return ERR_PTR(-ECANCELED); @@ -405,27 +421,27 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) dma_fence_put(job->done_fence); job->done_fence = dma_fence_get(fence); - panfrost_job_hw_submit(job, slot); + ret = panfrost_job_hw_submit(job, slot); + if (ret) { + dma_fence_put(fence); + return ERR_PTR(ret); + } return fence; } -void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) +void panfrost_jm_reset_interrupts(struct panfrost_device *pfdev) { - int j; - u32 irq_mask = 0; + job_write(pfdev, JOB_INT_CLEAR, ALL_JS_INT_MASK); +} +void panfrost_jm_enable_interrupts(struct panfrost_device *pfdev) +{ clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); - - for (j = 0; j < NUM_JOB_SLOTS; j++) { - irq_mask |= MK_JS_MASK(j); - } - - job_write(pfdev, JOB_INT_CLEAR, irq_mask); - job_write(pfdev, JOB_INT_MASK, irq_mask); + job_write(pfdev, JOB_INT_MASK, ALL_JS_INT_MASK); } -void panfrost_job_suspend_irq(struct panfrost_device *pfdev) +void panfrost_jm_suspend_irq(struct panfrost_device *pfdev) { set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); @@ -442,12 +458,12 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, bool signal_fence = true; if (!panfrost_exception_is_fault(js_status)) { - dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x", + 
dev_dbg(pfdev->base.dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x", js, exception_name, job_read(pfdev, JS_HEAD_LO(js)), job_read(pfdev, JS_TAIL_LO(js))); } else { - dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", + dev_err(pfdev->base.dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", js, exception_name, job_read(pfdev, JS_HEAD_LO(js)), job_read(pfdev, JS_TAIL_LO(js))); @@ -479,7 +495,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, if (signal_fence) dma_fence_signal_locked(job->done_fence); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); if (panfrost_exception_needs_reset(pfdev, js_status)) { atomic_set(&pfdev->reset.pending, 1); @@ -487,8 +503,8 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, } } -static void panfrost_job_handle_done(struct panfrost_device *pfdev, - struct panfrost_job *job) +static void panfrost_jm_handle_done(struct panfrost_device *pfdev, + struct panfrost_job *job) { /* Set ->jc to 0 to avoid re-submitting an already finished job (can * happen when we receive the DONE interrupt while doing a GPU reset). @@ -498,10 +514,10 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev, panfrost_devfreq_record_idle(&pfdev->pfdevfreq); dma_fence_signal_locked(job->done_fence); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); } -static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) +static void panfrost_jm_handle_irq(struct panfrost_device *pfdev, u32 status) { struct panfrost_job *done[NUM_JOB_SLOTS][2] = {}; struct panfrost_job *failed[NUM_JOB_SLOTS] = {}; @@ -576,7 +592,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) } for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++) - panfrost_job_handle_done(pfdev, done[j][i]); + panfrost_jm_handle_done(pfdev, done[j][i]); } /* And finally we requeue jobs that were waiting in the second slot @@ -594,7 +610,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j); dma_fence_set_error(canceled->done_fence, -ECANCELED); - panfrost_job_handle_done(pfdev, canceled); + panfrost_jm_handle_done(pfdev, canceled); } else if (!atomic_read(&pfdev->reset.pending)) { /* Requeue the job we removed if no reset is pending */ job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START); @@ -602,15 +618,15 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status) } } -static void panfrost_job_handle_irqs(struct panfrost_device *pfdev) +static void panfrost_jm_handle_irqs(struct panfrost_device *pfdev) { u32 status = job_read(pfdev, JOB_INT_RAWSTAT); while (status) { - pm_runtime_mark_last_busy(pfdev->dev); + pm_runtime_mark_last_busy(pfdev->base.dev); spin_lock(&pfdev->js->job_lock); - panfrost_job_handle_irq(pfdev, status); + panfrost_jm_handle_irq(pfdev, status); spin_unlock(&pfdev->js->job_lock); status = job_read(pfdev, JOB_INT_RAWSTAT); } @@ -688,10 +704,10 @@ panfrost_reset(struct panfrost_device *pfdev, 10, 10000); if (ret) - dev_err(pfdev->dev, "Soft-stop failed\n"); + dev_err(pfdev->base.dev, "Soft-stop failed\n"); /* Handle the remaining interrupts before we reset. */ - panfrost_job_handle_irqs(pfdev); + panfrost_jm_handle_irqs(pfdev); /* Remaining interrupts have been handled, but we might still have * stuck jobs. 
Let's make sure the PM counters stay balanced by @@ -706,7 +722,7 @@ panfrost_reset(struct panfrost_device *pfdev, if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT || pfdev->jobs[i][j]->is_profiled) panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev); - pm_runtime_put_noidle(pfdev->dev); + pm_runtime_put_noidle(pfdev->base.dev); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); } } @@ -714,12 +730,7 @@ panfrost_reset(struct panfrost_device *pfdev, spin_unlock(&pfdev->js->job_lock); /* Proceed with reset now. */ - panfrost_device_reset(pfdev); - - /* panfrost_device_reset() unmasks job interrupts, but we want to - * keep them masked a bit longer. - */ - job_write(pfdev, JOB_INT_MASK, 0); + panfrost_device_reset(pfdev, false); /* GPU has been reset, we can clear the reset pending bit. */ atomic_set(&pfdev->reset.pending, 0); @@ -741,9 +752,7 @@ panfrost_reset(struct panfrost_device *pfdev, drm_sched_start(&pfdev->js->queue[i].sched, 0); /* Re-enable job interrupts now that everything has been restarted. */ - job_write(pfdev, JOB_INT_MASK, - GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | - GENMASK(NUM_JOB_SLOTS - 1, 0)); + panfrost_jm_enable_interrupts(pfdev); dma_fence_end_signalling(cookie); } @@ -774,11 +783,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job synchronize_irq(pfdev->js->irq); if (dma_fence_is_signaled(job->done_fence)) { - dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n"); + dev_warn(pfdev->base.dev, "unexpectedly high interrupt latency\n"); return DRM_GPU_SCHED_STAT_NO_HANG; } - dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", + dev_err(pfdev->base.dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", js, job_read(pfdev, JS_CONFIG(js)), job_read(pfdev, JS_STATUS(js)), @@ -808,22 +817,20 @@ static const struct drm_sched_backend_ops panfrost_sched_ops = { .free_job = panfrost_job_free }; -static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data) +static irqreturn_t panfrost_jm_irq_handler_thread(int irq, void *data) { struct panfrost_device *pfdev = data; - panfrost_job_handle_irqs(pfdev); + panfrost_jm_handle_irqs(pfdev); /* Enable interrupts only if we're not about to get suspended */ if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended)) - job_write(pfdev, JOB_INT_MASK, - GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | - GENMASK(NUM_JOB_SLOTS - 1, 0)); + job_write(pfdev, JOB_INT_MASK, ALL_JS_INT_MASK); return IRQ_HANDLED; } -static irqreturn_t panfrost_job_irq_handler(int irq, void *data) +static irqreturn_t panfrost_jm_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; u32 status; @@ -839,19 +846,20 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) return IRQ_WAKE_THREAD; } -int panfrost_job_init(struct panfrost_device *pfdev) +int panfrost_jm_init(struct panfrost_device *pfdev) { struct drm_sched_init_args args = { .ops = &panfrost_sched_ops, .num_rqs = DRM_SCHED_PRIORITY_COUNT, .credit_limit = 2, .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), - .name = "pan_js", - .dev = pfdev->dev, + .dev = pfdev->base.dev, }; struct panfrost_job_slot *js; int ret, j; + BUILD_BUG_ON(ARRAY_SIZE(panfrost_engine_names) != NUM_JOB_SLOTS); + /* All GPUs have two entries per queue, but without jobchain * disambiguation stopping the right job in the close path is tricky, * so let's just advertise one entry in that case. 
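(Aside on the pattern the Panfrost hunks in this series apply: dev->dev_private lookups are replaced by a container_of()-based to_panfrost_device() helper, and probe switches to devm_drm_dev_alloc() with struct drm_device embedded in the driver struct. A minimal sketch of that pattern follows; the "foo" names are hypothetical and not taken from the patch.)

/* Sketch only: embed struct drm_device in the driver structure, allocate
 * both with devm_drm_dev_alloc(), and recover the driver struct with
 * container_of() instead of dev_private.
 */
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <linux/platform_device.h>

struct foo_device {
	struct drm_device base;		/* embedded, not separately allocated */
	void __iomem *iomem;
};

static inline struct foo_device *to_foo_device(struct drm_device *ddev)
{
	return container_of(ddev, struct foo_device, base);
}

static const struct drm_driver foo_drm_driver;	/* fops etc. omitted in this sketch */

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *fdev;
	int err;

	/* Allocates foo_device and initializes fdev->base in one step;
	 * the DRM device lifetime is managed, so error paths need no
	 * drm_dev_put().
	 */
	fdev = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
				  struct foo_device, base);
	if (IS_ERR(fdev))
		return PTR_ERR(fdev);

	platform_set_drvdata(pdev, fdev);

	err = drm_dev_register(&fdev->base, 0);
	if (err)
		return err;

	return 0;
}

(With the drm_device embedded and devm-managed, the separate ddev local and the explicit drm_dev_put() calls become unnecessary, which is what the panfrost_probe()/panfrost_remove() hunks earlier in this diff remove.)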
@@ -859,24 +867,25 @@ int panfrost_job_init(struct panfrost_device *pfdev) if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) args.credit_limit = 1; - pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL); + js = devm_kzalloc(pfdev->base.dev, sizeof(*js), GFP_KERNEL); if (!js) return -ENOMEM; + pfdev->js = js; INIT_WORK(&pfdev->reset.work, panfrost_reset_work); spin_lock_init(&js->job_lock); - js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job"); + js->irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "job"); if (js->irq < 0) return js->irq; - ret = devm_request_threaded_irq(pfdev->dev, js->irq, - panfrost_job_irq_handler, - panfrost_job_irq_handler_thread, + ret = devm_request_threaded_irq(pfdev->base.dev, js->irq, + panfrost_jm_irq_handler, + panfrost_jm_irq_handler_thread, IRQF_SHARED, KBUILD_MODNAME "-job", pfdev); if (ret) { - dev_err(pfdev->dev, "failed to request job irq"); + dev_err(pfdev->base.dev, "failed to request job irq"); return ret; } @@ -887,15 +896,17 @@ int panfrost_job_init(struct panfrost_device *pfdev) for (j = 0; j < NUM_JOB_SLOTS; j++) { js->queue[j].fence_context = dma_fence_context_alloc(1); + args.name = panfrost_engine_names[j]; ret = drm_sched_init(&js->queue[j].sched, &args); if (ret) { - dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); + dev_err(pfdev->base.dev, "Failed to create scheduler: %d.", ret); goto err_sched; } } - panfrost_job_enable_interrupts(pfdev); + panfrost_jm_reset_interrupts(pfdev); + panfrost_jm_enable_interrupts(pfdev); return 0; @@ -907,7 +918,7 @@ err_sched: return ret; } -void panfrost_job_fini(struct panfrost_device *pfdev) +void panfrost_jm_fini(struct panfrost_device *pfdev) { struct panfrost_job_slot *js = pfdev->js; int j; @@ -922,7 +933,7 @@ void panfrost_job_fini(struct panfrost_device *pfdev) destroy_workqueue(pfdev->reset.wq); } -int panfrost_job_open(struct drm_file *file) +int panfrost_jm_open(struct drm_file *file) { struct panfrost_file_priv *panfrost_priv = file->driver_priv; int ret; @@ -944,7 +955,7 @@ int panfrost_job_open(struct drm_file *file) return 0; } -void panfrost_job_close(struct drm_file *file) +void panfrost_jm_close(struct drm_file *file) { struct panfrost_file_priv *panfrost_priv = file->driver_priv; struct panfrost_jm_ctx *jm_ctx; @@ -956,7 +967,7 @@ void panfrost_job_close(struct drm_file *file) xa_destroy(&panfrost_priv->jm_ctxs); } -int panfrost_job_is_idle(struct panfrost_device *pfdev) +int panfrost_jm_is_idle(struct panfrost_device *pfdev) { struct panfrost_job_slot *js = pfdev->js; int i; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 5a30ff1503c6..c3f57e41a571 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -53,6 +53,8 @@ struct panfrost_jm_ctx { struct drm_sched_entity slot_entity[NUM_JOB_SLOTS]; }; +extern const char * const panfrost_engine_names[]; + int panfrost_jm_ctx_create(struct drm_file *file, struct drm_panfrost_jm_ctx_create *args); int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle); @@ -60,15 +62,16 @@ void panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx); struct panfrost_jm_ctx *panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx); struct panfrost_jm_ctx *panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle); -int panfrost_job_init(struct panfrost_device *pfdev); -void panfrost_job_fini(struct panfrost_device *pfdev); -int panfrost_job_open(struct drm_file *file); -void 
panfrost_job_close(struct drm_file *file); +int panfrost_jm_init(struct panfrost_device *pfdev); +void panfrost_jm_fini(struct panfrost_device *pfdev); +int panfrost_jm_open(struct drm_file *file); +void panfrost_jm_close(struct drm_file *file); +void panfrost_jm_reset_interrupts(struct panfrost_device *pfdev); +void panfrost_jm_enable_interrupts(struct panfrost_device *pfdev); +void panfrost_jm_suspend_irq(struct panfrost_device *pfdev); +int panfrost_jm_is_idle(struct panfrost_device *pfdev); int panfrost_job_get_slot(struct panfrost_job *job); int panfrost_job_push(struct panfrost_job *job); void panfrost_job_put(struct panfrost_job *job); -void panfrost_job_enable_interrupts(struct panfrost_device *pfdev); -void panfrost_job_suspend_irq(struct panfrost_device *pfdev); -int panfrost_job_is_idle(struct panfrost_device *pfdev); #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index f6b91c052cfb..02ccc05e23bb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -81,7 +81,7 @@ static int wait_ready(struct panfrost_device *pfdev, u32 as_nr) if (ret) { /* The GPU hung, let's trigger a reset */ panfrost_device_schedule_reset(pfdev); - dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n"); + dev_err(pfdev->base.dev, "AS_ACTIVE bit stuck\n"); } return ret; @@ -222,7 +222,7 @@ static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu) struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg; struct panfrost_device *pfdev = mmu->pfdev; - if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr & + if (drm_WARN_ON(&pfdev->base, pgtbl_cfg->arm_lpae_s1_cfg.ttbr & ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK)) return -EINVAL; @@ -253,12 +253,12 @@ static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu, return mmu_cfg_init_mali_lpae(mmu); default: /* This should never happen */ - drm_WARN(pfdev->ddev, 1, "Invalid pgtable format"); + drm_WARN(&pfdev->base, 1, "Invalid pgtable format"); return -EINVAL; } } -u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) +int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) { int as; @@ -300,7 +300,10 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) if (!atomic_read(&lru_mmu->as_count)) break; } - WARN_ON(&lru_mmu->list == &pfdev->as_lru_list); + if (WARN_ON(&lru_mmu->list == &pfdev->as_lru_list)) { + as = -EBUSY; + goto out; + } list_del_init(&lru_mmu->list); as = lru_mmu->as; @@ -315,7 +318,9 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) atomic_set(&mmu->as_count, 1); list_add(&mmu->list, &pfdev->as_lru_list); - dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask); + dev_dbg(pfdev->base.dev, + "Assigned AS%d to mmu %p, alloc_mask=%lx", + as, mmu, pfdev->as_alloc_mask); panfrost_mmu_enable(pfdev, mmu); @@ -381,13 +386,30 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev, if (mmu->as < 0) return; - pm_runtime_get_noresume(pfdev->dev); + pm_runtime_get_noresume(pfdev->base.dev); /* Flush the PTs only if we're already awake */ - if (pm_runtime_active(pfdev->dev)) + if (pm_runtime_active(pfdev->base.dev)) mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); +} + +static void mmu_unmap_range(struct panfrost_mmu *mmu, u64 iova, size_t len) +{ + struct io_pgtable_ops *ops = mmu->pgtbl_ops; + size_t 
pgsize, unmapped_len = 0; + size_t unmapped_page, pgcount; + + while (unmapped_len < len) { + pgsize = get_pgsize(iova, len - unmapped_len, &pgcount); + + unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL); + WARN_ON(unmapped_page != pgsize * pgcount); + + iova += pgsize * pgcount; + unmapped_len += pgsize * pgcount; + } } static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, @@ -396,22 +418,30 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, unsigned int count; struct scatterlist *sgl; struct io_pgtable_ops *ops = mmu->pgtbl_ops; + size_t total_mapped = 0; u64 start_iova = iova; + int ret; for_each_sgtable_dma_sg(sgt, sgl, count) { unsigned long paddr = sg_dma_address(sgl); size_t len = sg_dma_len(sgl); - dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len); + dev_dbg(pfdev->base.dev, + "map: as=%d, iova=%llx, paddr=%lx, len=%zx", + mmu->as, iova, paddr, len); while (len) { size_t pgcount, mapped = 0; size_t pgsize = get_pgsize(iova | paddr, len, &pgcount); - ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, + ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_KERNEL, &mapped); + if (ret) + goto err_unmap_pages; + /* Don't get stuck if things have gone wrong */ mapped = max(mapped, pgsize); + total_mapped += mapped; iova += mapped; paddr += mapped; len -= mapped; @@ -421,6 +451,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova); return 0; + +err_unmap_pages: + mmu_unmap_range(mmu, start_iova, total_mapped); + return ret; } int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) @@ -431,6 +465,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE; + int ret; if (WARN_ON(mapping->active)) return 0; @@ -442,11 +477,18 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); - mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, - prot, sgt); + ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, + prot, sgt); + if (ret) + goto err_put_pages; + mapping->active = true; return 0; + +err_put_pages: + drm_gem_shmem_put_pages_locked(shmem); + return ret; } void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) @@ -462,7 +504,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) if (WARN_ON(!mapping->active)) return; - dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", + dev_dbg(pfdev->base.dev, "unmap: as=%d, iova=%llx, len=%zx", mapping->mmu->as, iova, len); while (unmapped_len < len) { @@ -559,7 +601,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, bo = bomapping->obj; if (!bo->is_heap) { - dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", + dev_WARN(pfdev->base.dev, "matching BO is not heap type (GPU VA = %llx)", bomapping->mmnode.start << PAGE_SHIFT); ret = -EINVAL; goto err_bo; @@ -595,10 +637,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, refcount_set(&bo->base.pages_use_count, 1); } else { pages = bo->base.pages; - if (pages[page_offset]) { - /* Pages are already mapped, bail out. 
*/ - goto out; - } + } + + sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; + if (sgt->sgl) { + /* Pages are already mapped, bail out. */ + goto out; } mapping = bo->base.base.filp->f_mapping; @@ -620,23 +664,24 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, } } - sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; ret = sg_alloc_table_from_pages(sgt, pages + page_offset, NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL); if (ret) goto err_unlock; - ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0); + ret = dma_map_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0); if (ret) goto err_map; - mmu_map_sg(pfdev, bomapping->mmu, addr, - IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt); + ret = mmu_map_sg(pfdev, bomapping->mmu, addr, + IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt); + if (ret) + goto err_mmu_map_sg; bomapping->active = true; bo->heap_rss_size += SZ_2M; - dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); + dev_dbg(pfdev->base.dev, "mapped page fault @ AS%d %llx", as, addr); out: dma_resv_unlock(obj->resv); @@ -645,6 +690,8 @@ out: return 0; +err_mmu_map_sg: + dma_unmap_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0); err_map: sg_free_table(sgt); err_unlock: @@ -662,13 +709,12 @@ static void panfrost_mmu_release_ctx(struct kref *kref) spin_lock(&pfdev->as_lock); if (mmu->as >= 0) { - pm_runtime_get_noresume(pfdev->dev); - if (pm_runtime_active(pfdev->dev)) + pm_runtime_get_noresume(pfdev->base.dev); + if (pm_runtime_active(pfdev->base.dev)) panfrost_mmu_disable(pfdev, mmu->as); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); clear_bit(mmu->as, &pfdev->as_alloc_mask); - clear_bit(mmu->as, &pfdev->as_in_use_mask); list_del(&mmu->list); } spin_unlock(&pfdev->as_lock); @@ -726,7 +772,7 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev) if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) { if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) { - dev_err_once(pfdev->dev, + dev_err_once(pfdev->base.dev, "AARCH64_4K page table not supported\n"); return ERR_PTR(-EINVAL); } @@ -755,7 +801,7 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev) .oas = pa_bits, .coherent_walk = pfdev->coherent, .tlb = &mmu_tlb_ops, - .iommu_dev = pfdev->dev, + .iommu_dev = pfdev->base.dev, }; mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu); @@ -848,7 +894,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) if (ret) { /* terminal fault, print info about the fault */ - dev_err(pfdev->dev, + dev_err(pfdev->base.dev, "Unhandled Page fault in AS%d at VA 0x%016llX\n" "Reason: %s\n" "raw fault status: 0x%X\n" @@ -896,18 +942,18 @@ int panfrost_mmu_init(struct panfrost_device *pfdev) { int err; - pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); + pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "mmu"); if (pfdev->mmu_irq < 0) return pfdev->mmu_irq; - err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq, + err = devm_request_threaded_irq(pfdev->base.dev, pfdev->mmu_irq, panfrost_mmu_irq_handler, panfrost_mmu_irq_handler_thread, IRQF_SHARED, KBUILD_MODNAME "-mmu", pfdev); if (err) { - dev_err(pfdev->dev, "failed to request mmu irq"); + dev_err(pfdev->base.dev, "failed to request mmu irq"); return err; } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index 
022a9a74a114..27c3c65ed074 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -4,6 +4,7 @@ #ifndef __PANFROST_MMU_H__ #define __PANFROST_MMU_H__ +struct panfrost_device; struct panfrost_gem_mapping; struct panfrost_file_priv; struct panfrost_mmu; @@ -16,7 +17,7 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev); void panfrost_mmu_reset(struct panfrost_device *pfdev); void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev); -u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); +int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 0dd62e8b2fa7..7020c0192e18 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -84,11 +84,11 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, else if (perfcnt->user) return -EBUSY; - ret = pm_runtime_get_sync(pfdev->dev); + ret = pm_runtime_get_sync(pfdev->base.dev); if (ret < 0) goto err_put_pm; - bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize); + bo = drm_gem_shmem_create(&pfdev->base, perfcnt->bosize); if (IS_ERR(bo)) { ret = PTR_ERR(bo); goto err_put_pm; @@ -130,9 +130,11 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, goto err_vunmap; } - perfcnt->user = user; + ret = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu); + if (ret < 0) + goto err_vunmap; - as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu); + as = ret; cfg = GPU_PERFCNT_CFG_AS(as) | GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL); @@ -164,6 +166,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, /* The BO ref is retained by the mapping. 
*/ drm_gem_object_put(&bo->base); + perfcnt->user = user; + return 0; err_vunmap: @@ -175,7 +179,7 @@ err_close_bo: err_put_bo: drm_gem_object_put(&bo->base); err_put_pm: - pm_runtime_put(pfdev->dev); + pm_runtime_put(pfdev->base.dev); return ret; } @@ -203,7 +207,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); panfrost_gem_mapping_put(perfcnt->mapping); perfcnt->mapping = NULL; - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); return 0; } @@ -211,7 +215,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_panfrost_perfcnt_enable *req = data; int ret; @@ -238,7 +242,7 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data, int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_device *pfdev = to_panfrost_device(dev); struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; struct drm_panfrost_perfcnt_dump *req = data; void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr; @@ -273,12 +277,12 @@ void panfrost_perfcnt_close(struct drm_file *file_priv) struct panfrost_device *pfdev = pfile->pfdev; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; - pm_runtime_get_sync(pfdev->dev); + pm_runtime_get_sync(pfdev->base.dev); mutex_lock(&perfcnt->lock); if (perfcnt->user == pfile) panfrost_perfcnt_disable_locked(pfdev, file_priv); mutex_unlock(&perfcnt->lock); - pm_runtime_put_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->base.dev); } int panfrost_perfcnt_init(struct panfrost_device *pfdev) @@ -316,7 +320,7 @@ int panfrost_perfcnt_init(struct panfrost_device *pfdev) COUNTERS_PER_BLOCK * BYTES_PER_COUNTER; } - perfcnt = devm_kzalloc(pfdev->dev, sizeof(*perfcnt), GFP_KERNEL); + perfcnt = devm_kzalloc(pfdev->base.dev, sizeof(*perfcnt), GFP_KERNEL); if (!perfcnt) return -ENOMEM; diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c index 3686515d368d..2df1d76d84a0 100644 --- a/drivers/gpu/drm/panthor/panthor_devfreq.c +++ b/drivers/gpu/drm/panthor/panthor_devfreq.c @@ -146,10 +146,9 @@ int panthor_devfreq_init(struct panthor_device *ptdev) ptdev->devfreq = pdevfreq; ret = devm_pm_opp_set_regulators(dev, reg_names); - if (ret) { + if (ret && ret != -ENODEV) { if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n"); - return ret; } diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index 81df49880bd8..c7033d82cef5 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -172,6 +172,8 @@ int panthor_device_init(struct panthor_device *ptdev) struct page *p; int ret; + ptdev->soc_data = of_device_get_match_data(ptdev->base.dev); + init_completion(&ptdev->unplug.done); ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock); if (ret) diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h index 4fc7cf2aeed5..9f0649ecfc4f 100644 --- a/drivers/gpu/drm/panthor/panthor_device.h +++ b/drivers/gpu/drm/panthor/panthor_device.h @@ -32,6 +32,17 @@ struct 
panthor_vm; struct panthor_vm_pool; /** + * struct panthor_soc_data - Panthor SoC Data + */ +struct panthor_soc_data { + /** @asn_hash_enable: True if GPU_L2_CONFIG_ASN_HASH_ENABLE must be set. */ + bool asn_hash_enable; + + /** @asn_hash: ASN_HASH values when asn_hash_enable is true. */ + u32 asn_hash[3]; +}; + +/** * enum panthor_device_pm_state - PM state */ enum panthor_device_pm_state { @@ -93,6 +104,9 @@ struct panthor_device { /** @base: Base drm_device. */ struct drm_device base; + /** @soc_data: Optional SoC data. */ + const struct panthor_soc_data *soc_data; + /** @phys_addr: Physical address of the iomem region. */ phys_addr_t phys_addr; diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index fdbe89ef7f43..fb4b293f17f0 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1682,7 +1682,13 @@ static struct attribute *panthor_attrs[] = { ATTRIBUTE_GROUPS(panthor); +static const struct panthor_soc_data soc_data_mediatek_mt8196 = { + .asn_hash_enable = true, + .asn_hash = { 0xb, 0xe, 0x0, }, +}; + static const struct of_device_id dt_match[] = { + { .compatible = "mediatek,mt8196-mali", .data = &soc_data_mediatek_mt8196, }, { .compatible = "rockchip,rk3588-mali" }, { .compatible = "arm,mali-valhall-csf" }, {} diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index db69449a5be0..9d98720ce03f 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -52,6 +52,28 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev) ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE); } +static void panthor_gpu_l2_config_set(struct panthor_device *ptdev) +{ + const struct panthor_soc_data *data = ptdev->soc_data; + u32 l2_config; + u32 i; + + if (!data || !data->asn_hash_enable) + return; + + if (GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) < 11) { + drm_err(&ptdev->base, "Custom ASN hash not supported by the device"); + return; + } + + for (i = 0; i < ARRAY_SIZE(data->asn_hash); i++) + gpu_write(ptdev, GPU_ASN_HASH(i), data->asn_hash[i]); + + l2_config = gpu_read(ptdev, GPU_L2_CONFIG); + l2_config |= GPU_L2_CONFIG_ASN_HASH_ENABLE; + gpu_write(ptdev, GPU_L2_CONFIG, l2_config); +} + static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status) { gpu_write(ptdev, GPU_INT_CLEAR, status); @@ -241,8 +263,9 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev) hweight64(ptdev->gpu_info.shader_present)); } - /* Set the desired coherency mode before the power up of L2 */ + /* Set the desired coherency mode and L2 config before the power up of L2 */ panthor_gpu_coherency_set(ptdev); + panthor_gpu_l2_config_set(ptdev); return panthor_gpu_power_on(ptdev, L2, 1, 20000); } diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h index 8bee76d01bf8..8fa69f33e911 100644 --- a/drivers/gpu/drm/panthor/panthor_regs.h +++ b/drivers/gpu/drm/panthor/panthor_regs.h @@ -64,6 +64,8 @@ #define GPU_FAULT_STATUS 0x3C #define GPU_FAULT_ADDR 0x40 +#define GPU_L2_CONFIG 0x48 +#define GPU_L2_CONFIG_ASN_HASH_ENABLE BIT(24) #define GPU_PWR_KEY 0x50 #define GPU_PWR_KEY_UNLOCK 0x2968A819 @@ -110,6 +112,8 @@ #define GPU_REVID 0x280 +#define GPU_ASN_HASH(n) (0x2C0 + ((n) * 4)) + #define GPU_COHERENCY_FEATURES 0x300 #define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 
ae7e572b1b4a..b7d0e60c0de2 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -37,6 +37,8 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #include "qxl_drv.h" #include "qxl_object.h" @@ -382,7 +384,25 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_device *dev = crtc->dev; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct drm_pending_vblank_event *event; + qxl_crtc_update_monitors_config(crtc, "flush"); + + spin_lock_irq(&dev->event_lock); + + event = crtc_state->event; + crtc_state->event = NULL; + + if (event) { + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + } + + spin_unlock_irq(&dev->event_lock); } static void qxl_crtc_destroy(struct drm_crtc *crtc) @@ -401,6 +421,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, @@ -455,11 +476,15 @@ static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { qxl_crtc_update_monitors_config(crtc, "enable"); + + drm_crtc_vblank_on(crtc); } static void qxl_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { + drm_crtc_vblank_off(crtc); + qxl_crtc_update_monitors_config(crtc, "disable"); } @@ -1276,6 +1301,10 @@ int qxl_modeset_init(struct qxl_device *qdev) qxl_display_read_client_monitors_config(qdev); + ret = drm_vblank_init(&qdev->ddev, qxl_num_crtc); + if (ret) + return ret; + drm_mode_config_reset(&qdev->ddev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 9e35b14e2bf0..60afaa8e56b4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1635,7 +1635,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, } if (notify_clients) - drm_client_dev_suspend(dev, false); + drm_client_dev_suspend(dev); return 0; } @@ -1739,7 +1739,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) radeon_pm_compute_clocks(rdev); if (notify_clients) - drm_client_dev_resume(dev, false); + drm_client_dev_resume(dev); return 0; } diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig index e57536fd6f4d..7f2ef7137ae5 100644 --- a/drivers/gpu/drm/renesas/rz-du/Kconfig +++ b/drivers/gpu/drm/renesas/rz-du/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config DRM_RZG2L_DU tristate "DRM Support for RZ/G2L Display Unit" - depends on ARCH_RZG2L || COMPILE_TEST + depends on ARCH_RENESAS || COMPILE_TEST depends on DRM && OF depends on VIDEO_RENESAS_VSP1 select DRM_CLIENT_SELECTION diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index d30f0983a53a..937f83cf42fc 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -335,15 +335,9 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp) return PTR_ERR(dp->grf); } - 
dp->grfclk = devm_clk_get(dev, "grf"); - if (PTR_ERR(dp->grfclk) == -ENOENT) { - dp->grfclk = NULL; - } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) { - return -EPROBE_DEFER; - } else if (IS_ERR(dp->grfclk)) { - DRM_DEV_ERROR(dev, "failed to get grf clock\n"); - return PTR_ERR(dp->grfclk); - } + dp->grfclk = devm_clk_get_optional(dev, "grf"); + if (IS_ERR(dp->grfclk)) + return dev_err_probe(dev, PTR_ERR(dp->grfclk), "failed to get grf clock\n"); dp->pclk = devm_clk_get(dev, "pclk"); if (IS_ERR(dp->pclk)) { diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 5523911b990d..de8405ee8241 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -163,6 +163,11 @@ #define RK3288_DSI0_LCDC_SEL BIT(6) #define RK3288_DSI1_LCDC_SEL BIT(9) +#define RK3368_GRF_SOC_CON7 0x41c +#define RK3368_DSI_FORCETXSTOPMODE (0xf << 7) +#define RK3368_DSI_FORCERXMODE BIT(6) +#define RK3368_DSI_TURNDISABLE BIT(5) + #define RK3399_GRF_SOC_CON20 0x6250 #define RK3399_DSI0_LCDC_SEL BIT(0) #define RK3399_DSI1_LCDC_SEL BIT(4) @@ -1528,6 +1533,18 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = { { /* sentinel */ } }; +static const struct rockchip_dw_dsi_chip_data rk3368_chip_data[] = { + { + .reg = 0xff960000, + .lanecfg1_grf_reg = RK3368_GRF_SOC_CON7, + .lanecfg1 = FIELD_PREP_WM16_CONST((RK3368_DSI_TURNDISABLE | + RK3368_DSI_FORCETXSTOPMODE | + RK3368_DSI_FORCERXMODE), 0), + .max_data_lanes = 4, + }, + { /* sentinel */ } +}; + static int rk3399_dphy_tx1rx1_init(struct phy *phy) { struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy); @@ -1688,6 +1705,9 @@ static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = { .compatible = "rockchip,rk3288-mipi-dsi", .data = &rk3288_chip_data, }, { + .compatible = "rockchip,rk3368-mipi-dsi", + .data = &rk3368_chip_data, + }, { .compatible = "rockchip,rk3399-mipi-dsi", .data = &rk3399_chip_data, }, { diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c index ed6e8f036f4b..931343b072ad 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c @@ -429,14 +429,15 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); + struct dw_hdmi_qp_plat_data plat_data = {}; const struct rockchip_hdmi_qp_cfg *cfg; - struct dw_hdmi_qp_plat_data plat_data; struct drm_device *drm = data; struct drm_connector *connector; struct drm_encoder *encoder; struct rockchip_hdmi_qp *hdmi; struct resource *res; struct clk_bulk_data *clks; + struct clk *ref_clk; int ret, irq, i; if (!pdev->dev.of_node) @@ -455,10 +456,8 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, return -ENODEV; if (!cfg->ctrl_ops || !cfg->ctrl_ops->io_init || - !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback) { - dev_err(dev, "Missing platform ctrl ops\n"); - return -ENODEV; - } + !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback) + return dev_err_probe(dev, -ENODEV, "Missing platform ctrl ops\n"); hdmi->ctrl_ops = cfg->ctrl_ops; hdmi->dev = &pdev->dev; @@ -471,10 +470,9 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, break; } } - if (hdmi->port_id < 0) { - dev_err(hdmi->dev, "Failed to match HDMI port ID\n"); - return hdmi->port_id; - } + if (hdmi->port_id < 0) + return 
dev_err_probe(hdmi->dev, hdmi->port_id, + "Failed to match HDMI port ID\n"); plat_data.phy_ops = cfg->phy_ops; plat_data.phy_data = hdmi; @@ -495,39 +493,38 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - if (IS_ERR(hdmi->regmap)) { - dev_err(hdmi->dev, "Unable to get rockchip,grf\n"); - return PTR_ERR(hdmi->regmap); - } + if (IS_ERR(hdmi->regmap)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->regmap), + "Unable to get rockchip,grf\n"); hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vo-grf"); - if (IS_ERR(hdmi->vo_regmap)) { - dev_err(hdmi->dev, "Unable to get rockchip,vo-grf\n"); - return PTR_ERR(hdmi->vo_regmap); - } + if (IS_ERR(hdmi->vo_regmap)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->vo_regmap), + "Unable to get rockchip,vo-grf\n"); ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks); - if (ret < 0) { - dev_err(hdmi->dev, "Failed to get clocks: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(hdmi->dev, ret, "Failed to get clocks\n"); + + ref_clk = clk_get(hdmi->dev, "ref"); + if (IS_ERR(ref_clk)) + return dev_err_probe(hdmi->dev, PTR_ERR(ref_clk), + "Failed to get ref clock\n"); + + plat_data.ref_clk_rate = clk_get_rate(ref_clk); + clk_put(ref_clk); hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable", GPIOD_OUT_HIGH); - if (IS_ERR(hdmi->enable_gpio)) { - ret = PTR_ERR(hdmi->enable_gpio); - dev_err(hdmi->dev, "Failed to request enable GPIO: %d\n", ret); - return ret; - } + if (IS_ERR(hdmi->enable_gpio)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->enable_gpio), + "Failed to request enable GPIO\n"); hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0); - if (IS_ERR(hdmi->phy)) { - ret = PTR_ERR(hdmi->phy); - if (ret != -EPROBE_DEFER) - dev_err(hdmi->dev, "failed to get phy: %d\n", ret); - return ret; - } + if (IS_ERR(hdmi->phy)) + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->phy), + "Failed to get phy\n"); cfg->ctrl_ops->io_init(hdmi); @@ -537,6 +534,10 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, if (plat_data.main_irq < 0) return plat_data.main_irq; + plat_data.cec_irq = platform_get_irq_byname(pdev, "cec"); + if (plat_data.cec_irq < 0) + return plat_data.cec_irq; + irq = platform_get_irq_byname(pdev, "hpd"); if (irq < 0) return irq; @@ -556,17 +557,15 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master, hdmi->hdmi = dw_hdmi_qp_bind(pdev, encoder, &plat_data); if (IS_ERR(hdmi->hdmi)) { - ret = PTR_ERR(hdmi->hdmi); drm_encoder_cleanup(encoder); - return ret; + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->hdmi), + "Failed to bind dw-hdmi-qp"); } connector = drm_bridge_connector_init(drm, encoder); - if (IS_ERR(connector)) { - ret = PTR_ERR(connector); - dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret); - return ret; - } + if (IS_ERR(connector)) + return dev_err_probe(hdmi->dev, PTR_ERR(connector), + "Failed to init bridge connector\n"); return drm_connector_attach_encoder(connector, encoder); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index ba6b0528d1e5..5369b77ea434 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -826,8 +826,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + 
crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -1092,7 +1091,8 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane, if (!plane->state->fb) return -EINVAL; - crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); /* Special case for asynchronous cursor updates. */ if (!crtc_state) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 7ec7bea5e38e..284c8a048034 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -1003,6 +1003,8 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, struct drm_rect *src = &pstate->src; int min_scale = FRAC_16_16(1, 8); int max_scale = FRAC_16_16(8, 1); + int src_x, src_w, src_h; + int dest_w, dest_h; int format; int ret; @@ -1013,7 +1015,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, vop2 = vp->vop2; vop2_data = vop2->data; - cstate = drm_atomic_get_existing_crtc_state(pstate->state, crtc); + cstate = drm_atomic_get_new_crtc_state(pstate->state, crtc); if (WARN_ON(!cstate)) return -EINVAL; @@ -1030,22 +1032,25 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, if (format < 0) return format; - if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 || - drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) { - drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n", - drm_rect_width(src) >> 16, drm_rect_height(src) >> 16, - drm_rect_width(dest), drm_rect_height(dest)); - pstate->visible = false; - return 0; + /* Co-ordinates have now been clipped */ + src_x = src->x1 >> 16; + src_w = drm_rect_width(src) >> 16; + src_h = drm_rect_height(src) >> 16; + dest_w = drm_rect_width(dest); + dest_h = drm_rect_height(dest); + + if (src_w < 4 || src_h < 4 || dest_w < 4 || dest_h < 4) { + drm_dbg_kms(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n", + src_w, src_h, dest_w, dest_h); + return -EINVAL; } - if (drm_rect_width(src) >> 16 > vop2_data->max_input.width || - drm_rect_height(src) >> 16 > vop2_data->max_input.height) { - drm_err(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n", - drm_rect_width(src) >> 16, - drm_rect_height(src) >> 16, - vop2_data->max_input.width, - vop2_data->max_input.height); + if (src_w > vop2_data->max_input.width || + src_h > vop2_data->max_input.height) { + drm_dbg_kms(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n", + src_w, src_h, + vop2_data->max_input.width, + vop2_data->max_input.height); return -EINVAL; } @@ -1053,8 +1058,8 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, * Src.x1 can be odd when do clip, but yuv plane start point * need align with 2 pixel. 
*/ - if (fb->format->is_yuv && ((pstate->src.x1 >> 16) % 2)) { - drm_err(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n"); + if (fb->format->is_yuv && src_x % 2) { + drm_dbg_kms(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; } @@ -1140,7 +1145,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, struct vop2 *vop2 = win->vop2; struct drm_framebuffer *fb = pstate->fb; u32 bpp = vop2_get_bpp(fb->format); - u32 actual_w, actual_h, dsp_w, dsp_h; + u32 src_w, src_h, dsp_w, dsp_h; u32 act_info, dsp_info; u32 format; u32 afbc_format; @@ -1204,8 +1209,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, uv_mst = rk_obj->dma_addr + offset + fb->offsets[1]; } - actual_w = drm_rect_width(src) >> 16; - actual_h = drm_rect_height(src) >> 16; + src_w = drm_rect_width(src) >> 16; + src_h = drm_rect_height(src) >> 16; dsp_w = drm_rect_width(dest); if (dest->x1 + dsp_w > adjusted_mode->hdisplay) { @@ -1215,7 +1220,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, dsp_w = adjusted_mode->hdisplay - dest->x1; if (dsp_w < 4) dsp_w = 4; - actual_w = dsp_w * actual_w / drm_rect_width(dest); + src_w = dsp_w * src_w / drm_rect_width(dest); } dsp_h = drm_rect_height(dest); @@ -1227,35 +1232,35 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, dsp_h = adjusted_mode->vdisplay - dest->y1; if (dsp_h < 4) dsp_h = 4; - actual_h = dsp_h * actual_h / drm_rect_height(dest); + src_h = dsp_h * src_h / drm_rect_height(dest); } /* * This is workaround solution for IC design: - * esmart can't support scale down when actual_w % 16 == 1. + * esmart can't support scale down when src_w % 16 == 1. */ if (!(win->data->feature & WIN_FEATURE_AFBDC)) { - if (actual_w > dsp_w && (actual_w & 0xf) == 1) { + if (src_w > dsp_w && (src_w & 0xf) == 1) { drm_dbg_kms(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n", - vp->id, win->data->name, actual_w); - actual_w -= 1; + vp->id, win->data->name, src_w); + src_w -= 1; } } - if (afbc_en && actual_w % 4) { - drm_dbg_kms(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n", - vp->id, win->data->name, actual_w); - actual_w = ALIGN_DOWN(actual_w, 4); + if (afbc_en && src_w % 4) { + drm_dbg_kms(vop2->drm, "vp%d %s src_w[%d] not 4 pixel aligned\n", + vp->id, win->data->name, src_w); + src_w = ALIGN_DOWN(src_w, 4); } - act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff); + act_info = (src_h - 1) << 16 | ((src_w - 1) & 0xffff); dsp_info = (dsp_h - 1) << 16 | ((dsp_w - 1) & 0xffff); format = vop2_convert_format(fb->format->format); half_block_en = vop2_half_block_enable(pstate); drm_dbg(vop2->drm, "vp%d update %s[%dx%d->%dx%d@%dx%d] fmt[%p4cc_%s] addr[%pad]\n", - vp->id, win->data->name, actual_w, actual_h, dsp_w, dsp_h, + vp->id, win->data->name, src_w, src_h, dsp_w, dsp_h, dest->x1, dest->y1, &fb->format->format, afbc_en ? 
"AFBC" : "", &yrgb_mst); @@ -1284,7 +1289,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, if (fb->modifier & AFBC_FORMAT_MOD_YTR) afbc_format |= (1 << 4); - afbc_tile_num = ALIGN(actual_w, block_w) / block_w; + afbc_tile_num = ALIGN(src_w, block_w) / block_w; /* * AFBC pic_vir_width is count by pixel, this is different @@ -1362,8 +1367,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, if (rotate_90 || rotate_270) { act_info = swahw32(act_info); - actual_w = drm_rect_height(src) >> 16; - actual_h = drm_rect_width(src) >> 16; + src_w = drm_rect_height(src) >> 16; + src_h = drm_rect_width(src) >> 16; } vop2_win_write(win, VOP2_WIN_FORMAT, format); @@ -1379,7 +1384,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane, vop2_win_write(win, VOP2_WIN_UV_MST, uv_mst); } - vop2_setup_scale(vop2, win, actual_w, actual_h, dsp_w, dsp_h, fb->format->format); + vop2_setup_scale(vop2, win, src_w, src_h, dsp_w, dsp_h, fb->format->format); if (!vop2_cluster_window(win)) vop2_plane_setup_color_key(plane, 0); vop2_win_write(win, VOP2_WIN_ACT_INFO, act_info); @@ -2647,6 +2652,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) if (IS_ERR(vop2->map)) return PTR_ERR(vop2->map); + /* Set the bounds for framebuffer creation */ + drm->mode_config.min_width = 4; + drm->mode_config.min_height = 4; + drm->mode_config.max_width = vop2_data->max_input.width; + drm->mode_config.max_height = vop2_data->max_input.height; + ret = vop2_win_init(vop2); if (ret) return ret; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index d1f788763318..219f8c2fa88e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -880,6 +880,7 @@ static const struct vop_data rk3368_vop = { .win = rk3368_vop_win_data, .win_size = ARRAY_SIZE(rk3368_vop_win_data), .max_output = { 4096, 2160 }, + .lut_size = 1024, }; static const struct vop_intr rk3366_vop_intr = { diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c index a6c4a6738ded..32b91d65b768 100644 --- a/drivers/gpu/drm/sitronix/st7571-i2c.c +++ b/drivers/gpu/drm/sitronix/st7571-i2c.c @@ -263,6 +263,7 @@ static int st7571_fb_clear_screen(struct st7571_device *st7571) u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp; char pixelvalue = 0x00; + st7571_set_position(st7571, 0, 0); for (int i = 0; i < npixels; i++) regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1); diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c index f97be0040aab..94ac6ad6f306 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c @@ -206,8 +206,7 @@ static int sun8i_ui_layer_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index a09ee4097537..1f77e1d29845 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -327,8 +327,7 @@ static int sun8i_vi_layer_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) 
return -EINVAL; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 59d5c1ba145a..0f80da3544c9 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1033,7 +1033,7 @@ static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_a int min_scale, max_scale; int err; - crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c index 7a0e523651f0..5f40b5343bd8 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/drm/tests/drm_buddy_test.c @@ -21,6 +21,110 @@ static inline u64 get_size(int order, u64 chunk_size) return (1 << order) * chunk_size; } +static void drm_test_buddy_fragmentation_performance(struct kunit *test) +{ + struct drm_buddy_block *block, *tmp; + int num_blocks, i, ret, count = 0; + LIST_HEAD(allocated_blocks); + unsigned long elapsed_ms; + LIST_HEAD(reverse_list); + LIST_HEAD(test_blocks); + LIST_HEAD(clear_list); + LIST_HEAD(dirty_list); + LIST_HEAD(free_list); + struct drm_buddy mm; + u64 mm_size = SZ_4G; + ktime_t start, end; + + /* + * Allocation under severe fragmentation + * + * Create severe fragmentation by allocating the entire 4 GiB address space + * as tiny 8 KiB blocks but forcing a 64 KiB alignment. The resulting pattern + * leaves many scattered holes. Split the allocations into two groups and + * return them with different flags to block coalescing, then repeatedly + * allocate and free 64 KiB blocks while timing the loop. This stresses how + * quickly the allocator can satisfy larger, aligned requests from a pool of + * highly fragmented space. + */ + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + "buddy_init failed\n"); + + num_blocks = mm_size / SZ_64K; + + start = ktime_get(); + /* Allocate with maximum fragmentation - 8K blocks with 64K alignment */ + for (i = 0; i < num_blocks; i++) + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, + &allocated_blocks, 0), + "buddy_alloc hit an error size=%u\n", SZ_8K); + + list_for_each_entry_safe(block, tmp, &allocated_blocks, link) { + if (count % 4 == 0 || count % 4 == 3) + list_move_tail(&block->link, &clear_list); + else + list_move_tail(&block->link, &dirty_list); + count++; + } + + /* Free with different flags to ensure no coalescing */ + drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED); + drm_buddy_free_list(&mm, &dirty_list, 0); + + for (i = 0; i < num_blocks; i++) + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K, + &test_blocks, 0), + "buddy_alloc hit an error size=%u\n", SZ_64K); + drm_buddy_free_list(&mm, &test_blocks, 0); + + end = ktime_get(); + elapsed_ms = ktime_to_ms(ktime_sub(end, start)); + + kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms); + + drm_buddy_fini(&mm); + + /* + * Reverse free order under fragmentation + * + * Construct a fragmented 4 GiB space by allocating every 8 KiB block with + * 64 KiB alignment, creating a dense scatter of small regions. Half of the + * blocks are selectively freed to form sparse gaps, while the remaining + * allocations are preserved, reordered in reverse, and released back with + * the cleared flag. 
This models a pathological reverse-ordered free pattern + * and measures how quickly the allocator can merge and reclaim space when + * deallocation occurs in the opposite order of allocation, exposing the + * cost difference between a linear freelist scan and an ordered tree lookup. + */ + ret = drm_buddy_init(&mm, mm_size, SZ_4K); + KUNIT_ASSERT_EQ(test, ret, 0); + + start = ktime_get(); + /* Allocate maximum fragmentation */ + for (i = 0; i < num_blocks; i++) + KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, + &allocated_blocks, 0), + "buddy_alloc hit an error size=%u\n", SZ_8K); + + list_for_each_entry_safe(block, tmp, &allocated_blocks, link) { + if (count % 2 == 0) + list_move_tail(&block->link, &free_list); + count++; + } + drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED); + + list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link) + list_move(&block->link, &reverse_list); + drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED); + + end = ktime_get(); + elapsed_ms = ktime_to_ms(ktime_sub(end, start)); + + kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms); + + drm_buddy_fini(&mm); +} + static void drm_test_buddy_alloc_range_bias(struct kunit *test) { u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem; @@ -772,6 +876,7 @@ static struct kunit_case drm_buddy_tests[] = { KUNIT_CASE(drm_test_buddy_alloc_contiguous), KUNIT_CASE(drm_test_buddy_alloc_clear), KUNIT_CASE(drm_test_buddy_alloc_range_bias), + KUNIT_CASE(drm_test_buddy_fragmentation_performance), {} }; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index b5f60b2b2d0e..5718d9d83a49 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -676,14 +676,7 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc, if (!crtc_state->active) return 0; - if (state->planes[0].ptr != crtc->primary || - state->planes[0].state == NULL || - state->planes[0].state->crtc != crtc) { - dev_dbg(crtc->dev->dev, "CRTC primary plane must be present"); - return -EINVAL; - } - - return 0; + return drm_atomic_helper_check_crtc_primary_plane(crtc_state); } static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c index cf77a8ce7398..aa72ca679598 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c @@ -42,8 +42,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); /* we should have a crtc state if the plane is attached to a crtc */ if (WARN_ON(!crtc_state)) return 0; diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index d2d5e9f1269f..71e874c19610 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -22,6 +22,8 @@ #include <drm/drm_panic.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #include <video/vga.h> @@ -526,6 +528,7 @@ static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct bochs_device *bochs = to_bochs_device(crtc->dev); bochs_hw_blank(bochs, false); + drm_crtc_vblank_on(crtc); } static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc, @@ -533,12 +536,14 @@ static void 
bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc, { struct bochs_device *bochs = to_bochs_device(crtc->dev); + drm_crtc_vblank_off(crtc); bochs_hw_blank(bochs, true); } static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = { .mode_set_nofb = bochs_crtc_helper_mode_set_nofb, .atomic_check = bochs_crtc_helper_atomic_check, + .atomic_flush = drm_crtc_vblank_atomic_flush, .atomic_enable = bochs_crtc_helper_atomic_enable, .atomic_disable = bochs_crtc_helper_atomic_disable, }; @@ -550,6 +555,7 @@ static const struct drm_crtc_funcs bochs_crtc_funcs = { .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static const struct drm_encoder_funcs bochs_encoder_funcs = { @@ -670,6 +676,10 @@ static int bochs_kms_init(struct bochs_device *bochs) drm_connector_attach_edid_property(connector); drm_connector_attach_encoder(connector, encoder); + ret = drm_vblank_init(dev, 1); + if (ret) + return ret; + drm_mode_config_reset(dev); return 0; diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c index 97a93adc5669..f728fa48ac88 100644 --- a/drivers/gpu/drm/tiny/cirrus-qemu.c +++ b/drivers/gpu/drm/tiny/cirrus-qemu.c @@ -45,6 +45,8 @@ #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_module.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #define DRIVER_NAME "cirrus-qemu" #define DRIVER_DESC "qemu cirrus vga" @@ -404,11 +406,15 @@ static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc, #endif drm_dev_exit(idx); + + drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs cirrus_crtc_helper_funcs = { .atomic_check = cirrus_crtc_helper_atomic_check, + .atomic_flush = drm_crtc_vblank_atomic_flush, .atomic_enable = cirrus_crtc_helper_atomic_enable, + .atomic_disable = drm_crtc_vblank_atomic_disable, }; static const struct drm_crtc_funcs cirrus_crtc_funcs = { @@ -418,6 +424,7 @@ static const struct drm_crtc_funcs cirrus_crtc_funcs = { .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static const struct drm_encoder_funcs cirrus_encoder_funcs = { @@ -493,6 +500,10 @@ static int cirrus_pipe_init(struct cirrus_device *cirrus) if (ret) return ret; + ret = drm_vblank_init(dev, 1); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index e2c82ad07eb4..d93d1bef6768 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -587,6 +587,9 @@ uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man) { uint64_t usage; + if (WARN_ON_ONCE(!man->bdev)) + return 0; + spin_lock(&man->bdev->lru_lock); usage = man->usage; spin_unlock(&man->bdev->lru_lock); diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index 9ff3bade9795..aa0dded595b6 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -262,8 +262,8 @@ static int vbox_primary_atomic_check(struct drm_plane *plane, struct drm_crtc_state *crtc_state = NULL; if (new_state->crtc) { - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_state->crtc); if 
(WARN_ON(!crtc_state)) return -EINVAL; } @@ -344,8 +344,8 @@ static int vbox_cursor_atomic_check(struct drm_plane *plane, int ret; if (new_state->crtc) { - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; } diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 056d344c5411..b4a53f68865b 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -497,8 +497,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) u32 v_subsample = fb->format->vsub; int ret; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); if (!crtc_state) { DRM_DEBUG_KMS("Invalid crtc state\n"); return -EINVAL; @@ -875,8 +874,7 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) unsigned int vscale_factor; vc4_state = to_vc4_plane_state(state); - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode); /* The HVS is able to process 2 pixels/cycle when scaling the source, diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index fd76730fd38c..07db319c3d7f 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -79,7 +79,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile, dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1); - timer_setup(&fence->timer, vgem_fence_timeout, 0); + timer_setup(&fence->timer, vgem_fence_timeout, TIMER_IRQSAFE); /* We force the fence to expire within 10s to prevent driver hangs */ mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT); diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index c3315935d8bc..e972d9b015a9 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -32,6 +32,8 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/drm_vblank.h> +#include <drm/drm_vblank_helper.h> #include "virtgpu_drv.h" @@ -55,6 +57,7 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + DRM_CRTC_VBLANK_TIMER_FUNCS, }; static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = { @@ -99,6 +102,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc) static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { + drm_crtc_vblank_on(crtc); } static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, @@ -108,6 +112,8 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); + drm_crtc_vblank_off(crtc); + virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0); virtio_gpu_notify(vgdev); } @@ -121,9 +127,10 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, struct 
drm_atomic_state *state) { - struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, - crtc); + struct drm_device *dev = crtc->dev; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); + struct drm_pending_vblank_event *event; /* * virtio-gpu can't do modeset and plane update operations @@ -133,6 +140,20 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, */ if (drm_atomic_crtc_needs_modeset(crtc_state)) output->needs_modeset = true; + + spin_lock_irq(&dev->event_lock); + + event = crtc_state->event; + crtc_state->event = NULL; + + if (event) { + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + } + + spin_unlock_irq(&dev->event_lock); } static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { @@ -257,6 +278,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) struct drm_encoder *encoder = &output->enc; struct drm_crtc *crtc = &output->crtc; struct drm_plane *primary, *cursor; + int ret; output->index = index; if (index == 0) { @@ -271,8 +293,10 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index); if (IS_ERR(cursor)) return PTR_ERR(cursor); - drm_crtc_init_with_planes(dev, crtc, primary, cursor, - &virtio_gpu_crtc_funcs, NULL); + ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, + &virtio_gpu_crtc_funcs, NULL); + if (ret) + return ret; drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, @@ -356,6 +380,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) for (i = 0 ; i < vgdev->num_scanouts; ++i) vgdev_output_init(vgdev, i); + ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts); + if (ret) + return ret; + drm_mode_config_reset(vgdev->ddev); return 0; } diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index bd79f24686dc..bac0790c6577 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -127,7 +127,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, return ret; drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { - plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); + plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane); WARN_ON(!plane_state); if (!plane_state->visible) @@ -143,7 +143,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, i = 0; drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { - plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); + plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane); if (!plane_state->visible) continue; diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index f5101f267c18..083c6904f8f1 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -323,7 +323,7 @@ void xe_display_pm_suspend(struct xe_device *xe) * properly. 
 */
	intel_power_domains_disable(display);
-	drm_client_dev_suspend(&xe->drm, false);
+	drm_client_dev_suspend(&xe->drm);
	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
@@ -355,7 +355,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
		return;
	intel_power_domains_disable(display);
-	drm_client_dev_suspend(&xe->drm, false);
+	drm_client_dev_suspend(&xe->drm);
	if (intel_display_device_present(display)) {
		drm_kms_helper_poll_disable(&xe->drm);
@@ -480,7 +480,7 @@ void xe_display_pm_resume(struct xe_device *xe)
	intel_opregion_resume(display);
-	drm_client_dev_resume(&xe->drm, false);
+	drm_client_dev_resume(&xe->drm);
	intel_power_domains_enable(display);
}
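
A note on the dw-hdmi-qp-rockchip error paths above: dev_err_probe() logs the message together with the error code, records it as the probe-deferral reason, and returns the error in one step, which is why each IS_ERR() branch collapses to a single return statement and the explicit -EPROBE_DEFER check around the PHY lookup can be dropped. A minimal sketch of the pattern, with foo_probe() and its surroundings as placeholder names rather than code from this patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap *grf;

	grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
	if (IS_ERR(grf))
		/*
		 * Prints "Unable to get ..." with the error appended, stays
		 * quiet (dev_dbg only) for -EPROBE_DEFER, and returns the
		 * error code unchanged.
		 */
		return dev_err_probe(dev, PTR_ERR(grf),
				     "Unable to get rockchip,grf\n");

	return 0;
}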
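
Several hunks above (rockchip VOP/VOP2, sun8i, tegra, tilcdc, vbox, vc4, vkms) replace drm_atomic_get_existing_crtc_state()/drm_atomic_get_existing_plane_state() with drm_atomic_get_new_crtc_state()/drm_atomic_get_new_plane_state(). In an atomic_check hook only the new state is of interest, and the _new_ helpers express that directly instead of relying on the old/new ambiguity of the _existing_ variants. A minimal sketch of the resulting shape, using a hypothetical foo_plane_atomic_check() rather than any driver in this patch:

#include <linux/bug.h>

#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	/*
	 * The CRTC state is expected to be part of this commit already;
	 * the lookup does not add it and returns NULL if it is missing.
	 */
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	/* ...size, scaling and format checks against crtc_state go here... */
	return 0;
}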
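
The vop2_plane_atomic_update() hunks also rename actual_w/actual_h to src_w/src_h and keep the proportional trim when the destination rectangle sticks out of the mode: src_w = dsp_w * src_w / drm_rect_width(dest). As a worked example, a 1920-pixel-wide source scaled onto a 960-pixel-wide destination of which only 480 pixels remain on screen ends up with dsp_w = 480 and a fetched source width of 480 * 1920 / 960 = 960, so the 2:1 downscale factor is preserved for the visible part.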
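
The two drm_buddy KUnit scenarios added above (allocation under severe fragmentation, and reverse-ordered freeing) assert only that the allocations succeed and report the elapsed time via kunit_info(), so the timing is informational rather than a pass/fail criterion. They should be runnable like the existing drm_buddy cases, for example via the KUnit wrapper with the DRM test config (./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests), though the exact invocation depends on the local setup.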
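
bochs, cirrus-qemu and virtio are switched to the timer-emulated vblank helpers in the hunks above: drm_vblank_init() during KMS setup, drm_crtc_vblank_on()/drm_crtc_vblank_off() in the enable/disable paths, drm_crtc_vblank_atomic_flush as the atomic_flush helper, and DRM_CRTC_VBLANK_TIMER_FUNCS in the CRTC funcs. A sketch of the wiring stitched into one hypothetical driver (foo_* names are placeholders; the helper names are taken from the hunks themselves):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_helper.h>

static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	/* ...program the hardware... */
	drm_crtc_vblank_on(crtc);
}

static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
	/* ...blank the hardware... */
}

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	/* Sends out pending flip/vblank events from the emulated vblank. */
	.atomic_flush = drm_crtc_vblank_atomic_flush,
	.atomic_enable = foo_crtc_atomic_enable,
	.atomic_disable = foo_crtc_atomic_disable,
};

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	/* Timer-backed vblank callbacks for hardware without a real vblank. */
	DRM_CRTC_VBLANK_TIMER_FUNCS,
};

static int foo_kms_init(struct drm_device *dev)
{
	/* One vblank-capable CRTC in this sketch. */
	return drm_vblank_init(dev, 1);
}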
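
The vgem change above marks the fence timeout timer TIMER_IRQSAFE. An irqsafe timer runs its callback with interrupts disabled, which makes it safe to wait for a running callback to complete even from hard-IRQ context (for example via timer_delete_sync()). A minimal sketch of the flag's use, with foo_* as placeholder names unrelated to vgem:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo_fence {
	struct timer_list timer;
	/* ...fence bookkeeping... */
};

static void foo_fence_timeout(struct timer_list *t)
{
	/* Runs with interrupts disabled because of TIMER_IRQSAFE. */
	/* ...signal and release the fence here... */
}

static void foo_fence_arm(struct foo_fence *fence, unsigned long timeout)
{
	timer_setup(&fence->timer, foo_fence_timeout, TIMER_IRQSAFE);
	mod_timer(&fence->timer, jiffies + timeout);
}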