path: root/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
author     Simona Vetter <simona.vetter@ffwll.ch>  2025-10-31 22:08:23 +0100
committer  Simona Vetter <simona.vetter@ffwll.ch>  2025-10-31 22:08:24 +0100
commit     f67d54e96bc9e4e20a927868f02c2e9d1aa09751 (patch)
tree       97f69a36292bbca54427d258bba1a7b491a12f8e /drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
parent     dc1af502d50b34f12464e09217ba3628e0563591 (diff)
parent     1bc9d39275e08853ff15410b4d530b46b546affb (diff)
Merge tag 'amd-drm-next-6.19-2025-10-29' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.19-2025-10-29:

amdgpu:
- VPE idle handler fix
- Re-enable DM idle optimizations
- DCN3.0 fix
- SMU fix
- Powerplay fixes for fiji/iceland
- License copy-pasta fixes
- HDP eDP panel fix
- Vblank fix
- RAS fixes
- SR-IOV updates
- SMU 13 VCN reset fix
- DMUB fixes
- DC frame limit fix
- Additional DC underflow logging
- DCN 3.1.5 fixes
- DC Analog encoders support
- Enable DC on bonaire by default
- UserQ fixes
- Remove redundant pm_runtime_mark_last_busy() calls

amdkfd:
- Process cleanup fix
- Misc fixes

radeon:
- devm migration fixes
- Remove redundant pm_runtime_mark_last_busy() calls

UAPI:
- Add ABM KMS property
  Proposed kwin changes: https://invent.kde.org/plasma/kwin/-/merge_requests/6028

Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20251029205713.9480-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/mes_userqueue.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_userqueue.c  41
1 file changed, 24 insertions, 17 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index 5c63480dda9c..b1ee9473d628 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -205,10 +205,10 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
struct mes_detect_and_reset_queue_input input;
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
unsigned int hung_db_num = 0;
- int queue_id, r, i;
+ unsigned long queue_id;
u32 db_array[8];
+ int r, i;
if (db_array_size > 8) {
dev_err(adev->dev, "DB array size (%d vs 8) too small\n",
@@ -227,16 +227,14 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
if (r) {
dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
} else if (hung_db_num) {
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (queue->queue_type == queue_type) {
- for (i = 0; i < hung_db_num; i++) {
- if (queue->doorbell_index == db_array[i]) {
- queue->state = AMDGPU_USERQ_STATE_HUNG;
- atomic_inc(&adev->gpu_reset_counter);
- amdgpu_userq_fence_driver_force_completion(queue);
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
- }
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ if (queue->queue_type == queue_type) {
+ for (i = 0; i < hung_db_num; i++) {
+ if (queue->doorbell_index == db_array[i]) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ atomic_inc(&adev->gpu_reset_counter);
+ amdgpu_userq_fence_driver_force_completion(queue);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
}
}
}
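
For reference, a minimal sketch of the xa_for_each() pattern the hunk above switches to in place of the per-manager idr_for_each_entry() walk. count_entries() is a hypothetical helper, not driver code; the cursor must be an unsigned long, which is why queue_id changes type earlier in the patch.

#include <linux/xarray.h>

/* Hypothetical helper: count every entry currently stored in an xarray.
 * xa_for_each() takes the xarray, an unsigned long cursor that receives
 * each entry's index, and a pointer that receives the entry itself. */
static unsigned int count_entries(struct xarray *xa)
{
	unsigned long index;
	void *entry;
	unsigned int n = 0;

	xa_for_each(xa, index, entry)
		n++;

	return n;
}
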
@@ -254,7 +252,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
struct drm_amdgpu_userq_in *mqd_user = args_in;
struct amdgpu_mqd_prop *userq_props;
- struct amdgpu_gfx_shadow_info shadow_info;
int r;
/* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -280,8 +277,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->doorbell_index = queue->doorbell_index;
userq_props->fence_address = queue->fence_drv->gpu_addr;
- if (adev->gfx.funcs->get_gfx_shadow_info)
- adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
@@ -299,7 +294,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
}
r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
- max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE));
+ 2048);
if (r)
goto free_mqd;
@@ -312,6 +307,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
kfree(compute_mqd);
} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+ struct amdgpu_gfx_shadow_info shadow_info;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
+ } else {
+ r = -EINVAL;
+ goto free_mqd;
+ }
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid GFX MQD\n");
@@ -335,6 +338,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
shadow_info.shadow_size);
if (r)
goto free_mqd;
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
+ shadow_info.csa_size);
+ if (r)
+ goto free_mqd;
kfree(mqd_gfx_v11);
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
@@ -353,7 +360,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
- shadow_info.csa_size);
+ 32);
if (r)
goto free_mqd;
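
Pieced together from the hunks above, the reworked GFX branch of mes_userq_mqd_create() reads roughly as follows. This is a sketch, not the exact file contents: context lines not shown in the diff are elided, and the shadow_va field name is assumed from the surrounding code rather than visible in the hunks.

} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
	struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
	struct amdgpu_gfx_shadow_info shadow_info;

	/* Shadow info is only needed for GFX queues, so the query moves
	 * into this branch and a missing callback now fails queue creation
	 * instead of being silently skipped. */
	if (adev->gfx.funcs->get_gfx_shadow_info) {
		adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
	} else {
		r = -EINVAL;
		goto free_mqd;
	}

	/* ... MQD size checks and copy elided ... */

	/* The shadow VA check already existed; validating csa_va against
	 * shadow_info.csa_size is what this patch adds for GFX queues. */
	r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
					   shadow_info.shadow_size);
	if (r)
		goto free_mqd;
	r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
					   shadow_info.csa_size);
	if (r)
		goto free_mqd;

	kfree(mqd_gfx_v11);
}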