| author | Dave Airlie <airlied@redhat.com> | 2025-11-18 06:58:01 +1000 |
| committer | Dave Airlie <airlied@redhat.com> | 2025-11-18 07:01:26 +1000 |
| commit | f3a1d69f9b388271986f4efe1fd775df15b443c1 (patch) | |
| tree | b82a68c63a88b24b30ede3b4263fb3d2f2b6344d /drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | |
| parent | fd1a11ea111b083aa3d19f36516ecda5efa2b69f (diff) | |
| parent | ccd3b4c7c37fbbd3e5244d3c54ca24ae0a37810d (diff) | |
Merge tag 'amd-drm-next-6.19-2025-11-14' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.19-2025-11-14:
amdgpu:
- RAS updates
- GC12 DCC P2P fix
- Documentation fixes
- Power limit code cleanup
- Userq updates
- VRR fix
- SMART OLED support
- DSC refactor for DCN 3.5
- Replay updates
- DC clockgating updates
- HDCP refactor
- ISP fix
- SMU 13.0.12 updates
- JPEG 5.0.1 fix
- VCE1 support
- Enable DC by default on SI
- Refactor CIK and SI enablement
- Enable amdgpu by default for CI dGPUs
- XGMI fixes
- SR-IOV fixes
- Memory allocation critical path fixes
- Enable amdgpu by default on SI dGPUs
amdkfd:
- Relax checks on save area overallocations
- Fix GPU mappings after prefetch
radeon:
- Refactor CIK and SI enablement
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20251114192553.442621-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 58 |
1 file changed, 17 insertions, 41 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3ef5bc95642c..9cab36322c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->vm_hub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct dma_fence **fences;
-	unsigned i;
 
+	/* If anybody is waiting for a VMID let everybody wait for fairness */
 	if (!dma_fence_is_signaled(ring->vmid_wait)) {
 		*fence = dma_fence_get(ring->vmid_wait);
 		return 0;
 	}
 
-	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
-	if (!fences)
-		return -ENOMEM;
-
 	/* Check if we have an idle VMID */
-	i = 0;
-	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+	list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
 		/* Don't use per engine and per process VMID at the same time */
 		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
 			NULL : ring;
 
-		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
-		if (!fences[i])
-			break;
-		++i;
+		*fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
+		if (!(*fence))
+			return 0;
 	}
 
-	/* If we can't find a idle VMID to use, wait till one becomes available */
-	if (&(*idle)->list == &id_mgr->ids_lru) {
-		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
-		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
-		struct dma_fence_array *array;
-		unsigned j;
-
-		*idle = NULL;
-		for (j = 0; j < i; ++j)
-			dma_fence_get(fences[j]);
-
-		array = dma_fence_array_create(i, fences, fence_context,
-					       seqno, true);
-		if (!array) {
-			for (j = 0; j < i; ++j)
-				dma_fence_put(fences[j]);
-			kfree(fences);
-			return -ENOMEM;
-		}
-
-		*fence = dma_fence_get(&array->base);
-		dma_fence_put(ring->vmid_wait);
-		ring->vmid_wait = &array->base;
-		return 0;
-	}
-	kfree(fences);
+	/*
+	 * If we can't find a idle VMID to use, wait on a fence from the least
+	 * recently used in the hope that it will be available soon.
+	 */
+	*idle = NULL;
+	dma_fence_put(ring->vmid_wait);
+	ring->vmid_wait = dma_fence_get(*fence);
+	/* This is the reference we return */
+	dma_fence_get(*fence);
 
 	return 0;
 }
@@ -313,7 +289,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	 * user of the VMID.
 	 */
 	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
-			      GFP_NOWAIT);
+			      GFP_ATOMIC);
 	if (r)
 		return r;
 
@@ -373,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 	 */
 	r = amdgpu_sync_fence(&(*id)->active,
 			      &job->base.s_fence->finished,
-			      GFP_NOWAIT);
+			      GFP_ATOMIC);
 	if (r)
 		return r;
 
@@ -426,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
 	/* Remember this submission as user of the VMID */
 	r = amdgpu_sync_fence(&id->active, &job->base.s_fence->finished,
-			      GFP_NOWAIT);
+			      GFP_ATOMIC);
 	if (r)
 		goto error;
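The substance of this diff is the first hunk, which changes what amdgpu_vmid_grab_idle() does when no VMID is free. The old code allocated a temporary array on the submission path, collected one busy fence per VMID and built a dma_fence_array to wait on; the new code walks the LRU list in reverse and, if every VMID still has a pending fence, waits on the fence peeked from the last entry visited, which per the new comment belongs to the least recently used VMID. The following is a minimal userspace sketch of that control flow only; the names (vmid_slot, pick_idle_or_wait_fence) and the demo data are illustrative assumptions, not amdgpu code, and "fences" are plain integers rather than dma_fence objects.

/*
 * Minimal userspace model of the reworked "grab an idle VMID" flow above.
 * All names and data here are made up for illustration; this is not kernel
 * code and has no amdgpu dependencies.
 */
#include <stdio.h>
#include <stddef.h>

struct vmid_slot {
	int id;
	long busy_fence;	/* 0 = idle, otherwise a pending "fence" to wait on */
};

/*
 * Scan the slots and return the first idle one.  If none is idle, return
 * NULL and hand back the fence of the last slot scanned so the caller can
 * wait on it and retry; the kernel code walks its LRU list in reverse, so
 * the last slot visited is, per the patch comment, the least recently used.
 * No temporary array and no composite-fence allocation are needed.
 */
static struct vmid_slot *pick_idle_or_wait_fence(struct vmid_slot *slots,
						 size_t count, long *wait_fence)
{
	size_t i;

	*wait_fence = 0;
	for (i = 0; i < count; i++) {
		*wait_fence = slots[i].busy_fence;
		if (!*wait_fence)
			return &slots[i];	/* idle VMID found */
	}
	return NULL;	/* all busy: caller waits on *wait_fence */
}

int main(void)
{
	struct vmid_slot all_busy[] = { {1, 101}, {2, 102}, {3, 103} };
	struct vmid_slot one_idle[] = { {1, 101}, {2, 0}, {3, 103} };
	struct vmid_slot *slot;
	long fence;

	slot = pick_idle_or_wait_fence(all_busy, 3, &fence);
	printf("all busy -> idle=%s, wait on fence %ld\n",
	       slot ? "yes" : "no", fence);

	slot = pick_idle_or_wait_fence(one_idle, 3, &fence);
	printf("one idle -> got VMID %d\n", slot ? slot->id : -1);
	return 0;
}

In the all-busy case the caller gets NULL plus a single fence to wait on before retrying, which is what the kernel hunk now stores in ring->vmid_wait. Compared with the removed path, this drops the kmalloc_array()/dma_fence_array_create() calls and their -ENOMEM failure mode from the VMID grab path, at the cost of waiting on one specific VMID's fence rather than on the composite fence the old code built over all of them. The remaining three hunks only swap the allocation flag passed to amdgpu_sync_fence() from GFP_NOWAIT to GFP_ATOMIC in this view of the merge; both flags forbid sleeping, while GFP_ATOMIC additionally allows the allocator to dip into emergency reserves.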