| author | Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> | 2025-11-21 11:12:29 +0100 |
|---|---|---|
| committer | Christian König <christian.koenig@amd.com> | 2025-11-26 13:12:23 +0100 |
| commit | ddf055b80a544d6f36f77be5f0c6d3c80177d57c (patch) | |
| tree | 6b5ed4e62e9e1871dce456e01d1856e13161c00c /drivers/gpu/drm/ttm/ttm_resource.c | |
| parent | 6f53bcb4fe66967fd4a82547062e2ae98058eea6 (diff) | |
drm/ttm: rework pipelined eviction fence handling
Until now, TTM stored a single pipelined eviction fence, which
meant drivers had to use a single entity for these evictions.
To lift this requirement, this commit allows up to 8 entities
to be used.
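As a rough sketch of the shape of the change, assuming the header side mirrors the fields touched in ttm_resource.c below (the real definition lives in include/drm/ttm/ttm_resource.h, and TTM_NUM_MOVE_FENCES is presumably 8, matching the message above): the single move fence and its lock are replaced by a fixed-size fence array.

```c
/*
 * Sketch only, not the exact header diff: the manager's single
 * pipelined-move fence becomes a small fixed-size array so that up to
 * TTM_NUM_MOVE_FENCES entities can have pipelined evictions in flight.
 */
#define TTM_NUM_MOVE_FENCES 8	/* assumed value, per "up to 8 entities" */

struct ttm_resource_manager {
	/* ... unchanged members: bdev, size, usage, lru[], ... */

	/* before: spinlock_t move_lock; struct dma_fence *move; */
	spinlock_t eviction_lock;
	struct dma_fence *eviction_fences[TTM_NUM_MOVE_FENCES];
};
```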
Ideally a dma_resv object would have been used as a container
for the eviction fences, but the locking rules make that complex:
all dma_resv objects share the same ww_class, so "Attempting to
lock more mutexes after ww_acquire_done." is an error.
One alternative considered was to introduce a 2nd ww_class for
specific resvs holding a single "transient" lock (i.e. the resv
lock would only be held for a short period, without taking any
other locks).
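For illustration only, a minimal sketch of the constraint being described, not code from this series (function and parameter names are invented, -EDEADLK/error handling elided): every dma_resv lock belongs to reservation_ww_class, so locking an extra fence-container resv after the driver has already called ww_acquire_done() on its acquire context trips the ww_mutex debug check quoted above.

```c
#include <linux/dma-resv.h>

/* Illustration only: why a dma_resv as fence container is awkward. */
static void eviction_container_problem(struct dma_resv *bo_resv,
				       struct dma_resv *eviction_resv)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(bo_resv, &ctx);	/* regular BO reservation */
	ww_acquire_done(&ctx);			/* no further locks expected */

	/*
	 * Same ww_class, taken after ww_acquire_done(): debug kernels
	 * warn "Attempting to lock more mutexes after ww_acquire_done".
	 */
	ret = dma_resv_lock(eviction_resv, &ctx);

	dma_resv_unlock(eviction_resv);
	dma_resv_unlock(bo_resv);
	ww_acquire_fini(&ctx);
}
```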
The other option, chosen here, is to statically reserve a fence
array and extend the existing code to deal with N fences instead
of 1. The driver is still responsible for reserving the correct
number of fence slots.
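The consumer side of that array is the rework of ttm_resource_manager_evict_all() in the diff below. The producer side is not part of this file; as a purely hypothetical sketch (the helper name and signature are invented and the real interface added by the series may differ), installing a pipelined eviction fence would amount to claiming a free or already-signaled slot under eviction_lock:

```c
/* Hypothetical example, not the patch's actual API. */
static int example_add_eviction_fence(struct ttm_resource_manager *man,
				      struct dma_fence *fence)
{
	unsigned int i;

	spin_lock(&man->eviction_lock);
	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
		struct dma_fence *old = man->eviction_fences[i];

		if (!old || dma_fence_is_signaled(old)) {
			/* Drop the stale fence and publish the new one. */
			dma_fence_put(old);
			man->eviction_fences[i] = dma_fence_get(fence);
			spin_unlock(&man->eviction_lock);
			return 0;
		}
	}
	spin_unlock(&man->eviction_lock);

	/* Every slot busy: the driver reserved too few fence slots. */
	return -EBUSY;
}
```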
Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Link: https://lore.kernel.org/r/20251121101315.3585-20-pierre-eric.pelloux-prayer@amd.com
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_resource.c')
| -rw-r--r-- | drivers/gpu/drm/ttm/ttm_resource.c | 33 |
1 file changed, 20 insertions(+), 13 deletions(-)
```diff
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 1a39c30f22fb..f5aa29dc6ec0 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -524,14 +524,15 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
 {
 	unsigned i;
 
-	spin_lock_init(&man->move_lock);
 	man->bdev = bdev;
 	man->size = size;
 	man->usage = 0;
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
 		INIT_LIST_HEAD(&man->lru[i]);
-	man->move = NULL;
+	spin_lock_init(&man->eviction_lock);
+	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
+		man->eviction_fences[i] = NULL;
 }
 EXPORT_SYMBOL(ttm_resource_manager_init);
 
@@ -552,7 +553,7 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
 		.no_wait_gpu = false,
 	};
 	struct dma_fence *fence;
-	int ret;
+	int ret, i;
 
 	do {
 		ret = ttm_bo_evict_first(bdev, man, &ctx);
@@ -562,18 +563,24 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
 	if (ret && ret != -ENOENT)
 		return ret;
 
-	spin_lock(&man->move_lock);
-	fence = dma_fence_get(man->move);
-	spin_unlock(&man->move_lock);
-
-	if (fence) {
-		ret = dma_fence_wait(fence, false);
-		dma_fence_put(fence);
-		if (ret)
-			return ret;
+	ret = 0;
+
+	spin_lock(&man->eviction_lock);
+	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+		fence = man->eviction_fences[i];
+		if (fence && !dma_fence_is_signaled(fence)) {
+			dma_fence_get(fence);
+			spin_unlock(&man->eviction_lock);
+			ret = dma_fence_wait(fence, false);
+			dma_fence_put(fence);
+			if (ret)
+				return ret;
+			spin_lock(&man->eviction_lock);
+		}
 	}
+	spin_unlock(&man->eviction_lock);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(ttm_resource_manager_evict_all);
```