| author | Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> | 2025-11-21 11:12:29 +0100 |
|---|---|---|
| committer | Christian König <christian.koenig@amd.com> | 2025-11-26 13:12:23 +0100 |
| commit | ddf055b80a544d6f36f77be5f0c6d3c80177d57c | |
| tree | 6b5ed4e62e9e1871dce456e01d1856e13161c00c /drivers/gpu/drm/ttm/ttm_bo_util.c | |
| parent | 6f53bcb4fe66967fd4a82547062e2ae98058eea6 | |
drm/ttm: rework pipelined eviction fence handling
Until now, ttm stored a single pipelined eviction fence, which meant
drivers had to use a single entity for these evictions.
To lift this requirement, this commit allows up to 8 entities to
be used.
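To make the new scheme concrete, here is a minimal sketch of the storage it implies, inferred from the diff further down: the field names (`eviction_lock`, `eviction_fences[]`, `TTM_NUM_MOVE_FENCES`) match the patch, the array length of 8 matches the commit message, and the container struct is a hypothetical stand-in for the real `struct ttm_resource_manager` modified elsewhere in this series.

```c
#include <linux/spinlock.h>
#include <linux/dma-fence.h>

/* "up to 8 entities" per the commit message; the real definition
 * lives in the ttm_resource headers touched by this series.
 */
#define TTM_NUM_MOVE_FENCES 8

struct example_resource_manager {	/* hypothetical stand-in */
	spinlock_t eviction_lock;	/* protects eviction_fences[] */
	/* at most one pipelined-eviction fence per fence context */
	struct dma_fence *eviction_fences[TTM_NUM_MOVE_FENCES];
};
```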
Ideally, a dma_resv object would have been used as a container for
the eviction fences, but the locking rules make this complex:
dma_resv objects all share the same ww_class, which means attempting
to lock more mutexes after ww_acquire_done() is an error.
One alternative considered was to introduce a second ww_class for
specific resv objects holding a single "transient" lock (i.e. the
resv lock would only be held for a short period, without taking any
other locks).
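For readers unfamiliar with the ww_mutex rule cited above, the following hedged sketch (not part of the patch; `bo_resv` and `eviction_resv` are made-up names, and `-EDEADLK` backoff handling is elided) shows why a per-manager dma_resv container would trip the debug check: every dma_resv lock belongs to `reservation_ww_class`, and once `ww_acquire_done()` seals the acquire context, taking any further lock of that class is flagged.

```c
#include <linux/dma-resv.h>
#include <linux/ww_mutex.h>

/* Illustrative only: bo_resv / eviction_resv are hypothetical. */
static void locking_rule_example(struct dma_resv *bo_resv,
				 struct dma_resv *eviction_resv)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(bo_resv, &ctx);	/* taken while building the job */
	ww_acquire_done(&ctx);			/* acquire phase is now sealed */

	/*
	 * If eviction fences lived in their own dma_resv, TTM would have
	 * to lock it here, after ww_acquire_done(): with ww_mutex
	 * debugging enabled this triggers the "attempting to lock more
	 * mutexes after ww_acquire_done" warning.
	 */
	ret = dma_resv_lock(eviction_resv, &ctx);

	dma_resv_unlock(eviction_resv);
	dma_resv_unlock(bo_resv);
	ww_acquire_fini(&ctx);
}
```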
The other option, implemented here, is to statically reserve a fence
array and extend the existing code to deal with N fences instead
of 1. The driver is still responsible for reserving the correct
number of fence slots.
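The driver-side half of that contract is unchanged in shape, only in count: fence slots must be reserved on the reservation object before any eviction fence can be stored. A hedged sketch (the helper name is invented; the diff below applies exactly this to the ghost object, replacing the old hard-coded 1):

```c
#include <linux/dma-resv.h>

/*
 * Sketch of the reservation contract, not code from the patch:
 * reserve enough slots up front so that storing up to
 * TTM_NUM_MOVE_FENCES eviction fences later cannot fail.
 */
static int example_prepare_eviction(struct dma_resv *resv)
{
	return dma_resv_reserve_fences(resv, TTM_NUM_MOVE_FENCES);
}
```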
Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Link: https://lore.kernel.org/r/20251121101315.3585-20-pierre-eric.pelloux-prayer@amd.com
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
| -rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_util.c | 38 |
1 file changed, 31 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index acbbca9d5c92..2ff35d55e462 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -258,7 +258,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	ret = dma_resv_trylock(&fbo->base.base._resv);
 	WARN_ON(!ret);
 
-	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+	ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES);
 	if (ret) {
 		dma_resv_unlock(&fbo->base.base._resv);
 		kfree(fbo);
@@ -646,20 +646,44 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 {
 	struct ttm_device *bdev = bo->bdev;
 	struct ttm_resource_manager *from;
+	struct dma_fence *tmp;
+	int i;
 
 	from = ttm_manager_type(bdev, bo->resource->mem_type);
 
 	/**
 	 * BO doesn't have a TTM we need to bind/unbind. Just remember
-	 * this eviction and free up the allocation
+	 * this eviction and free up the allocation.
+	 * The fence will be saved in the first free slot or in the slot
+	 * already used to store a fence from the same context. Since
+	 * drivers can't use more than TTM_NUM_MOVE_FENCES contexts for
+	 * evictions we should always find a slot to use.
 	 */
-	spin_lock(&from->move_lock);
-	if (!from->move || dma_fence_is_later(fence, from->move)) {
-		dma_fence_put(from->move);
-		from->move = dma_fence_get(fence);
+	spin_lock(&from->eviction_lock);
+	for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+		tmp = from->eviction_fences[i];
+		if (!tmp)
+			break;
+		if (fence->context != tmp->context)
+			continue;
+		if (dma_fence_is_later(fence, tmp)) {
+			dma_fence_put(tmp);
+			break;
+		}
+		goto unlock;
+	}
+	if (i < TTM_NUM_MOVE_FENCES) {
+		from->eviction_fences[i] = dma_fence_get(fence);
+	} else {
+		WARN(1, "not enough fence slots for all fence contexts");
+		spin_unlock(&from->eviction_lock);
+		dma_fence_wait(fence, false);
+		goto end;
 	}
-	spin_unlock(&from->move_lock);
 
+unlock:
+	spin_unlock(&from->eviction_lock);
+end:
 	ttm_resource_free(bo, &bo->resource);
 }
```
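One detail worth spelling out about the slot-matching loop above: a dma_fence context is a totally ordered timeline, so `dma_fence_is_later()` is only meaningful between fences of the same context. That is why the loop keys slots by `fence->context`, and why one slot per context suffices. A hedged illustration using only the generic dma-fence API (the helper name is invented):

```c
#include <linux/dma-fence.h>

/*
 * Illustrative only: fences within one context are ordered by seqno,
 * so remembering just the latest fence of each context is enough for
 * anyone who later waits on the pipelined-eviction fences.
 */
static bool slot_can_be_replaced(struct dma_fence *slot,
				 struct dma_fence *incoming)
{
	/* Ordering across different contexts is undefined. */
	if (slot->context != incoming->context)
		return false;

	/* Within one context, the later fence implies the earlier one. */
	return dma_fence_is_later(incoming, slot);
}
```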