author     Christian König <christian.koenig@amd.com>    2025-10-06 12:45:25 +0200
committer  Alex Deucher <alexander.deucher@amd.com>       2025-10-07 14:09:19 -0400
commit     a107aeb6a2150dd552673caefc771e2222d584de (patch)
tree       a9f04c643fff3f14c309ca774aad0149da1d6c00 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent     ddbfac152830e38d488ff8e45ab7eaf5d72f8527 (diff)
drm/amdgpu: partially revert "revert to old status lock handling v3"
The CI systems are pointing out list corruptions, so we still need to fix
something here. Keep the asserts, but revert the lock changes for now.

Fixes: 59e4405e9ee2 ("drm/amdgpu: revert to old status lock handling v3")
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
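For reference, a minimal userspace sketch of the locking pattern this patch restores (a simplified model, not the driver code): every relink of a buffer between the per-VM status lists happens while holding the single status_lock spinlock, as in the amdgpu_vm_bo_moved() hunk below. The struct vm / struct vm_bo types and the pthread spinlock are illustrative stand-ins for the kernel's struct amdgpu_vm, struct amdgpu_vm_bo_base and spin_lock().

/*
 * Simplified model of the restored pattern: one per-VM spinlock
 * (status_lock) serializes every move between the status lists.
 * Names here are illustrative stand-ins, not the kernel structures.
 */
#include <pthread.h>
#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

static void list_init(struct list_head *h)
{
	h->prev = h->next = h;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

struct vm {
	pthread_spinlock_t status_lock;	/* protects all lists below */
	struct list_head evicted, moved, idle, invalidated, done;
};

struct vm_bo {
	struct list_head vm_status;	/* entry in exactly one status list */
	struct vm *vm;
};

/* Counterpart of amdgpu_vm_bo_moved(): relink under the status lock. */
static void vm_bo_moved(struct vm_bo *bo)
{
	pthread_spin_lock(&bo->vm->status_lock);
	list_del(&bo->vm_status);
	list_add_tail(&bo->vm_status, &bo->vm->moved);
	pthread_spin_unlock(&bo->vm->status_lock);
}

int main(void)
{
	struct vm vm;
	struct vm_bo bo = { .vm = &vm };

	pthread_spin_init(&vm.status_lock, PTHREAD_PROCESS_PRIVATE);
	list_init(&vm.evicted);
	list_init(&vm.moved);
	list_init(&vm.idle);
	list_init(&vm.invalidated);
	list_init(&vm.done);

	list_init(&bo.vm_status);
	list_add_tail(&bo.vm_status, &vm.idle);	/* BO starts out idle */
	vm_bo_moved(&bo);			/* relinked under status_lock */

	printf("bo on moved list: %d\n", vm.moved.next == &bo.vm_status);
	return 0;
}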
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	146
1 file changed, 91 insertions, 55 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2823208d8624..c1a801203949 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -153,10 +153,12 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
vm_bo->moved = true;
amdgpu_vm_assert_locked(vm);
+ spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@@ -169,7 +171,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -183,7 +187,9 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
+ spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@@ -197,9 +203,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->invalidated_lock);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
- spin_unlock(&vm_bo->vm->invalidated_lock);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -212,9 +218,10 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
- amdgpu_vm_assert_locked(vm_bo->vm);
vm_bo->moved = true;
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -228,10 +235,13 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- if (vm_bo->bo->parent)
+ if (vm_bo->bo->parent) {
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
- else
+ spin_unlock(&vm_bo->vm->status_lock);
+ } else {
amdgpu_vm_bo_idle(vm_bo);
+ }
}
/**
@@ -245,7 +255,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -259,13 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
- spin_lock(&vm->invalidated_lock);
+ amdgpu_vm_assert_locked(vm);
+
+ spin_lock(&vm->status_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
- spin_unlock(&vm->invalidated_lock);
- amdgpu_vm_assert_locked(vm);
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -275,13 +287,14 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
else if (bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
+ spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_update_shared - helper to update shared memory stat
* @base: base structure for tracking BO usage in a VM
*
- * Takes the vm stats_lock and updates the shared memory stat. If the basic
+ * Takes the vm status_lock and updates the shared memory stat. If the basic
* stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called
* as well.
*/
@@ -294,7 +307,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
bool shared;
dma_resv_assert_held(bo->tbo.base.resv);
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
base->shared = shared;
@@ -306,7 +319,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
vm->stats[bo_memtype].drm.private += size;
}
}
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
}
/**
@@ -331,11 +344,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
* be bo->tbo.resource
* @sign: if we should add (+1) or subtract (-1) from the stat
*
- * Caller need to have the vm stats_lock held. Useful for when multiple update
+ * Caller need to have the vm status_lock held. Useful for when multiple update
* need to happen at the same time.
*/
static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
- struct ttm_resource *res, int sign)
+ struct ttm_resource *res, int sign)
{
struct amdgpu_vm *vm = base->vm;
struct amdgpu_bo *bo = base->bo;
@@ -359,8 +372,7 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
*/
if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
vm->stats[res_memtype].drm.purgeable += size;
- if (!(bo->preferred_domains &
- amdgpu_mem_type_to_domain(res_memtype)))
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
vm->stats[bo_memtype].evicted += size;
}
}
@@ -379,9 +391,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
{
struct amdgpu_vm *vm = base->vm;
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
amdgpu_vm_update_stats_locked(base, res, sign);
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
}
/**
@@ -407,10 +419,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@@ -469,10 +481,10 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
int ret;
/* We can only trust prev->next while holding the lock */
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
while (!list_is_head(prev->next, &vm->done)) {
bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
bo = bo_va->base.bo;
if (bo) {
@@ -480,10 +492,10 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
if (unlikely(ret))
return ret;
}
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
prev = prev->next;
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -579,7 +591,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
- struct amdgpu_vm_bo_base *bo_base, *tmp;
+ struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_bo *bo;
int r;
@@ -592,7 +604,13 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
- list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
bo = bo_base->bo;
r = validate(param, bo);
@@ -605,21 +623,26 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
+ spin_lock(&vm->status_lock);
}
+ while (ticket && !list_empty(&vm->evicted_user)) {
+ bo_base = list_first_entry(&vm->evicted_user,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
- if (ticket) {
- list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user,
- vm_status) {
- bo = bo_base->bo;
- dma_resv_assert_held(bo->tbo.base.resv);
+ bo = bo_base->bo;
+ dma_resv_assert_held(bo->tbo.base.resv);
- r = validate(param, bo);
- if (r)
- return r;
+ r = validate(param, bo);
+ if (r)
+ return r;
- amdgpu_vm_bo_invalidated(bo_base);
- }
+ amdgpu_vm_bo_invalidated(bo_base);
+
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@@ -648,7 +671,9 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
+ spin_lock(&vm->status_lock);
ret &= list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
spin_lock(&vm->immediate.lock);
ret &= !vm->immediate.stopped;
@@ -939,13 +964,18 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
- struct amdgpu_vm_bo_base *entry, *tmp;
+ struct amdgpu_vm_bo_base *entry;
bool flush_tlb_needed = false;
+ LIST_HEAD(relocated);
int r, idx;
amdgpu_vm_assert_locked(vm);
- if (list_empty(&vm->relocated))
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->relocated, &relocated);
+ spin_unlock(&vm->status_lock);
+
+ if (list_empty(&relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -961,7 +991,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
- list_for_each_entry(entry, &vm->relocated, vm_status) {
+ list_for_each_entry(entry, &relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@@ -977,7 +1007,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
- list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) {
+ while (!list_empty(&relocated)) {
+ entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
+ vm_status);
amdgpu_vm_bo_idle(entry);
}
@@ -1204,9 +1236,9 @@ error_free:
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
}
/**
@@ -1573,24 +1605,29 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket)
{
- struct amdgpu_bo_va *bo_va, *tmp;
+ struct amdgpu_bo_va *bo_va;
struct dma_resv *resv;
bool clear, unlock;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
/* Per VM BOs never need to bo cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
+ spin_lock(&vm->status_lock);
}
- spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!adev->debug_vm && dma_resv_trylock(resv)) {
@@ -1622,9 +1659,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -2153,9 +2190,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2263,10 +2300,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- spin_lock(&vm->stats_lock);
+ spin_lock(&vm->status_lock);
amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
- spin_unlock(&vm->stats_lock);
+ spin_unlock(&vm->status_lock);
}
amdgpu_vm_bo_invalidate(bo, evicted);
@@ -2534,12 +2571,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
- spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->invalidated);
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
INIT_KFIFO(vm->faults);
- spin_lock_init(&vm->stats_lock);
r = amdgpu_vm_init_entities(adev, vm);
if (r)
@@ -3015,6 +3051,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
amdgpu_vm_assert_locked(vm);
+ spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@@ -3052,13 +3089,11 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
- spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
- spin_unlock(&vm->invalidated_lock);
total_invalidated_objs = id;
id = 0;
@@ -3068,6 +3103,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
+ spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,