author     Jani Nikula <jani.nikula@intel.com>  2025-11-11 12:32:07 +0200
committer  Jani Nikula <jani.nikula@intel.com>  2025-11-11 12:32:07 +0200
commit     1c1960f57151e36f550d1a53352f832d5aaa5f8f (patch)
tree       102cdd74d069bfcc1c83f99db00ae59f82515c5c /drivers/gpu/drm/scheduler/sched_main.c
parent     e109f644b871df8440c886a69cdce971ed533088 (diff)
parent     e237dfe70867f02de223e36340fe5f8b0fe0eada (diff)
Merge drm/drm-next into drm-intel-next
Primarily sync with the drm_print.h changes from drm-misc.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  31
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 46119aacb809..2463ced2427a 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -965,13 +965,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
         dma_resv_assert_held(resv);
         dma_resv_for_each_fence(&cursor, resv, usage, fence) {
-                /* Make sure to grab an additional ref on the added fence */
-                dma_fence_get(fence);
-                ret = drm_sched_job_add_dependency(job, fence);
-                if (ret) {
-                        dma_fence_put(fence);
+                /*
+                 * As drm_sched_job_add_dependency always consumes the fence
+                 * reference (even when it fails), and dma_resv_for_each_fence
+                 * is not obtaining one, we need to grab one before calling.
+                 */
+                ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
+                if (ret)
                         return ret;
-                }
         }
         return 0;
 }
@@ -1236,8 +1237,13 @@ static void drm_sched_run_job_work(struct work_struct *w)
         /* Find entity with a ready job */
         entity = drm_sched_select_entity(sched);
-        if (!entity)
-                return; /* No more work */
+        if (!entity) {
+                /*
+                 * Either no more work to do, or the next ready job needs more
+                 * credits than the scheduler has currently available.
+                 */
+                return;
+        }
         sched_job = drm_sched_entity_pop_job(entity);
         if (!sched_job) {
@@ -1419,7 +1425,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
                 struct drm_sched_rq *rq = sched->sched_rq[i];
                 spin_lock(&rq->lock);
-                list_for_each_entry(s_entity, &rq->entities, list)
+                list_for_each_entry(s_entity, &rq->entities, list) {
                         /*
                          * Prevents reinsertion and marks job_queue as idle,
                          * it will be removed from the rq in drm_sched_entity_fini()
@@ -1440,8 +1446,15 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
                          * For now, this remains a potential race in all
                          * drivers that keep entities alive for longer than
                          * the scheduler.
+                         *
+                         * The READ_ONCE() is there to make the lockless read
+                         * (warning about the lockless write below) slightly
+                         * less broken...
                          */
+                        if (!READ_ONCE(s_entity->stopped))
+                                dev_warn(sched->dev, "Tearing down scheduler with active entities!\n");
                         s_entity->stopped = true;
+                }
                 spin_unlock(&rq->lock);
                 kfree(sched->sched_rq[i]);
         }
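
The first hunk leans on a reference-ownership rule: drm_sched_job_add_dependency() consumes the fence reference it is handed, even when it fails, while dma_resv_for_each_fence() only lends the caller a borrowed pointer, so the caller must take its own reference with dma_fence_get() right at the call site. Below is a minimal userspace sketch of that "callee consumes the reference, success or failure" pattern; obj, obj_get(), obj_put(), dep_slot and add_dep() are made-up stand-ins, not kernel API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object standing in for struct dma_fence. */
struct obj {
        int refcount;
};

static struct obj *dep_slot;    /* stands in for the job's dependency storage */

static struct obj *obj_get(struct obj *o)
{
        o->refcount++;
        return o;
}

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                free(o);
}

/*
 * Consumes the caller's reference in every case: on success it is kept in
 * dep_slot, on failure it is dropped right here.  This mirrors the behaviour
 * the new comment describes for drm_sched_job_add_dependency().
 */
static int add_dep(struct obj *o, int fail)
{
        if (fail) {
                obj_put(o);
                return -ENOMEM;
        }
        dep_slot = o;
        return 0;
}

int main(void)
{
        struct obj *fence = calloc(1, sizeof(*fence));
        int ret;

        fence->refcount = 1;            /* reference held by the "resv" */

        /*
         * The loop body only borrows 'fence', so take an extra reference at
         * the consuming call, just like
         * drm_sched_job_add_dependency(job, dma_fence_get(fence)).
         */
        ret = add_dep(obj_get(fence), 0);
        printf("add_dep: %d, refcount: %d\n", ret, fence->refcount);

        obj_put(fence);                 /* the "resv" drops its reference */
        if (dep_slot)
                obj_put(dep_slot);      /* "job" teardown drops its reference */
        return 0;
}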
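
The second hunk only rewords the early return in drm_sched_run_job_work(): a NULL result from drm_sched_select_entity() can mean either that nothing is queued or that the next ready job would need more credits than the scheduler currently has free. The sketch below illustrates that kind of credit gate in plain C; scheduler, entity, job, credits_free() and select_entity() are illustrative names only and do not reflect the scheduler's actual internals.

#include <stddef.h>
#include <stdio.h>

/* Illustrative structures, not the real drm_sched types. */
struct job {
        unsigned int credits;           /* cost of putting this job on the ring */
};

struct entity {
        struct job *head;               /* next job this entity wants to run */
};

struct scheduler {
        unsigned int credit_limit;
        unsigned int credit_count;      /* credits currently in flight */
};

/* Credits still available on the hardware ring. */
static unsigned int credits_free(const struct scheduler *s)
{
        return s->credit_limit - s->credit_count;
}

/*
 * Pick the first entity whose head job fits in the free credits.  A NULL
 * return therefore means "no work" *or* "work exists but it needs more
 * credits than are currently available", which is exactly the ambiguity the
 * new comment in drm_sched_run_job_work() spells out.
 */
static struct entity *select_entity(struct scheduler *s,
                                    struct entity **entities, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                struct entity *e = entities[i];

                if (e->head && e->head->credits <= credits_free(s))
                        return e;
        }
        return NULL;
}

int main(void)
{
        struct scheduler s = { .credit_limit = 4, .credit_count = 3 };
        struct job big = { .credits = 2 };
        struct entity e = { .head = &big };
        struct entity *list[] = { &e };

        /* Only 1 credit free, the job needs 2: selection comes back empty. */
        printf("selected: %p\n", (void *)select_entity(&s, list, 1));
        return 0;
}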
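
The drm_sched_fini() hunk warns when an entity is still live at teardown and reads s_entity->stopped through READ_ONCE() because, as its comment notes, the flag can also be accessed without a common lock; the macro at least guarantees a single, untorn load that the compiler cannot fuse or re-read. Here is a self-contained sketch of that idiom with simplified READ_ONCE()/WRITE_ONCE() definitions modeled on the kernel's volatile-cast versions; unlike the hunk, the sketch also uses WRITE_ONCE() on the store, purely for symmetry.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified userspace stand-ins for the kernel macros: a volatile access
 * forces one real load/store and keeps the compiler from tearing, fusing or
 * re-reading it (it does NOT add any CPU memory barriers).
 */
#define READ_ONCE(x)      (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

struct entity {
        bool stopped;
};

/* Teardown path: warn if someone left an entity running, then stop it. */
static void fini_entity(struct entity *e, const char *name)
{
        if (!READ_ONCE(e->stopped))
                fprintf(stderr, "%s: tearing down with active entity!\n", name);
        WRITE_ONCE(e->stopped, true);
}

int main(void)
{
        struct entity still_running = { .stopped = false };
        struct entity already_stopped = { .stopped = true };

        fini_entity(&still_running, "gpu0");
        fini_entity(&already_stopped, "gpu1");
        return 0;
}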