author    Dave Airlie <airlied@redhat.com>  2025-12-05 10:21:11 +1000
committer Dave Airlie <airlied@redhat.com>  2025-12-05 10:21:19 +1000
commit    86fafc584c1977e2bfa05a88bfb8eae78f984f5b (patch)
tree      49d73a6ebbc611fdf44bf244ff374db7b857ddc5 /drivers/gpu
parent    e73c2262041abd630699224159646aa31e5b7697 (diff)
parent    3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9 (diff)
Merge tag 'drm-xe-next-fixes-2025-12-04' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next
Driver Changes:
- Fix a memory leak (Mika)
- Fix a 64-bit division (Michal Wajdeczko)
- VF migration fix (Matt Brost)
- LRC pause fix (Tomasz Lis)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aTIGiHJnnMtqbDOO@fedora
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/xe/xe_gpu_scheduler.h      |  5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c |  2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c         | 47
-rw-r--r--  drivers/gpu/drm/xe/xe_pagefault.c          |  1
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job_types.h    |  4
5 files changed, 36 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 9955397aaaa9..c7a77a3a9681 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -54,13 +54,14 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
struct drm_sched_job *s_job;
+ bool restore_replay = false;
list_for_each_entry(s_job, &sched->base.pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *hw_fence = s_fence->parent;
- if (to_xe_sched_job(s_job)->skip_emit ||
- (hw_fence && !dma_fence_is_signaled(hw_fence)))
+ restore_replay |= to_xe_sched_job(s_job)->restore_replay;
+ if (restore_replay || (hw_fence && !dma_fence_is_signaled(hw_fence)))
sched->base.ops->run_job(s_job);
}
}
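
The |= latch above is the heart of the scheduler fix: once one job in the pending list carries restore_replay, every job after it is resubmitted as well, even if its hardware fence already signaled. A minimal userspace sketch of that latching pattern (the two-field job struct and all names are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct fake_job {
	bool restore_replay;    /* job was re-emitted during restore */
	bool hw_fence_signaled; /* hardware already completed it */
};

static void resubmit(const struct fake_job *jobs, int n)
{
	bool restore_replay = false;
	int i;

	for (i = 0; i < n; i++) {
		/* Latch: stays true for every job after the first flagged one */
		restore_replay |= jobs[i].restore_replay;
		if (restore_replay || !jobs[i].hw_fence_signaled)
			printf("run_job(%d)\n", i);
	}
}

int main(void)
{
	struct fake_job jobs[] = {
		{ .hw_fence_signaled = true },  /* skipped: already done */
		{ .restore_replay = true },     /* replayed */
		{ .hw_fence_signaled = true },  /* still replayed: latch holds */
	};

	resubmit(jobs, 3);
	return 0;
}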
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 62f6cc45a764..59c5c6b4d994 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -711,7 +711,7 @@ static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
if (num_vfs > 56)
return SZ_64M - SZ_8M;
- return rounddown_pow_of_two(shareable / num_vfs);
+ return rounddown_pow_of_two(div_u64(shareable, num_vfs));
}
/**
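
The pf_config change is a 32-bit build fix: a plain u64 division makes the compiler emit a call to libgcc's __udivdi3, which the kernel does not link against, so div_u64() from <linux/math64.h> must be used instead. A userspace sketch of the fixed computation (the function name and the power-of-two loop are illustrative; in-kernel this is div_u64() plus rounddown_pow_of_two()):

#include <stdint.h>

/* Sketch: fair GGTT share per VF, rounded down to a power of two. */
static uint64_t fair_share(uint64_t shareable, unsigned int num_vfs)
{
	/* div_u64(shareable, num_vfs) in the kernel: avoids __udivdi3 on 32-bit */
	uint64_t share = shareable / num_vfs;

	/* rounddown_pow_of_two(): clear low bits until only the top bit remains */
	while (share & (share - 1))
		share &= share - 1;
	return share;
}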
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index d4ffdb71ef3d..ed7be50b2f72 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -822,7 +822,7 @@ static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
- if (!job->skip_emit || job->last_replay) {
+ if (!job->restore_replay || job->last_replay) {
if (xe_exec_queue_is_parallel(q))
wq_item_append(q);
else
@@ -881,10 +881,10 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
register_exec_queue(q, GUC_CONTEXT_NORMAL);
- if (!job->skip_emit)
+ if (!job->restore_replay)
q->ring_ops->emit_job(job);
submit_exec_queue(q, job);
- job->skip_emit = false;
+ job->restore_replay = false;
}
/*
@@ -2112,6 +2112,18 @@ static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
q->guc->resume_time = 0;
}
+static void lrc_parallel_clear(struct xe_lrc *lrc)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ struct iosys_map map = xe_lrc_parallel_map(lrc);
+ int i;
+
+ for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
+ parallel_write(xe, map, wq[i],
+ FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, 0));
+}
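
lrc_parallel_clear() factors the existing WQ-scrubbing loop out of guc_exec_queue_pause() so it can sit behind a NULL check. Each u32 slot is overwritten with a zero-length NOOP descriptor built with FIELD_PREP(). A userspace approximation of that fill (FIELD_PREP is reimplemented here with __builtin_ctz; the masks and opcode values are illustrative placeholders, not the GuC ABI):

#include <stdint.h>

#define FIELD_PREP(mask, val) \
	((((uint32_t)(val)) << __builtin_ctz(mask)) & (mask))

#define WQ_TYPE_MASK 0xf0000000u  /* illustrative field layout */
#define WQ_LEN_MASK  0x0000ffffu
#define WQ_TYPE_NOOP 0x4u         /* illustrative opcode */

static void wq_clear(uint32_t *wq, unsigned int n_dwords)
{
	unsigned int i;

	/* Overwrite every slot with a zero-length NOOP descriptor */
	for (i = 0; i < n_dwords; i++)
		wq[i] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
			FIELD_PREP(WQ_LEN_MASK, 0);
}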
+
/*
* This function is quite complex but is the only real way to ensure no state is
* lost during VF resume flows. The function scans the queue state, makes adjustments
@@ -2135,8 +2147,8 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
guc_exec_queue_revert_pending_state_change(guc, q);
if (xe_exec_queue_is_parallel(q)) {
- struct xe_device *xe = guc_to_xe(guc);
- struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
+ /* Pairs with WRITE_ONCE in __xe_exec_queue_init */
+ struct xe_lrc *lrc = READ_ONCE(q->lrc[0]);
/*
* NOP existing WQ commands that may contain stale GGTT
@@ -2144,14 +2156,14 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
* seems to get confused if the WQ head/tail pointers are
* adjusted.
*/
- for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
- parallel_write(xe, map, wq[i],
- FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
- FIELD_PREP(WQ_LEN_MASK, 0));
+ if (lrc)
+ lrc_parallel_clear(lrc);
}
job = xe_sched_first_pending_job(sched);
if (job) {
+ job->restore_replay = true;
+
/*
* Adjust software tail so jobs submitted overwrite previous
* position in ring buffer with new GGTT addresses.
@@ -2241,17 +2253,18 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
- struct drm_sched_job *s_job;
struct xe_sched_job *job = NULL;
+ bool restore_replay = false;
- list_for_each_entry(s_job, &sched->base.pending_list, list) {
- job = to_xe_sched_job(s_job);
-
- xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
- q->guc->id, xe_sched_job_seqno(job));
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ restore_replay |= job->restore_replay;
+ if (restore_replay) {
+ xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
+ q->guc->id, xe_sched_job_seqno(job));
- q->ring_ops->emit_job(job);
- job->skip_emit = true;
+ q->ring_ops->emit_job(job);
+ job->restore_replay = true;
+ }
}
if (job)
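
The pause path now also tolerates racing with queue init: it snapshots q->lrc[0] with READ_ONCE() and skips the WQ scrub if the LRC has not been published yet, pairing with a WRITE_ONCE() in __xe_exec_queue_init. A userspace sketch of that publish/consume pairing (the volatile-cast macros mirror the kernel's; the structures and names are illustrative):

#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

struct lrc { int id; };

static struct lrc *queue_lrc0; /* published once by init, NULL before that */

static void init_publish(struct lrc *lrc)
{
	/* Pairs with READ_ONCE() in the consumer below */
	WRITE_ONCE(queue_lrc0, lrc);
}

static void pause_consume(void)
{
	struct lrc *lrc = READ_ONCE(queue_lrc0);

	if (lrc) /* init may not have run yet: skip instead of dereferencing */
		printf("scrub WQ of lrc %d\n", lrc->id);
}

int main(void)
{
	struct lrc l = { .id = 0 };

	pause_consume();  /* before publish: skipped */
	init_publish(&l);
	pause_consume();  /* after publish: scrubbed */
	return 0;
}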
diff --git a/drivers/gpu/drm/xe/xe_pagefault.c b/drivers/gpu/drm/xe/xe_pagefault.c
index fe3e40145012..afb06598b6e1 100644
--- a/drivers/gpu/drm/xe/xe_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_pagefault.c
@@ -102,7 +102,6 @@ retry_userptr:
/* Lock VM and BOs dma-resv */
xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
- drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
err = xe_pagefault_begin(&exec, vma, tile->mem.vram,
needs_vram == 1);
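
The xe_pagefault.c hunk removes a double initialization: xe_validation_ctx_init() already initializes the embedded drm_exec, so the extra drm_exec_init() call re-initialized live state. For reference, the corrected shape of a drm_exec locking loop, initialized exactly once (a generic sketch against the drm_exec API, not the xe validation wrapper):

#include <drm/drm_exec.h>

/* Sketch: lock one GEM object with retry on contention. */
static int lock_one(struct drm_gem_object *obj)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); /* once, and only once */
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_lock_obj(&exec, obj);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	/* ... operate on the locked object ... */
	drm_exec_fini(&exec);
	return ret;
}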
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index d26612abb4ca..7c4c54fe920a 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -63,8 +63,8 @@ struct xe_sched_job {
bool ring_ops_flush_tlb;
/** @ggtt: mapped in ggtt. */
bool ggtt;
- /** @skip_emit: skip emitting the job */
- bool skip_emit;
+ /** @restore_replay: job being replayed for restore */
+ bool restore_replay;
/** @last_replay: last job being replayed */
bool last_replay;
/** @ptrs: per instance pointers. */
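
Taken together, the rename from skip_emit to restore_replay documents what the flag actually tracks across the VF migration flow. A comment-only summary of the lifecycle the hunks above establish (paraphrased from this diff, not from separate documentation):

/*
 * restore_replay lifecycle across VF restore (per the hunks above):
 *
 *   guc_exec_queue_pause():           first pending job gets
 *                                     job->restore_replay = true
 *   guc_exec_queue_unpause_prepare(): from the first flagged job on,
 *                                     re-emit ring commands and keep
 *                                     the flag set (latched with |=)
 *   guc_exec_queue_run_job():         skip emit_job() for flagged jobs,
 *                                     submit, then clear the flag
 *   xe_sched_resubmit_jobs():         the latched flag forces run_job()
 *                                     even for already-signaled fences
 */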