author     Dave Airlie <airlied@redhat.com>	2025-12-05 10:16:14 +1000
committer  Dave Airlie <airlied@redhat.com>	2025-12-05 10:16:25 +1000
commit     55a271a0f7e073032e9c8b1c1e30916d90be5a8a (patch)
tree       e8d567bd14e995ed4df1076df274a2c14bf81500 /drivers/gpu/drm
parent     0692602defb0c273f80dec9c564ca50726404aca (diff)
parent     1f5556ec8b9efbb784aeb3536e147182dee73d0f (diff)
Merge tag 'topic/xe-vfio-2025-12-01' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next
Cross-subsystem Changes:
- Add device specific vfio_pci driver variant for intel graphics (Michal Winiarski)

Driver Changes:
- Add scope-based cleanup helper for runtime PM (Matt Roper)
- Additional xe driver prerequisites and exports (Michal Winiarski)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aS1bNpqeem6PIHrA@fedora
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/xe/Makefile                          4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c        9
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c                         17
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.h                          3
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c                          21
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.h                          17
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_pf_migration.c          35
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_pf_migration.h           1
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h     4
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_vfio.c                  80
10 files changed, 184 insertions, 7 deletions
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index e4b273b025d2..a1f86a59d687 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -184,6 +184,10 @@ xe-$(CONFIG_PCI_IOV) += \
xe_sriov_pf_sysfs.o \
xe_tile_sriov_pf_debugfs.o
+ifeq ($(CONFIG_PCI_IOV),y)
+ xe-$(CONFIG_XE_VFIO_PCI) += xe_sriov_vfio.o
+endif
+
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
xe-y += tests/xe_kunit_helpers.o
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index d5d918ddce4f..3174a8dee779 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -17,6 +17,7 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
@@ -1023,6 +1024,12 @@ static void action_ring_cleanup(void *arg)
ptr_ring_cleanup(r, destroy_pf_packet);
}
+static void pf_gt_migration_check_support(struct xe_gt *gt)
+{
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) < MAKE_GUC_VER(70, 54, 0))
+ xe_sriov_pf_migration_disable(gt_to_xe(gt), "requires GuC version >= 70.54.0");
+}
+
/**
* xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
* @gt: the &xe_gt
@@ -1039,6 +1046,8 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ pf_gt_migration_check_support(gt);
+
if (!pf_migration_supported(gt))
return 0;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index bbe6f8e65844..0e39a0255b99 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -1223,6 +1223,23 @@ static struct pci_driver xe_pci_driver = {
#endif
};
+/**
+ * xe_pci_to_pf_device() - Get PF &xe_device.
+ * @pdev: the VF &pci_dev device
+ *
+ * Return: pointer to PF &xe_device, NULL otherwise.
+ */
+struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev)
+{
+ struct drm_device *drm;
+
+ drm = pci_iov_get_pf_drvdata(pdev, &xe_pci_driver);
+ if (IS_ERR(drm))
+ return NULL;
+
+ return to_xe_device(drm);
+}
+
int xe_register_pci_driver(void)
{
return pci_register_driver(&xe_pci_driver);
diff --git a/drivers/gpu/drm/xe/xe_pci.h b/drivers/gpu/drm/xe/xe_pci.h
index 611c1209b14c..11bcc5fe2c5b 100644
--- a/drivers/gpu/drm/xe/xe_pci.h
+++ b/drivers/gpu/drm/xe/xe_pci.h
@@ -6,7 +6,10 @@
#ifndef _XE_PCI_H_
#define _XE_PCI_H_
+struct pci_dev;
+
int xe_register_pci_driver(void);
void xe_unregister_pci_driver(void);
+struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev);
#endif
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 44924512830f..766922530265 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -726,6 +726,13 @@ static void xe_pm_runtime_lockdep_prime(void)
/**
* xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
* @xe: xe device instance
+ *
+ * When possible, scope-based runtime PM (through guard(xe_pm_runtime)) should
+ * be preferred over direct usage of this function. Manual get/put handling
+ * should only be used when the function contains goto-based logic which
+ * can break scope-based handling, or when the lifetime of the runtime PM
+ * reference does not match a specific scope (e.g., runtime PM obtained in one
+ * function and released in a different one).
*/
void xe_pm_runtime_get(struct xe_device *xe)
{
@@ -758,6 +765,13 @@ void xe_pm_runtime_put(struct xe_device *xe)
* xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
* @xe: xe device instance
*
+ * When possible, scope-based runtime PM (through
+ * ACQUIRE(xe_pm_runtime_ioctl, ...)) should be preferred over direct usage of this
+ * function. Manual get/put handling should only be used when the function
+ * contains goto-based logic which can break scope-based handling, or when the
+ * lifetime of the runtime PM reference does not match a specific scope (e.g.,
+ * runtime PM obtained in one function and released in a different one).
+ *
* Returns: Any number greater than or equal to 0 for success, negative error
* code otherwise.
*/
@@ -827,6 +841,13 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
* It will warn if not protected.
* The reference should be put back after this function regardless, since it
* will always bump the usage counter.
+ *
+ * When possible, scope-based runtime PM (through guard(xe_pm_runtime_noresume))
+ * should be preferred over direct usage of this function. Manual get/put handling
+ * should only be used when the function contains goto-based logic which can
+ * break scope-based handling, or when the lifetime of the runtime PM reference
+ * does not match a specific scope (e.g., runtime PM obtained in one function
+ * and released in a different one).
*/
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
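
As an illustration of the guidance in the new kernel-doc above (a sketch, not code from this series): a caller that previously paired xe_pm_runtime_get() with xe_pm_runtime_put() on every return path can let the guard drop the reference at end of scope. example_show_freq() and read_freq_register() are hypothetical names used only for illustration.

/*
 * Scope-based form: guard() takes the runtime PM reference here and drops it
 * automatically when the scope ends, replacing a manual
 * xe_pm_runtime_get()/xe_pm_runtime_put() pair on every return path.
 */
static int example_show_freq(struct xe_device *xe, u32 *freq)
{
	guard(xe_pm_runtime)(xe);

	return read_freq_register(xe, freq);	/* hypothetical helper */
}
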
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index f7f89a18b6fc..6b27039e7b2d 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -6,6 +6,7 @@
#ifndef _XE_PM_H_
#define _XE_PM_H_
+#include <linux/cleanup.h>
#include <linux/pm_runtime.h>
#define DEFAULT_VRAM_THRESHOLD 300 /* in MB */
@@ -37,4 +38,20 @@ int xe_pm_block_on_suspend(struct xe_device *xe);
void xe_pm_might_block_on_suspend(void);
int xe_pm_module_init(void);
+static inline void __xe_pm_runtime_noop(struct xe_device *xe) {}
+
+DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
+ xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))
+DEFINE_GUARD(xe_pm_runtime_noresume, struct xe_device *,
+ xe_pm_runtime_get_noresume(_T), xe_pm_runtime_put(_T))
+DEFINE_GUARD_COND(xe_pm_runtime, _ioctl, xe_pm_runtime_get_ioctl(_T), _RET >= 0)
+
+/*
+ * Used when a function needs to release runtime PM in all possible cases
+ * and error paths, but the wakeref was already acquired by a different
+ * function (i.e., get() has already happened so only a put() is needed).
+ */
+DEFINE_GUARD(xe_pm_runtime_release_only, struct xe_device *,
+ __xe_pm_runtime_noop(_T), xe_pm_runtime_put(_T));
+
#endif
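
A rough usage sketch for the remaining classes defined above (assumptions, not code from this series): the conditional xe_pm_runtime_ioctl class is intended to be taken with the ACQUIRE()/ACQUIRE_ERR() helpers from <linux/cleanup.h>, as referenced in the xe_pm_runtime_get_ioctl() kernel-doc, while xe_pm_runtime_release_only covers functions that inherit a wakeref from their caller. example_ioctl_handler(), example_do_work() and example_finish() are hypothetical names; release semantics follow the DEFINE_GUARD_COND/DEFINE_GUARD definitions above.

/* Hypothetical ioctl path using the conditional class. */
static int example_ioctl_handler(struct xe_device *xe)
{
	int ret;

	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);		/* xe_pm_runtime_get_ioctl() */
	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
	if (ret)
		return ret;

	return example_do_work(xe);			/* hypothetical helper */
}

/*
 * Hypothetical consumer of xe_pm_runtime_release_only: the wakeref was taken
 * by the caller and must be dropped here on every return path.
 */
static int example_finish(struct xe_device *xe, bool failed)
{
	guard(xe_pm_runtime_release_only)(xe);

	if (failed)
		return -EIO;

	return 0;
}
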
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
index de06cc690fc8..6c4b16409cc9 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
@@ -46,13 +46,37 @@ bool xe_sriov_pf_migration_supported(struct xe_device *xe)
{
xe_assert(xe, IS_SRIOV_PF(xe));
- return xe->sriov.pf.migration.supported;
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG) || !xe->sriov.pf.migration.disabled;
}
-static bool pf_check_migration_support(struct xe_device *xe)
+/**
+ * xe_sriov_pf_migration_disable() - Turn off SR-IOV VF migration support on PF.
+ * @xe: the &xe_device instance.
+ * @fmt: format string for the log message, to be combined with following VAs.
+ */
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+ xe_sriov_notice(xe, "migration %s: %pV\n",
+ IS_ENABLED(CONFIG_DRM_XE_DEBUG) ?
+ "missing prerequisite" : "disabled",
+ &vaf);
+ va_end(va_args);
+
+ xe->sriov.pf.migration.disabled = true;
+}
+
+static void pf_migration_check_support(struct xe_device *xe)
{
- /* XXX: for now this is for feature enabling only */
- return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+ if (!xe_device_has_memirq(xe))
+ xe_sriov_pf_migration_disable(xe, "requires memory-based IRQ support");
}
static void pf_migration_cleanup(void *arg)
@@ -77,7 +101,8 @@ int xe_sriov_pf_migration_init(struct xe_device *xe)
xe_assert(xe, IS_SRIOV_PF(xe));
- xe->sriov.pf.migration.supported = pf_check_migration_support(xe);
+ pf_migration_check_support(xe);
+
if (!xe_sriov_pf_migration_supported(xe))
return 0;
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
index b806298a0bb6..f8f408df8481 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
@@ -14,6 +14,7 @@ struct xe_sriov_packet;
int xe_sriov_pf_migration_init(struct xe_device *xe);
bool xe_sriov_pf_migration_supported(struct xe_device *xe);
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...);
int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
struct xe_sriov_packet *data);
struct xe_sriov_packet *
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
index 363d673ee1dd..7d9a8a278d91 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
@@ -14,8 +14,8 @@
* struct xe_sriov_pf_migration - Xe device level VF migration data
*/
struct xe_sriov_pf_migration {
- /** @supported: indicates whether VF migration feature is supported */
- bool supported;
+ /** @disabled: indicates whether VF migration feature is disabled */
+ bool disabled;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c
new file mode 100644
index 000000000000..e9a7615bb5c5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/intel/xe_sriov_vfio.h>
+#include <linux/cleanup.h>
+
+#include "xe_pci.h"
+#include "xe_pm.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+
+struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev)
+{
+ return xe_pci_to_pf_device(pdev);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");
+
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
+{
+ if (!IS_SRIOV_PF(xe))
+ return false;
+
+ return xe_sriov_pf_migration_supported(xe);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci");
+
+#define DEFINE_XE_SRIOV_VFIO_FUNCTION(_type, _func, _impl) \
+_type xe_sriov_vfio_##_func(struct xe_device *xe, unsigned int vfid) \
+{ \
+ if (!IS_SRIOV_PF(xe)) \
+ return -EPERM; \
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe)) \
+ return -EINVAL; \
+ \
+ guard(xe_pm_runtime_noresume)(xe); \
+ \
+ return xe_sriov_pf_##_impl(xe, vfid); \
+} \
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_##_func, "xe-vfio-pci")
+
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, wait_flr_done, control_wait_flr);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, suspend_device, control_pause_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_device, control_resume_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_enter, control_trigger_save_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_exit, control_finish_save_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_enter, control_trigger_restore_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_exit, control_finish_restore_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, error, control_stop_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(ssize_t, stop_copy_size, migration_size);
+
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+ char __user *buf, size_t len)
+{
+ if (!IS_SRIOV_PF(xe))
+ return -EPERM;
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
+ return -EINVAL;
+
+ guard(xe_pm_runtime_noresume)(xe);
+
+ return xe_sriov_pf_migration_read(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci");
+
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+ const char __user *buf, size_t len)
+{
+ if (!IS_SRIOV_PF(xe))
+ return -EPERM;
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
+ return -EINVAL;
+
+ guard(xe_pm_runtime_noresume)(xe);
+
+ return xe_sriov_pf_migration_write(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci");
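
For context, a rough sketch of how the xe-vfio-pci variant driver might drive these exports during a stop-copy (save) cycle; the ordering mirrors the export names, but example_save_vf() and the surrounding flow are illustrative assumptions, not code from the vfio side of the series.

/* Hypothetical save-side flow in the vfio variant driver (illustration only). */
static ssize_t example_save_vf(struct pci_dev *vf_pdev, unsigned int vfid,
			       char __user *buf, size_t len)
{
	struct xe_device *xe = xe_sriov_vfio_get_pf(vf_pdev);
	ssize_t size;
	int ret;

	if (!xe || !xe_sriov_vfio_migration_supported(xe))
		return -EOPNOTSUPP;

	ret = xe_sriov_vfio_suspend_device(xe, vfid);	/* quiesce the VF */
	if (ret)
		return ret;

	ret = xe_sriov_vfio_stop_copy_enter(xe, vfid);	/* start producing state */
	if (ret)
		return ret;

	size = xe_sriov_vfio_stop_copy_size(xe, vfid);	/* advisory size estimate */
	if (size < 0)
		return size;

	/* Stream the device state to userspace; typically called repeatedly. */
	return xe_sriov_vfio_data_read(xe, vfid, buf, len);
}

In the real variant driver these calls would be wired into the vfio migration v2 state machine (e.g. the STOP and STOP_COPY transitions) rather than a single function; the sketch only shows the ordering of the xe-side entry points.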