path: root/drivers/gpu/drm/drm_gpuvm.c
author	Boris Brezillon <boris.brezillon@collabora.com>	2025-08-19 21:50:55 +0530
committer	Matthew Brost <matthew.brost@intel.com>	2025-08-19 21:19:31 -0700
commit	000a45dce7adc13e45b2925b383e39f32e5f3004 (patch)
tree	e039967de1449e7284e53511b292e4d33c46ae36 /drivers/gpu/drm/drm_gpuvm.c
parent	f1f2a22b8683d7ac38821d4508d4549a2f0c0a0a (diff)
drm/gpuvm: Pass map arguments through a struct
We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](), so, before we do that, let's pass arguments through a struct instead of changing each call site every time a new optional argument is added.

Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Brendan King <Brendan.King@imgtec.com>
Cc: Matt Coster <matt.coster@imgtec.com>
Cc: Boris Brezillon <bbrezillon@kernel.org>
Cc: Caterina Shablia <caterina.shablia@collabora.com>
Cc: Rob Clark <robin.clark@oss.qualcomm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>
Co-developed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rob Clark <robin.clark@oss.qualcomm.com>
Reviewed-by: Matt Coster <matt.coster@imgtec.com> # imagination/pvr_vm.c
Acked-by: Matt Coster <matt.coster@imgtec.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250819162058.2777306-2-himal.prasad.ghimiray@intel.com
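For reference, a minimal sketch of the request struct this patch introduces, inferred from the field accesses visible in the diff below (req->map.va.addr and friends); the actual definition lives in include/drm/drm_gpuvm.h and is not part of this file's diff:

	/* Sketch only: the request embeds a struct drm_gpuva_op_map, whose
	 * .va.addr, .va.range, .gem.obj and .gem.offset mirror the old
	 * positional arguments of drm_gpuvm_sm_map[_ops_create](). */
	struct drm_gpuvm_map_req {
		struct drm_gpuva_op_map map;
	};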
Diffstat (limited to 'drivers/gpu/drm/drm_gpuvm.c')
-rw-r--r--	drivers/gpu/drm/drm_gpuvm.c	74
1 file changed, 31 insertions(+), 43 deletions(-)
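Before reading the diff, a hedged sketch of how a hypothetical driver call site migrates; the variable names (gpuvm, addr, range, obj, offset) are illustrative, not taken from any particular driver:

	/* Before this patch: one positional argument per mapping property. */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);

	/* After this patch: a single request struct, so future optional
	 * fields can be added without touching every existing caller. */
	struct drm_gpuvm_map_req map_req = {
		.map.va.addr = addr,
		.map.va.range = range,
		.map.gem.obj = obj,
		.map.gem.offset = offset,
	};

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);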
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index bbc7fecb6f4a..6c18cec70f11 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -486,13 +486,18 @@
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
+ * struct drm_gpuvm_map_req map_req = {
+ * .map.va.addr = addr,
+ * .map.va.range = range,
+ * .map.gem.obj = obj,
+ * .map.gem.offset = offset,
+ * };
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op;
* struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
- * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- * obj, offset);
+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
@@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
+ const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_op op = {};
op.op = DRM_GPUVA_OP_MAP;
- op.map.va.addr = addr;
- op.map.va.range = range;
- op.map.gem.obj = obj;
- op.map.gem.offset = offset;
+ op.map.va.addr = req->map.va.addr;
+ op.map.va.range = req->map.va.range;
+ op.map.gem.obj = req->map.gem.obj;
+ op.map.gem.offset = req->map.gem.offset;
return fn->sm_step_map(&op, priv);
}
@@ -2102,10 +2106,14 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
struct drm_gpuva *va, *next;
+
+ u64 req_offset = req->map.gem.offset;
+ u64 req_range = req->map.va.range;
+ u64 req_addr = req->map.va.addr;
u64 req_end = req_addr + req_range;
int ret;
@@ -2236,9 +2244,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
}
}
- return op_map_cb(ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return op_map_cb(ops, priv, req);
}
static int
@@ -2303,10 +2309,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
* drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to the &drm_gpuvm_map_req describing the new mapping
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2336,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
*/
int
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
@@ -2343,9 +2345,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
@@ -2421,10 +2421,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @exec: the &drm_exec locking context
* @num_fences: for newly mapped objects, the # of fences to reserve
- * @req_addr: the start address of the range to unmap
- * @req_range: the range of the mappings to unmap
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to the &drm_gpuvm_map_req describing the new mapping
*
* This function locks (drm_exec_lock_obj()) objects that will be unmapped/
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2445,9 +2442,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
* break;
* case DRIVER_OP_MAP:
- * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
- * op->addr, op->range,
- * obj, op->obj_offset);
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
* break;
* }
*
@@ -2478,18 +2473,17 @@ static const struct drm_gpuvm_ops lock_ops = {
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ struct drm_gpuvm_map_req *req)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+
if (req_obj) {
int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
if (ret)
return ret;
}
- return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2611,10 +2605,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to the &drm_gpuvm_map_req describing the new mapping
*
* This function creates a list of operations to perform splitting and merging
* of existent mapping(s) with the newly requested one.
@@ -2642,8 +2633,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
*/
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_ops *ops;
struct {
@@ -2661,9 +2651,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
args.vm = gpuvm;
args.ops = ops;
- ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
- req_addr, req_range,
- req_obj, req_offset);
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
if (ret)
goto err_free_ops;