drm/gpuvm: Pass map arguments through a struct
We are about to pass more arguments to drm_gpuvm_sm_map[_ops_create](),
so, before we do that, let's pass arguments through a struct instead of
changing each call site every time a new optional argument is added.

Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Brendan King <Brendan.King@imgtec.com>
Cc: Matt Coster <matt.coster@imgtec.com>
Cc: Boris Brezillon <bbrezillon@kernel.org>
Cc: Caterina Shablia <caterina.shablia@collabora.com>
Cc: Rob Clark <robin.clark@oss.qualcomm.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>
Co-developed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rob Clark <robin.clark@oss.qualcomm.com>
Reviewed-by: Matt Coster <matt.coster@imgtec.com> # imagination/pvr_vm.c
Acked-by: Matt Coster <matt.coster@imgtec.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250819162058.2777306-2-himal.prasad.ghimiray@intel.com
commit 000a45dce7
parent f1f2a22b86
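Every call site converted by this patch follows the same before/after
pattern. A minimal sketch of the conversion (the variable names are
illustrative, not taken from any one driver):

	/* Before: each mapping argument is a separate parameter. */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);

	/* After: arguments travel in struct drm_gpuvm_map_req, so future
	 * optional fields can be added without touching every call site
	 * (fields left out of the initializer are simply zero).
	 */
	struct drm_gpuvm_map_req map_req = {
		.map.va.addr = addr,
		.map.va.range = range,
		.map.gem.obj = obj,
		.map.gem.offset = offset,
	};

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);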
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -486,13 +486,18 @@
  *	u64 addr, u64 range,
  *	struct drm_gem_object *obj, u64 offset)
  * {
+ *	struct drm_gpuvm_map_req map_req = {
+ *		.map.va.addr = addr,
+ *		.map.va.range = range,
+ *		.map.gem.obj = obj,
+ *		.map.gem.offset = offset,
+ *	};
  *	struct drm_gpuva_ops *ops;
  *	struct drm_gpuva_op *op
  *	struct drm_gpuvm_bo *vm_bo;
  *
  *	driver_lock_va_space();
- *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- *					  obj, offset);
+ *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
  *	if (IS_ERR(ops))
  *		return PTR_ERR(ops);
  *
@@ -2054,16 +2059,15 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
 
 static int
 op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
-	  u64 addr, u64 range,
-	  struct drm_gem_object *obj, u64 offset)
+	  const struct drm_gpuvm_map_req *req)
 {
 	struct drm_gpuva_op op = {};
 
 	op.op = DRM_GPUVA_OP_MAP;
-	op.map.va.addr = addr;
-	op.map.va.range = range;
-	op.map.gem.obj = obj;
-	op.map.gem.offset = offset;
+	op.map.va.addr = req->map.va.addr;
+	op.map.va.range = req->map.va.range;
+	op.map.gem.obj = req->map.gem.obj;
+	op.map.gem.offset = req->map.gem.offset;
 
 	return fn->sm_step_map(&op, priv);
 }
@@ -2102,10 +2106,14 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
 static int
 __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 		   const struct drm_gpuvm_ops *ops, void *priv,
-		   u64 req_addr, u64 req_range,
-		   struct drm_gem_object *req_obj, u64 req_offset)
+		   const struct drm_gpuvm_map_req *req)
 {
+	struct drm_gem_object *req_obj = req->map.gem.obj;
 	struct drm_gpuva *va, *next;
+
+	u64 req_offset = req->map.gem.offset;
+	u64 req_range = req->map.va.range;
+	u64 req_addr = req->map.va.addr;
 	u64 req_end = req_addr + req_range;
 	int ret;
 
@@ -2236,9 +2244,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 		}
 	}
 
-	return op_map_cb(ops, priv,
-			 req_addr, req_range,
-			 req_obj, req_offset);
+	return op_map_cb(ops, priv, req);
 }
 
 static int
@@ -2303,10 +2309,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
  * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
  * @gpuvm: the &drm_gpuvm representing the GPU VA space
  * @priv: pointer to a driver private data structure
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to struct drm_gpuvm_map_req
  *
 * This function iterates the given range of the GPU VA space. It utilizes the
 * &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2336,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
  */
 int
 drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
-		 u64 req_addr, u64 req_range,
-		 struct drm_gem_object *req_obj, u64 req_offset)
+		 const struct drm_gpuvm_map_req *req)
 {
 	const struct drm_gpuvm_ops *ops = gpuvm->ops;
 
@@ -2343,9 +2345,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
 		      ops->sm_step_unmap)))
 		return -EINVAL;
 
-	return __drm_gpuvm_sm_map(gpuvm, ops, priv,
-				  req_addr, req_range,
-				  req_obj, req_offset);
+	return __drm_gpuvm_sm_map(gpuvm, ops, priv, req);
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
 
@@ -2421,10 +2421,7 @@ static const struct drm_gpuvm_ops lock_ops = {
  * @gpuvm: the &drm_gpuvm representing the GPU VA space
  * @exec: the &drm_exec locking context
  * @num_fences: for newly mapped objects, the # of fences to reserve
- * @req_addr: the start address of the range to unmap
- * @req_range: the range of the mappings to unmap
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: ptr to drm_gpuvm_map_req struct
  *
 * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
 * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2445,9 +2442,7 @@ static const struct drm_gpuvm_ops lock_ops = {
 *		ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
 *		break;
 *	case DRIVER_OP_MAP:
-*		ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
-*						 op->addr, op->range,
-*						 obj, op->obj_offset);
+*		ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
 *		break;
 *	}
 *
@@ -2478,18 +2473,17 @@ static const struct drm_gpuvm_ops lock_ops = {
 int
 drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
 			   struct drm_exec *exec, unsigned int num_fences,
-			   u64 req_addr, u64 req_range,
-			   struct drm_gem_object *req_obj, u64 req_offset)
+			   struct drm_gpuvm_map_req *req)
 {
+	struct drm_gem_object *req_obj = req->map.gem.obj;
+
 	if (req_obj) {
 		int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
 		if (ret)
 			return ret;
 	}
 
-	return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
-				  req_addr, req_range,
-				  req_obj, req_offset);
+	return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req);
 
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
 
@@ -2611,10 +2605,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
 /**
 * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
 * @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: map request arguments
 *
 * This function creates a list of operations to perform splitting and merging
 * of existent mapping(s) with the newly requested one.
@@ -2642,8 +2633,7 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
 */
 struct drm_gpuva_ops *
 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
-			    u64 req_addr, u64 req_range,
-			    struct drm_gem_object *req_obj, u64 req_offset)
+			    const struct drm_gpuvm_map_req *req)
 {
 	struct drm_gpuva_ops *ops;
 	struct {
@@ -2661,9 +2651,7 @@ drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
 	args.vm = gpuvm;
 	args.ops = ops;
 
-	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
-				 req_addr, req_range,
-				 req_obj, req_offset);
+	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req);
 	if (ret)
 		goto err_free_ops;
 
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
 static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
 {
 	switch (bind_op->type) {
-	case PVR_VM_BIND_TYPE_MAP:
+	case PVR_VM_BIND_TYPE_MAP: {
+		const struct drm_gpuvm_map_req map_req = {
+			.map.va.addr = bind_op->device_addr,
+			.map.va.range = bind_op->size,
+			.map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
+			.map.gem.offset = bind_op->offset,
+		};
+
 		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
-					bind_op, bind_op->device_addr,
-					bind_op->size,
-					gem_from_pvr_gem(bind_op->pvr_obj),
-					bind_op->offset);
+					bind_op, &map_req);
+	}
 
 	case PVR_VM_BIND_TYPE_UNMAP:
 		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -1171,11 +1171,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
 					op->obj_offset);
 			break;
 		case MSM_VM_BIND_OP_MAP:
-		case MSM_VM_BIND_OP_MAP_NULL:
-			ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
-							 op->iova, op->range,
-							 op->obj, op->obj_offset);
+		case MSM_VM_BIND_OP_MAP_NULL: {
+			struct drm_gpuvm_map_req map_req = {
+				.map.va.addr = op->iova,
+				.map.va.range = op->range,
+				.map.gem.obj = op->obj,
+				.map.gem.offset = op->obj_offset,
+			};
+
+			ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
 			break;
+		}
 		default:
 			/*
 			 * lookup_op() should have already thrown an error for
@@ -1282,10 +1288,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
 			if (op->flags & MSM_VM_BIND_OP_DUMP)
 				arg.flags |= MSM_VMA_DUMP;
 			fallthrough;
-		case MSM_VM_BIND_OP_MAP_NULL:
-			ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
-					       op->range, op->obj, op->obj_offset);
+		case MSM_VM_BIND_OP_MAP_NULL: {
+			struct drm_gpuvm_map_req map_req = {
+				.map.va.addr = op->iova,
+				.map.va.range = op->range,
+				.map.gem.obj = op->obj,
+				.map.gem.offset = op->obj_offset,
+			};
+
+			ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
 			break;
+		}
 		default:
 			/*
 			 * lookup_op() should have already thrown an error for
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
 			break;
 		case OP_MAP: {
 			struct nouveau_uvma_region *reg;
+			struct drm_gpuvm_map_req map_req = {
+				.map.va.addr = op->va.addr,
+				.map.va.range = op->va.range,
+				.map.gem.obj = op->gem.obj,
+				.map.gem.offset = op->gem.offset,
+			};
 
 			reg = nouveau_uvma_region_find_first(uvmm,
 							     op->va.addr,
@@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
 			}
 
 			op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
-							      op->va.addr,
-							      op->va.range,
-							      op->gem.obj,
-							      op->gem.offset);
+							      &map_req);
 			if (IS_ERR(op->ops)) {
 				ret = PTR_ERR(op->ops);
 				goto unwind_continue;
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -2202,15 +2202,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
 	mutex_lock(&vm->op_lock);
 	vm->op_ctx = op;
 	switch (op_type) {
-	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
+		const struct drm_gpuvm_map_req map_req = {
+			.map.va.addr = op->va.addr,
+			.map.va.range = op->va.range,
+			.map.gem.obj = op->map.vm_bo->obj,
+			.map.gem.offset = op->map.bo_offset,
+		};
+
 		if (vm->unusable) {
 			ret = -EINVAL;
 			break;
 		}
 
-		ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
-				       op->map.vm_bo->obj, op->map.bo_offset);
+		ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
 		break;
+	}
 
 	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
 		ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2316,10 +2316,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 
 	switch (operation) {
 	case DRM_XE_VM_BIND_OP_MAP:
-	case DRM_XE_VM_BIND_OP_MAP_USERPTR:
-		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
-						  obj, bo_offset_or_userptr);
+	case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
+		struct drm_gpuvm_map_req map_req = {
+			.map.va.addr = addr,
+			.map.va.range = range,
+			.map.gem.obj = obj,
+			.map.gem.offset = bo_offset_or_userptr,
+		};
+
+		ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
 		break;
+	}
 	case DRM_XE_VM_BIND_OP_UNMAP:
 		ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
 		break;
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1058,10 +1058,20 @@ struct drm_gpuva_ops {
  */
 #define drm_gpuva_next_op(op) list_next_entry(op, entry)
 
+/**
+ * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
+ */
+struct drm_gpuvm_map_req {
+	/**
+	 * @op_map: struct drm_gpuva_op_map
+	 */
+	struct drm_gpuva_op_map map;
+};
+
 struct drm_gpuva_ops *
 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
-			    u64 addr, u64 range,
-			    struct drm_gem_object *obj, u64 offset);
+			    const struct drm_gpuvm_map_req *req);
 struct drm_gpuva_ops *
 drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
 			      u64 addr, u64 range);
@@ -1205,16 +1215,14 @@ struct drm_gpuvm_ops {
 };
 
 int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
-		     u64 addr, u64 range,
-		     struct drm_gem_object *obj, u64 offset);
+		     const struct drm_gpuvm_map_req *req);
 
 int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
 		       u64 addr, u64 range);
 
 int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
 			       struct drm_exec *exec, unsigned int num_fences,
-			       u64 req_addr, u64 req_range,
-			       struct drm_gem_object *obj, u64 offset);
+			       struct drm_gpuvm_map_req *req);
 
 int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
 				 u64 req_addr, u64 req_range);
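For reference, a self-contained usage sketch of the reworked interface; the
driver function name is hypothetical, but the pattern mirrors the drivers
converted above:

	static int my_driver_map(struct drm_gpuvm *gpuvm, u64 addr, u64 range,
				 struct drm_gem_object *obj, u64 offset)
	{
		struct drm_gpuvm_map_req map_req = {
			.map.va.addr = addr,
			.map.va.range = range,
			.map.gem.obj = obj,
			.map.gem.offset = offset,
		};
		struct drm_gpuva_ops *ops;

		/* Build the split/merge op list for the requested mapping. */
		ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		/* A real driver would walk and apply the ops here. */

		drm_gpuva_ops_free(gpuvm, ops);
		return 0;
	}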