mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-18 03:23:53 -04:00
drm/i915: Wrap all access to i915_vma.node.start|size
We already wrap i915_vma.node.start for use with the GGTT, as there we can perform additional sanity checks that the node belongs to the GGTT and fits within the 32b registers. In the next couple of patches, we will introduce guard pages around the objects _inside_ the drm_mm_node allocation. That is we will offset the vma->pages so that the first page is at drm_mm_node.start + vma->guard (not 0 as is currently the case). All users must then not use i915_vma.node.start directly, but compute the guard offset, thus all users are converted to use a i915_vma_offset() wrapper. The notable exceptions are the selftests that are testing exact behaviour of i915_vma_pin/i915_vma_insert. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Tejas Upadhyay <tejaskumarx.surendrakumar.upadhyay@intel.com> Co-developed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20221130235805.221010-3-andi.shyti@linux.intel.com
This commit is contained in:
@@ -1017,8 +1017,8 @@ empty_request(struct intel_engine_cs *engine,
 	return request;
 
 	err = engine->emit_bb_start(request,
-				    batch->node.start,
-				    batch->node.size,
+				    i915_vma_offset(batch),
+				    i915_vma_size(batch),
 				    I915_DISPATCH_SECURE);
 	if (err)
 		goto out_request;
@@ -1138,14 +1138,14 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 
 	if (ver >= 8) {
 		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-		*cmd++ = lower_32_bits(vma->node.start);
-		*cmd++ = upper_32_bits(vma->node.start);
+		*cmd++ = lower_32_bits(i915_vma_offset(vma));
+		*cmd++ = upper_32_bits(i915_vma_offset(vma));
 	} else if (ver >= 6) {
 		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
-		*cmd++ = lower_32_bits(vma->node.start);
+		*cmd++ = lower_32_bits(i915_vma_offset(vma));
 	} else {
 		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-		*cmd++ = lower_32_bits(vma->node.start);
+		*cmd++ = lower_32_bits(i915_vma_offset(vma));
 	}
 	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
 
@@ -1227,8 +1227,8 @@ static int live_all_engines(void *arg)
 		GEM_BUG_ON(err);
 
 		err = engine->emit_bb_start(request[idx],
-					    batch->node.start,
-					    batch->node.size,
+					    i915_vma_offset(batch),
+					    i915_vma_size(batch),
 					    0);
 		GEM_BUG_ON(err);
 		request[idx]->batch = batch;
@@ -1354,8 +1354,8 @@ static int live_sequential_engines(void *arg)
 		GEM_BUG_ON(err);
 
 		err = engine->emit_bb_start(request[idx],
-					    batch->node.start,
-					    batch->node.size,
+					    i915_vma_offset(batch),
+					    i915_vma_size(batch),
 					    0);
 		GEM_BUG_ON(err);
 		request[idx]->batch = batch;
 
Reference in New Issue
Block a user