drm/xe: Strict migration policy for atomic SVM faults
Mixing GPU and CPU atomics does not work unless a strict migration
policy is enforced: pages targeted by GPU atomics must be in device
memory. Enforce a must-be-in-VRAM policy with a retry loop of 3
attempts; if the retry loop fails, abort the fault.
Remove the always_migrate_to_vram modparam as we now have a real
migration policy.
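In code terms, the enforced flow looks roughly like the sketch below.
This is a simplified illustration, not the exact driver code:
xe_svm_alloc_vram(), xe_svm_range_get_pages() and
xe_svm_range_needs_migrate_to_vram() are real xe helpers, but the
control flow is condensed and the surrounding fault-handler context
(vm, vma, tile, range, ctx) is elided.

	/* Atomic faults must land in VRAM, and only on discrete GPUs. */
	ctx.devmem_only = atomic && IS_DGFX(vm->xe);

	/* Strict policy: up to 3 migration attempts, then abort. */
	migrate_try_count = ctx.devmem_only ? 3 : 1;

retry:
	if (--migrate_try_count >= 0 &&
	    xe_svm_range_needs_migrate_to_vram(range, vma)) {
		err = xe_svm_alloc_vram(vm, tile, range, &ctx);
		if (err) {
			if (migrate_try_count || !ctx.devmem_only)
				goto retry;	/* attempts remain */
			return err;		/* devmem_only: abort fault */
		}
	}

	err = xe_svm_range_get_pages(vm, range, &ctx);
	if (err == -EFAULT || err == -EPERM) {	/* CPU mappings changed */
		if (migrate_try_count > 0 || !ctx.devmem_only)
			goto retry;
		return err;	/* out of retries: abort fault */
	}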
v2:
- Only retry migration on atomics
- Drop always_migrate_to_vram modparam
v3:
- Only set vram_only on DGFX (Himal)
- Bail on get_pages failure if vram_only and retry count exceeded (Himal)
- s/vram_only/devmem_only
- Update xe_svm_range_is_valid to accept devmem_only argument
v4:
- Fix logic bug on get_pages failure
v5:
- Fix commit message (Himal)
- Mention removing always_migrate_to_vram in commit message (Lucas)
- Fix xe_svm_range_is_valid to check for devmem pages
- Bail on devmem_only && !migrate_devmem (Thomas)
v6:
- Add READ_ONCE barriers for opportunistic checks (Thomas)
- Pair READ_ONCE with WRITE_ONCE (Thomas)
v7:
- Adjust comments (Thomas)
Fixes: 2f118c9491 ("drm/xe: Add SVM VRAM migration")
Cc: stable@vger.kernel.org
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://lore.kernel.org/r/20250512135500.1405019-3-matthew.brost@intel.com
@@ -1118,6 +1118,10 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
 	lockdep_assert_held(&gpusvm->notifier_lock);
 
 	if (range->flags.has_dma_mapping) {
+		struct drm_gpusvm_range_flags flags = {
+			.__flags = range->flags.__flags,
+		};
+
 		for (i = 0, j = 0; i < npages; j++) {
 			struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
 
@@ -1131,8 +1135,12 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
 						    dev, *addr);
 			i += 1 << addr->order;
 		}
-		range->flags.has_devmem_pages = false;
-		range->flags.has_dma_mapping = false;
+
+		/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+		flags.has_devmem_pages = false;
+		flags.has_dma_mapping = false;
+		WRITE_ONCE(range->flags.__flags, flags.__flags);
+
 		range->dpagemap = NULL;
 	}
 }
@@ -1334,6 +1342,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
 	int err = 0;
 	struct dev_pagemap *pagemap;
 	struct drm_pagemap *dpagemap;
+	struct drm_gpusvm_range_flags flags;
 
 retry:
 	hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
@@ -1378,7 +1387,8 @@ map_pages:
 	 */
 	drm_gpusvm_notifier_lock(gpusvm);
 
-	if (range->flags.unmapped) {
+	flags.__flags = range->flags.__flags;
+	if (flags.unmapped) {
 		drm_gpusvm_notifier_unlock(gpusvm);
 		err = -EFAULT;
 		goto err_free;
@@ -1474,14 +1484,17 @@ map_pages:
 		}
 		i += 1 << order;
 		num_dma_mapped = i;
-		range->flags.has_dma_mapping = true;
+		flags.has_dma_mapping = true;
 	}
 
 	if (zdd) {
-		range->flags.has_devmem_pages = true;
+		flags.has_devmem_pages = true;
 		range->dpagemap = dpagemap;
 	}
 
+	/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+	WRITE_ONCE(range->flags.__flags, flags.__flags);
+
 	drm_gpusvm_notifier_unlock(gpusvm);
 	kvfree(pfns);
 set_seqno:
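The WRITE_ONCE above publishes the whole flag word in a single store so
that lockless readers can sample it with a paired READ_ONCE. A minimal
sketch of such an opportunistic reader follows; the helper name is
hypothetical, and the real checks live in the xe SVM fault path.

/* Hypothetical lockless reader, shown only to illustrate the pairing. */
static bool range_pages_look_valid(struct drm_gpusvm_range *range)
{
	struct drm_gpusvm_range_flags flags;

	/*
	 * READ_ONCE pairs with the WRITE_ONCE above: the flag word is
	 * published in one store, so a reader may observe a stale value
	 * but never a torn one. The answer is only a hint and must be
	 * re-validated under gpusvm->notifier_lock before use.
	 */
	flags.__flags = READ_ONCE(range->flags.__flags);

	return flags.has_devmem_pages || flags.has_dma_mapping;
}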