Revert "drm/amdkfd: Use partial migrations in GPU page faults"

This reverts commit dc427a473e.

The change prevents migrating the entire range to VRAM, because the retry
fault restore_pages path maps the remaining system memory range to GPUs. It
will work correctly when submitted together with the partial mapping to GPU
patch later.
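
For illustration, the difference between the whole-range bounds restored by
this revert and the granularity-aligned window used by the reverted code in
svm_migrate_to_ram() can be sketched as below. This is a standalone sketch,
not the kernel code: the struct, the ALIGN/MIN/MAX helpers and the example
values are simplified stand-ins.

  /* Standalone sketch, not the kernel code: simplified svm_range and
   * locally defined ALIGN/MIN/MAX helpers; the values are examples only. */
  #include <stdio.h>

  #define PAGE_SHIFT 12
  #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))            /* a: power of two */
  #define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
  #define MIN(a, b)        ((a) < (b) ? (a) : (b))
  #define MAX(a, b)        ((a) > (b) ? (a) : (b))

  struct svm_range {                  /* simplified stand-in */
          unsigned long start;        /* first page number of the range */
          unsigned long last;         /* last page number of the range */
          unsigned int granularity;   /* log2 of migration granularity in pages */
  };

  int main(void)
  {
          struct svm_range prange = { .start = 0x100, .last = 0x2ff, .granularity = 9 };
          unsigned long addr = 0x1a3; /* faulting page inside the range */

          /* whole-range byte bounds, as restored by this revert */
          unsigned long start = prange.start << PAGE_SHIFT;
          unsigned long end = (prange.last + 1) << PAGE_SHIFT;
          printf("full range:    [0x%lx, 0x%lx)\n", start, end);

          /* granularity-aligned page window around the fault, as used by the
           * partial-migration code being reverted in svm_migrate_to_ram() */
          unsigned long size = 1UL << prange.granularity;
          unsigned long first = MAX(ALIGN_DOWN(addr, size), prange.start);
          unsigned long lastp = MIN(ALIGN(addr + 1, size) - 1, prange.last);
          printf("partial range: pages [0x%lx, 0x%lx]\n", first, lastp);

          return 0;
  }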

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author: Philip Yang
Date: 2023-10-23 16:08:34 -04:00
Committed by: Alex Deucher
Parent: afaec204d2
Commit: 541c341d2e
4 changed files with 85 additions and 160 deletions

@@ -442,10 +442,10 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
cpages, npages);
else
pr_debug("0x%lx pages collected\n", cpages);
pr_debug("0x%lx pages migrated\n", cpages);
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
@@ -479,8 +479,6 @@ out:
* svm_migrate_ram_to_vram - migrate svm range from system to device
* @prange: range structure
* @best_loc: the device to migrate to
* @start_mgr: start page to migrate
* @last_mgr: last page to migrate
* @mm: the process mm structure
* @trigger: reason of migration
*
@@ -491,7 +489,6 @@ out:
*/
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long start_mgr, unsigned long last_mgr,
struct mm_struct *mm, uint32_t trigger)
{
unsigned long addr, start, end;
@@ -501,30 +498,23 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long cpages = 0;
long r = 0;
if (!best_loc) {
pr_debug("svms 0x%p [0x%lx 0x%lx] migrate to sys ram\n",
prange->svms, start_mgr, last_mgr);
if (prange->actual_loc == best_loc) {
pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
prange->svms, prange->start, prange->last, best_loc);
return 0;
}
if (start_mgr < prange->start || last_mgr > prange->last) {
pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
start_mgr, last_mgr, prange->start, prange->last);
return -EFAULT;
}
node = svm_range_get_node_by_id(prange, best_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
return -ENODEV;
}
pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
prange->svms, start_mgr, last_mgr, prange->start, prange->last,
best_loc);
pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
prange->start, prange->last, best_loc);
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
r = svm_range_vram_node_new(node, prange, true);
if (r) {
@@ -554,11 +544,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
if (cpages) {
prange->actual_loc = best_loc;
prange->vram_pages = prange->vram_pages + cpages;
} else if (!prange->actual_loc) {
/* if no page migrated and all pages from prange are at
* sys ram drop svm_bo got from svm_range_vram_node_new
*/
svm_range_dma_unmap(prange);
} else {
svm_range_vram_node_free(prange);
}
@@ -676,8 +663,9 @@ out_oom:
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
* Return:
* 0 - success with all pages migrated
* negative values - indicate error
* positive values or zero - number of pages got migrated
* positive values - partial migration, number of pages not migrated
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
@@ -688,7 +676,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
unsigned long upages = npages;
unsigned long cpages = 0;
unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -738,10 +725,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
cpages, npages);
else
pr_debug("0x%lx pages collected\n", cpages);
pr_debug("0x%lx pages migrated\n", cpages);
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
@@ -764,21 +751,17 @@ out_free:
kvfree(buf);
out:
if (!r && cpages) {
mpages = cpages - upages;
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
}
return r ? r : mpages;
return r ? r : upages;
}
/**
* svm_migrate_vram_to_ram - migrate svm range from device to system
* @prange: range structure
* @mm: process mm, use current->mm if NULL
* @start_mgr: start page need be migrated to sys ram
* @last_mgr: last page need be migrated to sys ram
* @trigger: reason of migration
* @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
*
@@ -788,7 +771,6 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long start_mgr, unsigned long last_mgr,
uint32_t trigger, struct page *fault_page)
{
struct kfd_node *node;
@@ -796,33 +778,26 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long addr;
unsigned long start;
unsigned long end;
unsigned long mpages = 0;
unsigned long upages = 0;
long r = 0;
/* this prange has no vram pages to migrate to sys ram */
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
prange->start, prange->last);
return 0;
}
if (start_mgr < prange->start || last_mgr > prange->last) {
pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
start_mgr, last_mgr, prange->start, prange->last);
return -EFAULT;
}
node = svm_range_get_node_by_id(prange, prange->actual_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
return -ENODEV;
}
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
prange->svms, prange, start_mgr, last_mgr,
prange->svms, prange, prange->start, prange->last,
prange->actual_loc);
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@@ -841,21 +816,14 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
} else {
mpages += r;
upages += r;
}
addr = next;
}
if (r >= 0) {
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system
* and drop its svm_bo ref
*/
if (prange->vram_pages == 0 && prange->ttm_res) {
prange->actual_loc = 0;
svm_range_vram_node_free(prange);
}
if (r >= 0 && !upages) {
svm_range_vram_node_free(prange);
prange->actual_loc = 0;
}
return r < 0 ? r : 0;
@@ -865,23 +833,17 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
* svm_migrate_vram_to_vram - migrate svm range from device to device
* @prange: range structure
* @best_loc: the device to migrate to
* @start: start page need be migrated to sys ram
* @last: last page need be migrated to sys ram
* @mm: process mm, use current->mm if NULL
* @trigger: reason of migration
*
* Context: Process context, caller hold mmap read lock, svms lock, prange lock
*
* migrate all vram pages in prange to sys ram, then migrate
* [start, last] pages from sys ram to gpu node best_loc.
*
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger)
struct mm_struct *mm, uint32_t trigger)
{
int r, retries = 3;
@@ -893,8 +855,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
trigger, NULL);
r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@@ -902,21 +863,17 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
if (prange->actual_loc)
return -EDEADLK;
return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger)
struct mm_struct *mm, uint32_t trigger)
{
if (!prange->actual_loc || prange->actual_loc == best_loc)
return svm_migrate_ram_to_vram(prange, best_loc, start, last,
mm, trigger);
if (!prange->actual_loc)
return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
else
return svm_migrate_vram_to_vram(prange, best_loc, start, last,
mm, trigger);
return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}
@@ -932,9 +889,10 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
*/
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
unsigned long start, last, size;
unsigned long addr = vmf->address;
struct svm_range_bo *svm_bo;
enum svm_work_list_ops op;
struct svm_range *parent;
struct svm_range *prange;
struct kfd_process *p;
struct mm_struct *mm;
@@ -971,31 +929,51 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
mutex_lock(&p->svms.lock);
prange = svm_range_from_addr(&p->svms, addr, NULL);
prange = svm_range_from_addr(&p->svms, addr, &parent);
if (!prange) {
pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
goto out_unlock_svms;
}
mutex_lock(&prange->migrate_mutex);
mutex_lock(&parent->migrate_mutex);
if (prange != parent)
mutex_lock_nested(&prange->migrate_mutex, 1);
if (!prange->actual_loc)
goto out_unlock_prange;
/* Align migration range start and size to granularity size */
size = 1UL << prange->granularity;
start = max(ALIGN_DOWN(addr, size), prange->start);
last = min(ALIGN(addr + 1, size) - 1, prange->last);
svm_range_lock(parent);
if (prange != parent)
mutex_lock_nested(&prange->lock, 1);
r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
if (prange != parent)
mutex_unlock(&prange->lock);
svm_range_unlock(parent);
if (r) {
pr_debug("failed %d to split range by granularity\n", r);
goto out_unlock_prange;
}
r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
r, prange->svms, prange, start, last);
r, prange->svms, prange, prange->start, prange->last);
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
if (p->xnack_enabled && parent == prange)
op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
else
op = SVM_OP_UPDATE_RANGE_NOTIFIER;
svm_range_add_list_work(&p->svms, parent, mm, op);
schedule_deferred_list_work(&p->svms);
out_unlock_prange:
mutex_unlock(&prange->migrate_mutex);
if (prange != parent)
mutex_unlock(&prange->migrate_mutex);
mutex_unlock(&parent->migrate_mutex);
out_unlock_svms:
mutex_unlock(&p->svms.lock);
out_unref_process: