mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-18 03:23:53 -04:00
drm/amdkfd: Use partial hmm page walk during buffer validation in SVM
SVM uses an HMM page walk to validate buffers before mapping them to the GPU VM. Now that partial migration/mapping is supported, perform validation on the same VM range that the migration/map operation covers, instead of on the whole SVM range, which can be very large. This change is expected to improve SVM code performance. Signed-off-by: Xiaogang Chen <xiaogang.chen@amd.com> Reviewed-by: Philip Yang <philip.yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
committed by
Alex Deucher
parent
e48c8cbeeb
commit
006ad514a5
@@ -260,19 +260,6 @@ static void svm_migrate_put_sys_page(unsigned long addr)
|
||||
put_page(page);
|
||||
}
|
||||
|
||||
static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
|
||||
{
|
||||
unsigned long cpages = 0;
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < migrate->npages; i++) {
|
||||
if (migrate->src[i] & MIGRATE_PFN_VALID &&
|
||||
migrate->src[i] & MIGRATE_PFN_MIGRATE)
|
||||
cpages++;
|
||||
}
|
||||
return cpages;
|
||||
}
|
||||
|
||||
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
|
||||
{
|
||||
unsigned long upages = 0;
|
||||
@@ -402,6 +389,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
|
||||
struct dma_fence *mfence = NULL;
|
||||
struct migrate_vma migrate = { 0 };
|
||||
unsigned long cpages = 0;
|
||||
unsigned long mpages = 0;
|
||||
dma_addr_t *scratch;
|
||||
void *buf;
|
||||
int r = -ENOMEM;
|
||||
@@ -450,12 +438,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
|
||||
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
|
||||
migrate_vma_pages(&migrate);
|
||||
|
||||
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
|
||||
svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
|
||||
|
||||
svm_migrate_copy_done(adev, mfence);
|
||||
migrate_vma_finalize(&migrate);
|
||||
|
||||
mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
|
||||
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
|
||||
mpages, cpages, migrate.npages);
|
||||
|
||||
kfd_smi_event_migration_end(node, p->lead_thread->pid,
|
||||
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
|
||||
0, node->id, trigger);
|
||||
@@ -465,12 +454,12 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
|
||||
out_free:
|
||||
kvfree(buf);
|
||||
out:
|
||||
if (!r && cpages) {
|
||||
if (!r && mpages) {
|
||||
pdd = svm_range_get_pdd_by_node(prange, node);
|
||||
if (pdd)
|
||||
WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
|
||||
WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
|
||||
|
||||
return cpages;
|
||||
return mpages;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
@@ -498,7 +487,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
|
||||
struct vm_area_struct *vma;
|
||||
uint64_t ttm_res_offset;
|
||||
struct kfd_node *node;
|
||||
unsigned long cpages = 0;
|
||||
unsigned long mpages = 0;
|
||||
long r = 0;
|
||||
|
||||
if (start_mgr < prange->start || last_mgr > prange->last) {
|
||||
@@ -540,15 +529,15 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
|
||||
pr_debug("failed %ld to migrate\n", r);
|
||||
break;
|
||||
} else {
|
||||
cpages += r;
|
||||
mpages += r;
|
||||
}
|
||||
ttm_res_offset += next - addr;
|
||||
addr = next;
|
||||
}
|
||||
|
||||
if (cpages) {
|
||||
if (mpages) {
|
||||
prange->actual_loc = best_loc;
|
||||
prange->vram_pages = prange->vram_pages + cpages;
|
||||
prange->vram_pages += mpages;
|
||||
} else if (!prange->actual_loc) {
|
||||
/* if no page migrated and all pages from prange are at
|
||||
* sys ram drop svm_bo got from svm_range_vram_node_new
|
||||
|
||||
Reference in New Issue
Block a user