drm/shmem-helper: Map huge pages in fault handler
Attempt a PMD sized PFN insertion into the VMA if the faulty address of
the fault handler is part of a huge page. On builds with
CONFIG_TRANSPARENT_HUGEPAGE enabled, if the mmap() user address is PMD
size aligned, if the GEM object is backed by shmem buffers on
mountpoints setting the 'huge=' option and if the shmem backing store
manages to allocate a huge folio, CPU mapping then benefits from
significantly increased memcpy() performance. When these conditions are
met on a system with 2 MiB huge pages, an aligned copy of 2 MiB raises
a single page fault instead of 512.

v4:
- implement map_pages instead of huge_fault

v6:
- get rid of map_pages handler for now (keep it for another series
  along with arm64 contpte support)

v11:
- remove page fault validity check helper
- rename drm_gem_shmem_map_pmd() to drm_gem_shmem_try_map_pmd()
- add Boris R-b

v12:
- move up ret var decl in fault handler to minimize diff

Signed-off-by: Loïc Molinari <loic.molinari@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Link: https://patch.msgid.link/20251205182231.194072-3-loic.molinari@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
committed by Boris Brezillon
parent 9d2d49027c
commit 211b9a39f2
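As a reading aid, not part of the patch: a minimal userspace sketch of the access pattern the commit message describes, assuming a shmem-helper based driver that supports dumb buffers, the /dev/dri/card0 device node, 2 MiB PMD huge pages and the usual DRM uapi headers (include path may need adjusting for libdrm). It maps a dumb buffer and copies 2 MiB into it; the new PMD path can only trigger if the returned address happens to be 2 MiB aligned and the shmem backing store allocated a huge folio.

/* Hypothetical sketch: map a DRM dumb buffer and copy 2 MiB into it.
 * Whether a fault is served by a single PMD mapping depends on the
 * kernel-side conditions listed in the commit message (huge folio from
 * shmem, PMD-aligned user address, CONFIG_ARCH_SUPPORTS_PMD_PFNMAP).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>            /* DRM_IOCTL_MODE_*_DUMB, struct drm_mode_* */

#define HUGE_SZ (2UL << 20)     /* assume 2 MiB PMD huge pages */

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);        /* assumed node */
        struct drm_mode_create_dumb create = {
                .width = 1024, .height = 1024, .bpp = 32, /* ~4 MiB buffer */
        };
        struct drm_mode_map_dumb map = { 0 };
        uint8_t *ptr, src[4096];
        unsigned long off;

        if (fd < 0 || ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                return 1;

        map.handle = create.handle;
        if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                return 1;

        ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, map.offset);
        if (ptr == MAP_FAILED)
                return 1;

        /* Only a 2 MiB aligned user address can be covered by a PMD entry. */
        printf("mmap address %p is %s2 MiB aligned\n", (void *)ptr,
               ((uintptr_t)ptr & (HUGE_SZ - 1)) ? "not " : "");

        /* Touch the first 2 MiB of the mapping. */
        memset(src, 0xff, sizeof(src));
        for (off = 0; off < HUGE_SZ; off += sizeof(src))
                memcpy(ptr + off, src, sizeof(src));

        munmap(ptr, create.size);
        close(fd);
        return 0;
}

On such a configuration the commit message's numbers follow directly: the 2 MiB copy loop touches 512 base pages, so it takes 512 minor faults on the ordinary PTE path but a single one when vmf_insert_pfn_pmd() succeeds.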
drivers/gpu/drm/drm_gem_shmem_helper.c

@@ -567,6 +567,26 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
 
+static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
+                                      struct page *page)
+{
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+        unsigned long pfn = page_to_pfn(page);
+        unsigned long paddr = pfn << PAGE_SHIFT;
+        bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
+
+        if (aligned &&
+            pmd_none(*vmf->pmd) &&
+            folio_test_pmd_mappable(page_folio(page))) {
+                pfn &= PMD_MASK >> PAGE_SHIFT;
+                if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
+                        return true;
+        }
+#endif
+
+        return false;
+}
+
 static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
@@ -574,8 +594,9 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
         struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
         loff_t num_pages = obj->size >> PAGE_SHIFT;
         vm_fault_t ret;
-        struct page *page;
+        struct page **pages = shmem->pages;
         pgoff_t page_offset;
+        unsigned long pfn;
 
         /* Offset to faulty address in the VMA. */
         page_offset = vmf->pgoff - vma->vm_pgoff;
@@ -586,12 +607,18 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
             drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
             shmem->madv < 0) {
                 ret = VM_FAULT_SIGBUS;
-        } else {
-                page = shmem->pages[page_offset];
-
-                ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
+                goto out;
         }
 
+        if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
+                ret = VM_FAULT_NOPAGE;
+                goto out;
+        }
+
+        pfn = page_to_pfn(pages[page_offset]);
+        ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
+out:
         dma_resv_unlock(shmem->base.resv);
 
         return ret;
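The gate in drm_gem_shmem_try_map_pmd() is the test (addr & ~PMD_MASK) == (paddr & ~PMD_MASK): a PMD entry can only serve the fault when the faulting virtual address and the page's physical address sit at the same offset inside a naturally aligned PMD-sized region. A small standalone sketch of that arithmetic, assuming 4 KiB base pages and 2 MiB PMDs; the constants and example addresses are illustrative, not taken from kernel headers:

/* Standalone illustration of the PMD alignment test used above.
 * Assumes 4 KiB base pages and 2 MiB PMD huge pages (PMD_SHIFT = 21);
 * the real kernel gets these values from its page table headers.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE - 1))

/* True when vaddr and paddr have the same offset within a 2 MiB region,
 * i.e. a single PMD entry mapping the 2 MiB physical block would place
 * the faulting byte at the right virtual address.
 */
static int pmd_alignment_ok(unsigned long vaddr, unsigned long paddr)
{
        return (vaddr & ~PMD_MASK) == (paddr & ~PMD_MASK);
}

int main(void)
{
        /* Example numbers only. */
        unsigned long vaddr = 0x7f1200345000UL;
        unsigned long paddr = 0x00019a745000UL; /* same 0x145000 offset */

        printf("vaddr offset in PMD: 0x%lx\n", vaddr & ~PMD_MASK);
        printf("paddr offset in PMD: 0x%lx\n", paddr & ~PMD_MASK);
        printf("PMD mapping possible: %s\n",
               pmd_alignment_ok(vaddr, paddr) ? "yes" : "no");

        /* When it is possible, the pfn is rounded down to the PMD boundary
         * (pfn &= PMD_MASK >> PAGE_SHIFT) so one entry maps the whole
         * 2 MiB block, covering 512 base pages in a single fault.
         */
        printf("pfn rounded to PMD boundary: 0x%lx\n",
               (paddr >> PAGE_SHIFT) & (PMD_MASK >> PAGE_SHIFT));
        return 0;
}

Besides alignment, the helper also requires that no PMD entry is installed yet (pmd_none()) and that the backing folio is actually PMD-sized (folio_test_pmd_mappable()); otherwise the fault handler falls back to the ordinary 4 KiB vmf_insert_pfn() path.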