Merge branch 'mm-hotfixes-stable' into mm-stable in order to merge
"mm/huge_memory: only get folio_order() once during __folio_split()"
into mm-stable.
Author: Andrew Morton
Date:   2025-11-24 15:07:34 -08:00

8 changed files with 63 additions and 36 deletions

diff --git a/mm/filemap.c b/mm/filemap.c

@@ -3691,8 +3691,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
 			unsigned long *rss, unsigned short *mmap_miss,
-			bool can_map_large)
+			pgoff_t file_end)
 {
+	struct address_space *mapping = folio->mapping;
 	unsigned int ref_from_caller = 1;
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
@@ -3701,12 +3702,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 		unsigned long addr0;
 
 		/*
-		 * Map the large folio fully where possible.
+		 * Map the large folio fully where possible:
 		 *
-		 * The folio must not cross VMA or page table boundary.
+		 * - The folio is fully within size of the file or belong
+		 *   to shmem/tmpfs;
+		 * - The folio doesn't cross VMA boundary;
+		 * - The folio doesn't cross page table boundary;
 		 */
 		addr0 = addr - start * PAGE_SIZE;
-		if (can_map_large && folio_within_vma(folio, vmf->vma) &&
+		if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+		    folio_within_vma(folio, vmf->vma) &&
 		    (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
 			vmf->pte -= start;
 			page -= start;
@@ -3821,7 +3826,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long rss = 0;
 	unsigned int nr_pages = 0, folio_type;
 	unsigned short mmap_miss = 0, mmap_miss_saved;
-	bool can_map_large;
 
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3832,16 +3836,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	end_pgoff = min(end_pgoff, file_end);
 
 	/*
-	 * Do not allow to map with PTEs beyond i_size and with PMD
-	 * across i_size to preserve SIGBUS semantics.
+	 * Do not allow to map with PMD across i_size to preserve
+	 * SIGBUS semantics.
 	 *
 	 * Make an exception for shmem/tmpfs that for long time
 	 * intentionally mapped with PMDs across i_size.
 	 */
-	can_map_large = shmem_mapping(mapping) ||
-			file_end >= folio_next_index(folio);
-
-	if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    filemap_map_pmd(vmf, folio, start_pgoff)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
@@ -3870,8 +3872,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		else
 			ret |= filemap_map_folio_range(vmf, folio,
 					xas.xa_index - folio->index, addr,
-					nr_pages, &rss, &mmap_miss,
-					can_map_large);
+					nr_pages, &rss, &mmap_miss, file_end);
 
 		folio_unlock(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
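
Both call sites above now inline the same predicate: map the large folio in
one go only if the file's last page lies past the folio's end
(file_end >= folio_next_index()), with shmem/tmpfs exempt because it has
long been mapped with PMDs across i_size. Below is a minimal standalone
sketch of that predicate for checking the boundary cases; example_folio and
can_map_folio_fully() are hypothetical stand-ins, not kernel API, and the
local folio_next_index() only mimics the kernel helper over the toy struct.

/*
 * Sketch only. In filemap_map_pages(), file_end is the last page offset
 * covered by i_size, and folio_next_index() returns one past the folio's
 * last page offset.
 */
#include <stdbool.h>
#include <stdio.h>

struct example_folio {
	unsigned long index;	/* first page offset of the folio */
	unsigned long nr_pages;	/* folio size in pages */
};

/* Mimics the kernel's folio_next_index() for the toy struct. */
static unsigned long folio_next_index(const struct example_folio *folio)
{
	return folio->index + folio->nr_pages;
}

/* Mirrors: file_end >= folio_next_index(folio) || shmem_mapping(mapping) */
static bool can_map_folio_fully(const struct example_folio *folio,
				unsigned long file_end, bool is_shmem)
{
	return file_end >= folio_next_index(folio) || is_shmem;
}

int main(void)
{
	/* Folio covering page offsets 16..31. */
	struct example_folio folio = { .index = 16, .nr_pages = 16 };

	/* File's last page (40) is past the folio's end: full map allowed. */
	printf("%d\n", can_map_folio_fully(&folio, 40, false));	/* 1 */
	/* Folio crosses i_size on a regular file: refuse, preserve SIGBUS. */
	printf("%d\n", can_map_folio_fully(&folio, 20, false));	/* 0 */
	/* shmem/tmpfs keeps its historical exception. */
	printf("%d\n", can_map_folio_fully(&folio, 20, true));	/* 1 */
	return 0;
}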