Merge tag 'mm-hotfixes-stable-2025-06-13-21-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "9 hotfixes. 3 are cc:stable and the remainder address post-6.15 issues
  or aren't considered necessary for -stable kernels. Only 4 are for MM"

* tag 'mm-hotfixes-stable-2025-06-13-21-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: add mmap_prepare() compatibility layer for nested file systems
  init: fix build warnings about export.h
  MAINTAINERS: add Barry as a THP reviewer
  drivers/rapidio/rio_cm.c: prevent possible heap overwrite
  mm: close theoretical race where stale TLB entries could linger
  mm/vma: reset VMA iterator on commit_merge() OOM failure
  docs: proc: update VmFlags documentation in smaps
  scatterlist: fix extraneous '@'-sign kernel-doc notation
  selftests/mm: skip failed memfd setups in gup_longterm
Linus Torvalds 2025-06-14 08:18:09 -07:00
commit 27b9989b87
14 changed files with 134 additions and 29 deletions

Documentation/filesystems/proc.rst

@@ -584,7 +584,6 @@ encoded manner. The codes are the following:
 ms may share
 gd stack segment growns down
 pf pure PFN range
-dw disabled write to the mapped file
 lo pages are locked in memory
 io memory mapped I/O area
 sr sequential read advise provided
@@ -607,8 +606,11 @@ encoded manner. The codes are the following:
 mt arm64 MTE allocation tags are enabled
 um userfaultfd missing tracking
 uw userfaultfd wr-protect tracking
+ui userfaultfd minor fault
 ss shadow/guarded control stack page
 sl sealed
+lf lock on fault pages
+dp always lazily freeable mapping
 == =======================================
 
 Note that there is no guarantee that every flag and associated mnemonic will
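
For reference, these mnemonics land on the VmFlags line of /proc/<pid>/smaps. A minimal sketch of how to observe them, assuming only a mounted /proc and the standard C library (an illustration, not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/self/smaps", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* Every mapping's entry ends with a "VmFlags:" line of mnemonics. */
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, "VmFlags:", 8) == 0)
                        fputs(line, stdout); /* e.g. "VmFlags: rd wr mr mw me ac" */
        }
        fclose(f);
        return 0;
}

Each printed mnemonic maps back to a row in the table above.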

MAINTAINERS

@@ -15921,6 +15921,7 @@ R: Liam R. Howlett <Liam.Howlett@oracle.com>
 R: Nico Pache <npache@redhat.com>
 R: Ryan Roberts <ryan.roberts@arm.com>
 R: Dev Jain <dev.jain@arm.com>
+R: Barry Song <baohua@kernel.org>
 L: linux-mm@kvack.org
 S: Maintained
 W: http://www.linux-mm.org

drivers/rapidio/rio_cm.c

@@ -783,6 +783,9 @@ static int riocm_ch_send(u16 ch_id, void *buf, int len)
         if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
                 return -EINVAL;
 
+        if (len < sizeof(struct rio_ch_chan_hdr))
+                return -EINVAL; /* insufficient data from user */
+
         ch = riocm_get_channel(ch_id);
         if (!ch) {
                 riocm_error("%s(%d) ch_%d not found", current->comm,

include/linux/fs.h

@@ -2274,10 +2274,12 @@ static inline bool file_has_valid_mmap_hooks(struct file *file)
         return true;
 }
 
+int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma);
+
 static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
 {
-        if (WARN_ON_ONCE(file->f_op->mmap_prepare))
-                return -EINVAL;
+        if (file->f_op->mmap_prepare)
+                return compat_vma_mmap_prepare(file, vma);
 
         return file->f_op->mmap(file, vma);
 }

include/linux/scatterlist.h

@@ -99,7 +99,7 @@ static inline bool sg_is_last(struct scatterlist *sg)
  * @sg: The current sg entry
  *
  * Description:
- *   Usually the next entry will be @sg@ + 1, but if this sg element is part
+ *   Usually the next entry will be @sg + 1, but if this sg element is part
  *   of a chained scatterlist, it could jump to the start of a new
  *   scatterlist array.
  *
@@ -254,7 +254,7 @@ static inline void __sg_chain(struct scatterlist *chain_sg,
  * @sgl: Second scatterlist
  *
  * Description:
- *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ *   Links @prv and @sgl together, to form a longer scatterlist.
  *
  **/
 static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
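
The sg_next() description above is the heart of chained scatterlists: the next entry is usually @sg + 1, except when the current element is a chain entry that redirects the walk to another array. A self-contained toy model of that traversal, with hypothetical toy_sg/is_chain/is_last fields standing in for the bits the kernel actually encodes in page_link:

#include <stdio.h>

struct toy_sg {
        int data;              /* stands in for page/offset/length */
        int is_chain;          /* entry redirects to another array */
        int is_last;           /* entry terminates the list */
        struct toy_sg *chain;  /* valid when is_chain is set */
};

/* Mirrors sg_next(): NULL at the end, otherwise sg + 1, following chains. */
static struct toy_sg *toy_sg_next(struct toy_sg *sg)
{
        if (sg->is_last)
                return NULL;
        sg++;
        if (sg->is_chain)
                sg = sg->chain;
        return sg;
}

int main(void)
{
        struct toy_sg second[2] = {
                { .data = 3 },
                { .data = 4, .is_last = 1 },
        };
        struct toy_sg first[3] = {
                { .data = 1 },
                { .data = 2 },
                { .is_chain = 1, .chain = second }, /* carries no data */
        };

        /* Prints 1 2 3 4, hopping arrays at the chain entry. */
        for (struct toy_sg *sg = first; sg; sg = toy_sg_next(sg))
                printf("%d\n", sg->data);
        return 0;
}

The chain entry itself holds no payload; it only redirects the walk, which is why chaining consumes one entry of the first array.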

init/initramfs.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/init.h>
 #include <linux/async.h>
+#include <linux/export.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/types.h>

init/main.c

@@ -13,6 +13,7 @@
 #define DEBUG /* Enable initcall_debug */
 
 #include <linux/types.h>
+#include <linux/export.h>
 #include <linux/extable.h>
 #include <linux/module.h>
 #include <linux/proc_fs.h>

lib/scatterlist.c

@@ -73,9 +73,9 @@ EXPORT_SYMBOL(sg_nents_for_len);
  *   Should only be used casually, it (currently) scans the entire list
  *   to get the last entry.
  *
- *   Note that the @sgl@ pointer passed in need not be the first one,
- *   the important bit is that @nents@ denotes the number of entries that
- *   exist from @sgl@.
+ *   Note that the @sgl pointer passed in need not be the first one,
+ *   the important bit is that @nents denotes the number of entries that
+ *   exist from @sgl.
  *
  **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(__sg_alloc_table);
  * @gfp_mask: GFP allocation mask
  *
  * Description:
- *   Allocate and initialize an sg table. If @nents@ is larger than
+ *   Allocate and initialize an sg table. If @nents is larger than
  *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
  *
  **/

mm/madvise.c

@@ -508,6 +508,7 @@ restart:
                         pte_offset_map_lock(mm, pmd, addr, &ptl);
                 if (!start_pte)
                         break;
+                flush_tlb_batched_pending(mm);
                 arch_enter_lazy_mmu_mode();
                 if (!err)
                         nr = 0;
@@ -741,6 +742,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                 start_pte = pte;
                 if (!start_pte)
                         break;
+                flush_tlb_batched_pending(mm);
                 arch_enter_lazy_mmu_mode();
                 if (!err)
                         nr = 0;

mm/util.c

@@ -1131,3 +1131,43 @@ void flush_dcache_folio(struct folio *folio)
 }
 EXPORT_SYMBOL(flush_dcache_folio);
 #endif
+
+/**
+ * compat_vma_mmap_prepare() - Apply the file's .mmap_prepare() hook to an
+ * existing VMA
+ * @file: The file which possesss an f_op->mmap_prepare() hook
+ * @vma: The VMA to apply the .mmap_prepare() hook to.
+ *
+ * Ordinarily, .mmap_prepare() is invoked directly upon mmap(). However, certain
+ * 'wrapper' file systems invoke a nested mmap hook of an underlying file.
+ *
+ * Until all filesystems are converted to use .mmap_prepare(), we must be
+ * conservative and continue to invoke these 'wrapper' filesystems using the
+ * deprecated .mmap() hook.
+ *
+ * However we have a problem if the underlying file system possesses an
+ * .mmap_prepare() hook, as we are in a different context when we invoke the
+ * .mmap() hook, already having a VMA to deal with.
+ *
+ * compat_vma_mmap_prepare() is a compatibility function that takes VMA state,
+ * establishes a struct vm_area_desc descriptor, passes to the underlying
+ * .mmap_prepare() hook and applies any changes performed by it.
+ *
+ * Once the conversion of filesystems is complete this function will no longer
+ * be required and will be removed.
+ *
+ * Returns: 0 on success or error.
+ */
+int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma)
+{
+        struct vm_area_desc desc;
+        int err;
+
+        err = file->f_op->mmap_prepare(vma_to_desc(vma, &desc));
+        if (err)
+                return err;
+        set_vma_from_desc(vma, &desc);
+
+        return 0;
+}
+EXPORT_SYMBOL(compat_vma_mmap_prepare);
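
The comment block above describes a snapshot/hook/write-back round trip: VMA state is copied into a descriptor, the underlying .mmap_prepare() mutates only that descriptor, and the result is applied back to the VMA. A compilable userspace sketch of the same pattern, using hypothetical fake_vma/fake_desc types rather than the real kernel structures:

#include <stdio.h>

struct fake_vma  { unsigned long start, end, flags; };
struct fake_desc { unsigned long start, end, flags; };

/* Stand-in for f_op->mmap_prepare(): sees and mutates only the descriptor. */
static int sample_mmap_prepare(struct fake_desc *desc)
{
        desc->flags |= 0x1; /* e.g. turn on a hypothetical flag bit */
        return 0;
}

/* Snapshot the VMA into a caller-provided descriptor. */
static struct fake_desc *to_desc(struct fake_vma *vma, struct fake_desc *desc)
{
        desc->start = vma->start;
        desc->end = vma->end;
        desc->flags = vma->flags;
        return desc;
}

/* Write back only the fields the hook is allowed to change. */
static void from_desc(struct fake_vma *vma, struct fake_desc *desc)
{
        vma->flags = desc->flags;
}

static int compat_prepare(struct fake_vma *vma)
{
        struct fake_desc desc;
        int err = sample_mmap_prepare(to_desc(vma, &desc));

        if (err)
                return err;
        from_desc(vma, &desc);
        return 0;
}

int main(void)
{
        struct fake_vma vma = { 0x1000, 0x2000, 0 };

        if (compat_prepare(&vma) == 0)
                printf("flags after hook: %#lx\n", vma.flags);
        return 0;
}

The point of the indirection is that the hook never touches the half-constructed VMA, only the plain descriptor, matching the contract .mmap_prepare() was designed around.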

mm/vma.c

@@ -967,26 +967,9 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
                 err = dup_anon_vma(next, middle, &anon_dup);
         }
 
-        if (err)
+        if (err || commit_merge(vmg))
                 goto abort;
 
-        err = commit_merge(vmg);
-        if (err) {
-                VM_WARN_ON(err != -ENOMEM);
-
-                if (anon_dup)
-                        unlink_anon_vmas(anon_dup);
-
-                /*
-                 * We've cleaned up any cloned anon_vma's, no VMAs have been
-                 * modified, no harm no foul if the user requests that we not
-                 * report this and just give up, leaving the VMAs unmerged.
-                 */
-                if (!vmg->give_up_on_oom)
-                        vmg->state = VMA_MERGE_ERROR_NOMEM;
-                return NULL;
-        }
-
         khugepaged_enter_vma(vmg->target, vmg->flags);
         vmg->state = VMA_MERGE_SUCCESS;
         return vmg->target;
@@ -995,6 +978,9 @@ abort:
         vma_iter_set(vmg->vmi, start);
         vma_iter_load(vmg->vmi);
 
+        if (anon_dup)
+                unlink_anon_vmas(anon_dup);
+
         /*
          * This means we have failed to clone anon_vma's correctly, but no
          * actual changes to VMAs have occurred, so no harm no foul - if the
@@ -3127,7 +3113,6 @@ int __vm_munmap(unsigned long start, size_t len, bool unlock)
         return ret;
 }
 
-
 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap tree. If vm_file is non-NULL
  * then i_mmap_rwsem is taken here.

mm/vma.h

@@ -222,6 +222,53 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
         return 0;
 }
 
+
+/*
+ * Temporary helper functions for file systems which wrap an invocation of
+ * f_op->mmap() but which might have an underlying file system which implements
+ * f_op->mmap_prepare().
+ */
+static inline struct vm_area_desc *vma_to_desc(struct vm_area_struct *vma,
+                struct vm_area_desc *desc)
+{
+        desc->mm = vma->vm_mm;
+        desc->start = vma->vm_start;
+        desc->end = vma->vm_end;
+
+        desc->pgoff = vma->vm_pgoff;
+        desc->file = vma->vm_file;
+        desc->vm_flags = vma->vm_flags;
+        desc->page_prot = vma->vm_page_prot;
+
+        desc->vm_ops = NULL;
+        desc->private_data = NULL;
+
+        return desc;
+}
+
+static inline void set_vma_from_desc(struct vm_area_struct *vma,
+                struct vm_area_desc *desc)
+{
+        /*
+         * Since we're invoking .mmap_prepare() despite having a partially
+         * established VMA, we must take care to handle setting fields
+         * correctly.
+         */
+
+        /* Mutable fields. Populated with initial state. */
+        vma->vm_pgoff = desc->pgoff;
+        if (vma->vm_file != desc->file)
+                vma_set_file(vma, desc->file);
+        if (vma->vm_flags != desc->vm_flags)
+                vm_flags_set(vma, desc->vm_flags);
+        vma->vm_page_prot = desc->page_prot;
+
+        /* User-defined fields. */
+        vma->vm_ops = desc->vm_ops;
+        vma->vm_private_data = desc->private_data;
+}
+
 int
 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
                 struct mm_struct *mm, unsigned long start,

tools/testing/selftests/mm/gup_longterm.c

@@ -298,8 +298,11 @@ static void run_with_memfd(test_fn fn, const char *desc)
         log_test_start("%s ... with memfd", desc);
 
         fd = memfd_create("test", 0);
-        if (fd < 0)
+        if (fd < 0) {
                 ksft_print_msg("memfd_create() failed (%s)\n", strerror(errno));
+                log_test_result(KSFT_SKIP);
+                return;
+        }
 
         fn(fd, pagesize);
         close(fd);
@@ -366,6 +369,8 @@ static void run_with_memfd_hugetlb(test_fn fn, const char *desc,
         fd = memfd_create("test", flags);
         if (fd < 0) {
                 ksft_print_msg("memfd_create() failed (%s)\n", strerror(errno));
+                log_test_result(KSFT_SKIP);
+                return;
         }
 
         fn(fd, hugetlbsize);

tools/testing/vma/vma_internal.h

@@ -159,6 +159,14 @@ typedef __bitwise unsigned int vm_fault_t;
 
 #define ASSERT_EXCLUSIVE_WRITER(x)
 
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
+ */
+#define swap(a, b) \
+        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
 struct kref {
         refcount_t refcount;
 };
@@ -1468,4 +1476,12 @@ static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
         (void)vma;
 }
 
+static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
+{
+        /* Changing an anonymous vma with this is illegal */
+        get_file(file);
+        swap(vma->vm_file, file);
+        fput(file);
+}
+
 #endif /* __MM_VMA_INTERNAL_H */
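
The vma_set_file() stub above leans on the new swap() macro for a get/swap/put reference dance: pin the incoming file, exchange the pointers, then drop the reference that was held on the outgoing file. A standalone model of that sequence (toy_file and its manual refcount are hypothetical; typeof assumes GNU C or C23):

#include <stdio.h>

#define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct toy_file { int refcount; };

static void get_file(struct toy_file *f) { f->refcount++; }
static void fput(struct toy_file *f)     { f->refcount--; }

int main(void)
{
        struct toy_file old = { .refcount = 1 }, new = { .refcount = 1 };
        struct toy_file *cur = &old;        /* what the "VMA" currently holds */
        struct toy_file *incoming = &new;   /* the file being swapped in */

        /* Mirror of the stub: pin the new file, swap it in, drop the old. */
        get_file(incoming);   /* new: 2 */
        swap(cur, incoming);  /* cur -> new; incoming now points at old */
        fput(incoming);       /* old: 0 */

        printf("old=%d new=%d\n", old.refcount, new.refcount); /* old=0 new=2 */
        return 0;
}

After the sequence the mapping owns one reference on the new file and the old file's count has dropped by one, which is the invariant the real helper preserves.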