mm: change vma_start_read() to drop RCU lock on failure

vma_start_read() can drop and reacquire RCU lock in certain failure cases.
It's not apparent that the RCU session started by the caller of this
function might be interrupted when vma_start_read() fails to lock the vma.
This might become a source of subtle bugs, so to prevent that we change
the locking rules for vma_start_read() to drop the RCU read lock upon failure.
This way it's more obvious that RCU-protected objects are unsafe after
vma locking fails.
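
To make the new contract concrete for callers, a minimal caller-side sketch
follows (illustrative only, not part of this patch); it only uses the existing
helpers vma_start_read(), vma_end_read() and IS_ERR_OR_NULL():

	rcu_read_lock();
	vma = vma_start_read(mm, vma);
	if (IS_ERR_OR_NULL(vma)) {
		/*
		 * vma_start_read() has already dropped the RCU read lock;
		 * neither the vma nor any other RCU-protected object may be
		 * used here without calling rcu_read_lock() again.
		 */
		return NULL;
	}
	/* Success: the RCU read lock is still held and the vma is read-locked. */
	rcu_read_unlock();
	/* ... use the locked vma ... */
	vma_end_read(vma);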

Link: https://lkml.kernel.org/r/20250804233349.1278678-2-surenb@google.com
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -136,15 +136,16 @@ void vma_mark_detached(struct vm_area_struct *vma)
* Returns the vma on success, NULL on failure to lock and EAGAIN if vma got
* detached.
*
* WARNING! The vma passed to this function cannot be used if the function
* fails to lock it because in certain cases RCU lock is dropped and then
* reacquired. Once RCU lock is dropped the vma can be concurrently freed.
* IMPORTANT: RCU lock must be held upon entering the function, but upon error
* IT IS RELEASED. The caller must handle this correctly.
*/
static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
struct vm_area_struct *vma)
{
struct mm_struct *other_mm;
int oldcnt;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
/*
* Check before locking. A race might cause false locked result.
* We can use READ_ONCE() for the mm_lock_seq here, and don't need
@@ -152,8 +153,10 @@ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
* we don't rely on for anything - the mm_lock_seq read against which we
* need ordering is below.
*/
if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
return NULL;
if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) {
vma = NULL;
goto err;
}
/*
* If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire()
@@ -164,34 +167,14 @@ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
VMA_REF_LIMIT))) {
/* return EAGAIN if vma got detached from under us */
return oldcnt ? NULL : ERR_PTR(-EAGAIN);
vma = oldcnt ? NULL : ERR_PTR(-EAGAIN);
goto err;
}
rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
/*
* If vma got attached to another mm from under us, that mm is not
* stable and can be freed in the narrow window after vma->vm_refcnt
* is dropped and before rcuwait_wake_up(mm) is called. Grab it before
* releasing vma->vm_refcnt.
*/
if (unlikely(vma->vm_mm != mm)) {
/* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
struct mm_struct *other_mm = vma->vm_mm;
/*
* __mmdrop() is a heavy operation and we don't need RCU
* protection here. Release RCU lock during these operations.
* We reinstate the RCU read lock as the caller expects it to
* be held when this function returns even on error.
*/
rcu_read_unlock();
mmgrab(other_mm);
vma_refcount_put(vma);
mmdrop(other_mm);
rcu_read_lock();
return NULL;
}
if (unlikely(vma->vm_mm != mm))
goto err_unstable;
/*
* Overflow of vm_lock_seq/mm_lock_seq might produce false locked result.
@@ -206,10 +189,31 @@ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
*/
if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
vma_refcount_put(vma);
return NULL;
vma = NULL;
goto err;
}
return vma;
err:
rcu_read_unlock();
return vma;
err_unstable:
/*
* If vma got attached to another mm from under us, that mm is not
* stable and can be freed in the narrow window after vma->vm_refcnt
* is dropped and before rcuwait_wake_up(mm) is called. Grab it before
* releasing vma->vm_refcnt.
*/
other_mm = vma->vm_mm; /* use a copy as vma can be freed after we drop vm_refcnt */
/* __mmdrop() is a heavy operation, do it after dropping RCU lock. */
rcu_read_unlock();
mmgrab(other_mm);
vma_refcount_put(vma);
mmdrop(other_mm);
return NULL;
}
/*
@@ -223,11 +227,13 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
MA_STATE(mas, &mm->mm_mt, address, address);
struct vm_area_struct *vma;
rcu_read_lock();
retry:
rcu_read_lock();
vma = mas_walk(&mas);
if (!vma)
if (!vma) {
rcu_read_unlock();
goto inval;
}
vma = vma_start_read(mm, vma);
if (IS_ERR_OR_NULL(vma)) {
@@ -247,18 +253,17 @@ retry:
* From here on, we can access the VMA without worrying about which
* fields are accessible for RCU readers.
*/
rcu_read_unlock();
/* Check if the vma we locked is the right one. */
if (unlikely(address < vma->vm_start || address >= vma->vm_end))
goto inval_end_read;
if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
vma_end_read(vma);
goto inval;
}
rcu_read_unlock();
return vma;
inval_end_read:
vma_end_read(vma);
inval:
rcu_read_unlock();
count_vm_vma_lock_event(VMA_LOCK_ABORT);
return NULL;
}
@@ -313,6 +318,7 @@ retry:
*/
if (PTR_ERR(vma) == -EAGAIN) {
/* reset to search from the last address */
rcu_read_lock();
vma_iter_set(vmi, from_addr);
goto retry;
}
@@ -342,9 +348,9 @@ retry:
return vma;
fallback_unlock:
rcu_read_unlock();
vma_end_read(vma);
fallback:
rcu_read_unlock();
vma = lock_next_vma_under_mmap_lock(mm, vmi, from_addr);
rcu_read_lock();
/* Reinitialize the iterator after re-entering rcu read section */