Merge tag 'vfs-7.0-rc3.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:

 - kthread: consolidate kthread exit paths to prevent use-after-free

 - iomap:
    - don't mark folio uptodate if read IO has bytes pending
    - don't report direct-io retries to fserror
    - reject delalloc mappings during writeback

 - ns: tighten visibility checks

 - netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict
   sequence

* tag 'vfs-7.0-rc3.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  iomap: reject delalloc mappings during writeback
  iomap: don't mark folio uptodate if read IO has bytes pending
  selftests: fix mntns iteration selftests
  nstree: tighten permission checks for listing
  nsfs: tighten permission checks for handle opening
  nsfs: tighten permission checks for ns iteration ioctls
  netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict sequence
  kthread: consolidate kthread exit paths to prevent use-after-free
  iomap: don't report direct-io retries to fserror
This commit is contained in:
Linus Torvalds
2026-03-04 15:03:16 -08:00
16 changed files with 326 additions and 160 deletions

View File

@@ -896,11 +896,16 @@ static void synchronize_group_exit(struct task_struct *tsk, long code)
void __noreturn do_exit(long code)
{
struct task_struct *tsk = current;
struct kthread *kthread;
int group_dead;
WARN_ON(irqs_disabled());
WARN_ON(tsk->plug);
kthread = tsk_is_kthread(tsk);
if (unlikely(kthread))
kthread_do_exit(kthread, code);
kcov_task_exit(tsk);
kmsan_task_exit(tsk);
@@ -1013,6 +1018,7 @@ void __noreturn do_exit(long code)
lockdep_free_task(tsk);
do_task_dead();
}
EXPORT_SYMBOL(do_exit);
void __noreturn make_task_dead(int signr)
{

View File

@@ -85,24 +85,6 @@ static inline struct kthread *to_kthread(struct task_struct *k)
return k->worker_private;
}
/*
 * Like to_kthread(), but tolerant of @p not being a kthread.
 *
 * A task with PF_KTHREAD set is a kthread for its entire lifetime, and
 * its ->worker_private always points at a struct kthread.  Tasks without
 * PF_KTHREAD reuse ->worker_private for unrelated purposes, so that
 * pointer must not be interpreted as a struct kthread for them.
 *
 * Returns the task's struct kthread, or NULL if @p is not a kthread.
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	if (!(p->flags & PF_KTHREAD))
		return NULL;
	return p->worker_private;
}
void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
struct kthread *kthread = to_kthread(tsk);
@@ -193,7 +175,7 @@ EXPORT_SYMBOL_GPL(kthread_should_park);
bool kthread_should_stop_or_park(void)
{
struct kthread *kthread = __to_kthread(current);
struct kthread *kthread = tsk_is_kthread(current);
if (!kthread)
return false;
@@ -234,7 +216,7 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
*/
void *kthread_func(struct task_struct *task)
{
struct kthread *kthread = __to_kthread(task);
struct kthread *kthread = tsk_is_kthread(task);
if (kthread)
return kthread->threadfn;
return NULL;
@@ -266,7 +248,7 @@ EXPORT_SYMBOL_GPL(kthread_data);
*/
void *kthread_probe_data(struct task_struct *task)
{
struct kthread *kthread = __to_kthread(task);
struct kthread *kthread = tsk_is_kthread(task);
void *data = NULL;
if (kthread)
@@ -309,19 +291,8 @@ void kthread_parkme(void)
}
EXPORT_SYMBOL_GPL(kthread_parkme);
/**
* kthread_exit - Cause the current kthread return @result to kthread_stop().
* @result: The integer value to return to kthread_stop().
*
* While kthread_exit can be called directly, it exists so that
* functions which do some additional work in non-modular code such as
* module_put_and_kthread_exit can be implemented.
*
* Does not return.
*/
void __noreturn kthread_exit(long result)
void kthread_do_exit(struct kthread *kthread, long result)
{
struct kthread *kthread = to_kthread(current);
kthread->result = result;
if (!list_empty(&kthread->affinity_node)) {
mutex_lock(&kthread_affinity_lock);
@@ -333,9 +304,7 @@ void __noreturn kthread_exit(long result)
kthread->preferred_affinity = NULL;
}
}
do_exit(0);
}
EXPORT_SYMBOL(kthread_exit);
/**
* kthread_complete_and_exit - Exit the current kthread.
@@ -683,7 +652,7 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
bool kthread_is_per_cpu(struct task_struct *p)
{
struct kthread *kthread = __to_kthread(p);
struct kthread *kthread = tsk_is_kthread(p);
if (!kthread)
return false;

View File

@@ -309,3 +309,9 @@ void __ns_ref_active_get(struct ns_common *ns)
return;
}
}
bool may_see_all_namespaces(void)
{
return (task_active_pid_ns(current) == &init_pid_ns) &&
ns_capable_noaudit(init_pid_ns.user_ns, CAP_SYS_ADMIN);
}

View File

@@ -515,32 +515,11 @@ static inline bool __must_check ns_requested(const struct klistns *kls,
static inline bool __must_check may_list_ns(const struct klistns *kls,
struct ns_common *ns)
{
if (kls->user_ns) {
if (kls->userns_capable)
return true;
} else {
struct ns_common *owner;
struct user_namespace *user_ns;
owner = ns_owner(ns);
if (owner)
user_ns = to_user_ns(owner);
else
user_ns = &init_user_ns;
if (ns_capable_noaudit(user_ns, CAP_SYS_ADMIN))
return true;
}
if (kls->user_ns && kls->userns_capable)
return true;
if (is_current_namespace(ns))
return true;
if (ns->ns_type != CLONE_NEWUSER)
return false;
if (ns_capable_noaudit(to_user_ns(ns), CAP_SYS_ADMIN))
return true;
return false;
return may_see_all_namespaces();
}
static inline void ns_put(struct ns_common *ns)
@@ -600,7 +579,7 @@ static ssize_t do_listns_userns(struct klistns *kls)
ret = 0;
head = &to_ns_common(kls->user_ns)->ns_owner_root.ns_list_head;
kls->userns_capable = ns_capable_noaudit(kls->user_ns, CAP_SYS_ADMIN);
kls->userns_capable = may_see_all_namespaces();
rcu_read_lock();