sched_ext: Merge branch 'for-6.16-fixes' into for-6.17

Pull sched_ext/for-6.16-fixes to receive:

 c50784e99f ("sched_ext: Make scx_group_set_weight() always update tg->scx.weight")
 33796b9187 ("sched_ext, sched/core: Don't call scx_group_set_weight() prematurely from sched_create_group()")

which are needed to implement CPU bandwidth control interface support.

Signed-off-by: Tejun Heo <tj@kernel.org>
Author: Tejun Heo <tj@kernel.org>
Date:   2025-06-20 17:01:21 -10:00

10 changed files with 37 additions and 17 deletions

MAINTAINERS

@@ -22171,7 +22171,7 @@ R: Tejun Heo <tj@kernel.org>
R: David Vernet <void@manifault.com>
R: Andrea Righi <arighi@nvidia.com>
R: Changwoo Min <changwoo@igalia.com>
L: linux-kernel@vger.kernel.org
L: sched-ext@lists.linux.dev
S: Maintained
W: https://github.com/sched-ext/scx
T: git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git

arch/powerpc/platforms/book3s/vas-api.c

@@ -521,6 +521,15 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
return -EINVAL;
}
/*
* Map complete page to the paste address. So the user
* space should pass 0ULL to the offset parameter.
*/
if (vma->vm_pgoff) {
pr_debug("Page offset unsupported to map paste address\n");
return -EINVAL;
}
/* Ensure instance has an open send window */
if (!txwin) {
pr_err("No send window open?\n");
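
The added vm_pgoff check means the only mapping this handler accepts starts
at offset 0 and covers the single paste page. A hedged userspace sketch of
that contract (the device path is the nx-gzip example from the VAS docs, and
the required VAS_TX_WIN_OPEN ioctl setup is elided):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            /* Open a VAS coprocessor node, e.g. the NX GZIP engine. */
            int fd = open("/dev/crypto/nx-gzip", O_RDWR);
            if (fd < 0)
                    return 1;

            /* A send window must already be open (VAS_TX_WIN_OPEN ioctl),
             * or coproc_mmap() fails with "No send window open?".
             */
            long page = sysconf(_SC_PAGESIZE);

            /* Offset must be 0: any non-zero vm_pgoff now gets -EINVAL. */
            void *paste = mmap(NULL, page, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, 0);
            if (paste == MAP_FAILED)
                    perror("mmap");

            close(fd);
            return 0;
    }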

arch/powerpc/platforms/powernv/memtrace.c

@@ -48,11 +48,15 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct memtrace_entry *ent = filp->private_data;
unsigned long ent_nrpages = ent->size >> PAGE_SHIFT;
unsigned long vma_nrpages = vma_pages(vma);
if (ent->size < vma->vm_end - vma->vm_start)
/* The requested page offset should be within object's page count */
if (vma->vm_pgoff >= ent_nrpages)
return -EINVAL;
if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
/* The requested mapping range should remain within the bounds */
if (vma_nrpages > ent_nrpages - vma->vm_pgoff)
return -EINVAL;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
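
The rewritten checks are also overflow-safe: the first test guarantees
vma->vm_pgoff < ent_nrpages, so the subtraction in the second cannot wrap,
whereas the obvious "vm_pgoff + vma_pages(vma) > ent_nrpages" could overflow
for a huge offset. A minimal sketch of the same pattern (function and
variable names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Is [off, off + len) within [0, total)?  Mirrors the memtrace_mmap()
     * ordering: validate the offset first, then compare against the
     * remaining room so nothing can wrap around.
     */
    static bool range_ok(unsigned long off, unsigned long len,
                         unsigned long total)
    {
            if (off >= total)               /* offset out of bounds */
                    return false;
            return len <= total - off;      /* off < total, cannot wrap */
    }

    int main(void)
    {
            /* off + len wraps an unsigned long, yet is still rejected. */
            printf("%d\n", range_ok(~0UL - 1, 4, 1024));    /* 0 */
            printf("%d\n", range_ok(2, 6, 8));              /* 1 */
            printf("%d\n", range_ok(2, 7, 8));              /* 0 */
            return 0;
    }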

include/linux/key.h

@@ -236,7 +236,7 @@ struct key {
#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
#define KEY_FLAG_FINAL_PUT 10 /* set if final put has happened on key */
#define KEY_FLAG_USER_ALIVE 10 /* set if final put has not happened on key yet */
/* the key type and key description string
* - the desc is used to match a key against search criteria

kernel/sched/core.c

@@ -8437,7 +8437,7 @@ void __init sched_init(void)
init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_EXT_GROUP_SCHED
root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
scx_tg_init(&root_task_group);
#endif /* CONFIG_EXT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
@@ -8872,7 +8872,7 @@ struct task_group *sched_create_group(struct task_group *parent)
if (!alloc_rt_sched_group(tg, parent))
goto err;
scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
scx_tg_init(tg);
alloc_uclamp_sched_group(tg, parent);
return tg;

kernel/sched/ext.c

@@ -4056,6 +4056,11 @@ bool scx_can_stop_tick(struct rq *rq)
DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
static bool scx_cgroup_enabled;
void scx_tg_init(struct task_group *tg)
{
tg->scx_weight = CGROUP_WEIGHT_DFL;
}
int scx_tg_online(struct task_group *tg)
{
struct scx_sched *sch = scx_root;
@@ -4205,12 +4210,12 @@ void scx_group_set_weight(struct task_group *tg, unsigned long weight)
percpu_down_read(&scx_cgroup_rwsem);
if (scx_cgroup_enabled && tg->scx_weight != weight) {
if (SCX_HAS_OP(sch, cgroup_set_weight))
SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL,
tg_cgrp(tg), weight);
tg->scx_weight = weight;
}
if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_weight) &&
tg->scx_weight != weight)
SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_weight, NULL,
tg_cgrp(tg), weight);
tg->scx_weight = weight;
percpu_up_read(&scx_cgroup_rwsem);
}
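
The reshuffle decouples the cached value from the callback: tg->scx_weight
now tracks the cgroup weight unconditionally, while the BPF scheduler's
cgroup_set_weight op is only invoked when one is present and the weight
actually changed. Before, a weight set while cgroup control was disabled
never reached the cache and went stale. A stand-alone sketch of that
difference (all names hypothetical, not kernel code):

    #include <stdio.h>

    struct group { unsigned long weight; };

    typedef void (*notify_fn)(unsigned long w);

    /* Old shape: the cache update was nested under 'enabled'. */
    static void set_weight_old(struct group *g, int enabled, notify_fn op,
                               unsigned long w)
    {
            if (enabled && g->weight != w) {
                    if (op)
                            op(w);
                    g->weight = w;
            }
    }

    /* New shape: notify conditionally, cache unconditionally. */
    static void set_weight_new(struct group *g, int enabled, notify_fn op,
                               unsigned long w)
    {
            if (enabled && op && g->weight != w)
                    op(w);
            g->weight = w;
    }

    int main(void)
    {
            struct group a = { 100 }, b = { 100 };

            /* Weight changes while cgroup control is disabled ... */
            set_weight_old(&a, 0, NULL, 200);
            set_weight_new(&b, 0, NULL, 200);

            /* ... and only the new variant remembers it: 100 vs 200. */
            printf("old: %lu, new: %lu\n", a.weight, b.weight);
            return 0;
    }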

kernel/sched/ext.h

@@ -95,6 +95,7 @@ static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_EXT_GROUP_SCHED
void scx_tg_init(struct task_group *tg);
int scx_tg_online(struct task_group *tg);
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
@@ -104,6 +105,7 @@ void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
#else /* CONFIG_EXT_GROUP_SCHED */
static inline void scx_tg_init(struct task_group *tg) {}
static inline int scx_tg_online(struct task_group *tg) { return 0; }
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }

mm/damon/Kconfig

@@ -4,7 +4,6 @@ menu "Data Access Monitoring"
config DAMON
bool "DAMON: Data Access Monitoring Framework"
default y
help
This builds a framework that allows kernel subsystems to monitor
access frequency of each memory region. The information can be useful

security/keys/gc.c

@@ -218,8 +218,8 @@ continue_scanning:
key = rb_entry(cursor, struct key, serial_node);
cursor = rb_next(cursor);
if (test_bit(KEY_FLAG_FINAL_PUT, &key->flags)) {
smp_mb(); /* Clobber key->user after FINAL_PUT seen. */
if (!test_bit_acquire(KEY_FLAG_USER_ALIVE, &key->flags)) {
/* Clobber key->user after final put seen. */
goto found_unreferenced_key;
}

security/keys/key.c

@@ -298,6 +298,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->restrict_link = restrict_link;
key->last_used_at = ktime_get_real_seconds();
key->flags |= 1 << KEY_FLAG_USER_ALIVE;
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
if (flags & KEY_ALLOC_BUILT_IN)
@@ -658,8 +659,8 @@ void key_put(struct key *key)
key->user->qnbytes -= key->quotalen;
spin_unlock_irqrestore(&key->user->lock, flags);
}
smp_mb(); /* key->user before FINAL_PUT set. */
set_bit(KEY_FLAG_FINAL_PUT, &key->flags);
/* Mark key as safe for GC after key->user done. */
clear_bit_unlock(KEY_FLAG_USER_ALIVE, &key->flags);
schedule_work(&key_gc_work);
}
}
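
Taken together, the key.c and gc.c sides replace an smp_mb() pair with a
release/acquire pairing on one bit: clear_bit_unlock() in key_put() orders
the key->user quota updates before the flag clear, and test_bit_acquire()
in the garbage collector orders the flag read before key->user is clobbered.
The same idiom in portable C11 atomics, as a hedged sketch (names
hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj {
            long quota;             /* stands in for key->user accounting */
            atomic_int alive;       /* stands in for KEY_FLAG_USER_ALIVE */
    };

    /* Publisher, like key_put(): finish the bookkeeping, then clear the
     * flag with release semantics so no observer can see the clear
     * without also seeing the bookkeeping.
     */
    static void last_put(struct obj *o)
    {
            o->quota = 0;
            atomic_store_explicit(&o->alive, 0, memory_order_release);
    }

    /* Consumer, like key_gc_keys(): an acquire load pairs with the
     * release store, so everything written before the clear is visible
     * before the object is reaped.
     */
    static int may_reap(struct obj *o)
    {
            return !atomic_load_explicit(&o->alive, memory_order_acquire);
    }

    int main(void)
    {
            struct obj o = { .quota = 42, .alive = 1 };

            printf("may_reap: %d\n", may_reap(&o));         /* 0 */
            last_put(&o);
            printf("may_reap: %d\n", may_reap(&o));         /* 1 */
            return 0;
    }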