sched_ext: Add the @sch parameter to ext_idle helpers

In preparation for multiple scheduler support, add the @sch parameter to
validate_node(), check_builtin_idle_enabled() and select_cpu_from_kfunc(),
and update their callers to read scx_root, verify that it's not NULL and
pass it in. The passed-in @sch parameter is not used yet.

Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Tejun Heo 2025-09-23 09:03:26 -10:00
parent fc6a93aa62
commit 2407bae23d
1 changed file with 94 additions and 15 deletions

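Before the per-kfunc diff, here is a minimal sketch (not verbatim kernel code) of the caller pattern this patch applies: resolve the root scheduler under RCU, bail out if no scheduler is loaded, then thread the pointer through to the idle helpers. The wrapper name example_idle_kfunc() is hypothetical; the exact error value and the follow-up logic differ per kfunc.

/* Hypothetical wrapper, for illustration only. */
static s32 example_idle_kfunc(int node)
{
	struct scx_sched *sch;

	guard(rcu)();				/* scx_root may change; read it under RCU */

	sch = rcu_dereference(scx_root);
	if (unlikely(!sch))			/* no scheduler attached */
		return -ENODEV;

	node = validate_node(sch, node);	/* @sch is passed through but not yet used */
	if (node < 0)
		return node;

	/* ... per-kfunc idle-CPU logic continues here ... */
	return 0;
}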

@@ -819,7 +819,7 @@ void scx_idle_disable(void)
  * Helpers that can be called from the BPF scheduler.
  */
-static int validate_node(int node)
+static int validate_node(struct scx_sched *sch, int node)
 {
 	if (!static_branch_likely(&scx_builtin_idle_per_node)) {
 		scx_kf_error("per-node idle tracking is disabled");
@@ -847,7 +847,7 @@ static int validate_node(int node)
 __bpf_kfunc_start_defs();
-static bool check_builtin_idle_enabled(void)
+static bool check_builtin_idle_enabled(struct scx_sched *sch)
 {
 	if (static_branch_likely(&scx_builtin_idle_enabled))
 		return true;
@@ -856,7 +856,8 @@ static bool check_builtin_idle_enabled(void)
 	return false;
 }
-static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p,
+				 s32 prev_cpu, u64 wake_flags,
 				 const struct cpumask *allowed, u64 flags)
 {
 	struct rq *rq;
@@ -866,7 +867,7 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	if (!kf_cpu_valid(prev_cpu, NULL))
 		return -EINVAL;
-	if (!check_builtin_idle_enabled())
+	if (!check_builtin_idle_enabled(sch))
 		return -EBUSY;
 	/*
@@ -946,15 +947,21 @@ __bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 				       u64 wake_flags, bool *is_idle)
 {
+	struct scx_sched *sch;
 	s32 cpu;
-	cpu = select_cpu_from_kfunc(p, prev_cpu, wake_flags, NULL, 0);
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return -ENODEV;
+	cpu = select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags, NULL, 0);
 	if (cpu >= 0) {
 		*is_idle = true;
 		return cpu;
 	}
 	*is_idle = false;
 	return prev_cpu;
 }
@@ -981,7 +988,16 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 				       const struct cpumask *cpus_allowed, u64 flags)
 {
-	return select_cpu_from_kfunc(p, prev_cpu, wake_flags, cpus_allowed, flags);
+	struct scx_sched *sch;
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return -ENODEV;
+	return select_cpu_from_kfunc(sch, p, prev_cpu, wake_flags,
+				     cpus_allowed, flags);
 }
 /**
@@ -995,7 +1011,15 @@ __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
  */
 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
 {
-	node = validate_node(node);
+	struct scx_sched *sch;
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return cpu_none_mask;
+	node = validate_node(sch, node);
 	if (node < 0)
 		return cpu_none_mask;
@@ -1011,12 +1035,20 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
  */
 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
 {
+	struct scx_sched *sch;
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return cpu_none_mask;
 	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
 		scx_kf_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
 		return cpu_none_mask;
 	}
-	if (!check_builtin_idle_enabled())
+	if (!check_builtin_idle_enabled(sch))
 		return cpu_none_mask;
 	return idle_cpumask(NUMA_NO_NODE)->cpu;
@@ -1034,7 +1066,15 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
  */
 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
 {
-	node = validate_node(node);
+	struct scx_sched *sch;
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return cpu_none_mask;
+	node = validate_node(sch, node);
 	if (node < 0)
 		return cpu_none_mask;
@@ -1054,12 +1094,20 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
  */
 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
 {
+	struct scx_sched *sch;
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return cpu_none_mask;
 	if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
 		scx_kf_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
 		return cpu_none_mask;
 	}
-	if (!check_builtin_idle_enabled())
+	if (!check_builtin_idle_enabled(sch))
 		return cpu_none_mask;
 	if (sched_smt_active())
@@ -1095,7 +1143,15 @@ __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
  */
 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
 {
-	if (!check_builtin_idle_enabled())
+	struct scx_sched *sch;
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return false;
+	if (!check_builtin_idle_enabled(sch))
 		return false;
 	if (!kf_cpu_valid(cpu, NULL))
@@ -1126,7 +1182,15 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
 __bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(const struct cpumask *cpus_allowed,
 					   int node, u64 flags)
 {
-	node = validate_node(node);
+	struct scx_sched *sch;
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return -ENODEV;
+	node = validate_node(sch, node);
 	if (node < 0)
 		return node;
@@ -1158,12 +1222,20 @@ __bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(const struct cpumask *cpus_allowed,
 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
 				      u64 flags)
 {
+	struct scx_sched *sch;
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return -ENODEV;
 	if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
 		scx_kf_error("per-node idle tracking is enabled");
 		return -EBUSY;
 	}
-	if (!check_builtin_idle_enabled())
+	if (!check_builtin_idle_enabled(sch))
 		return -EBUSY;
 	return scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
@@ -1193,9 +1265,16 @@ __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
 __bpf_kfunc s32 scx_bpf_pick_any_cpu_node(const struct cpumask *cpus_allowed,
 					  int node, u64 flags)
 {
+	struct scx_sched *sch;
 	s32 cpu;
-	node = validate_node(node);
+	guard(rcu)();
+	sch = rcu_dereference(scx_root);
+	if (unlikely(!sch))
+		return -ENODEV;
+	node = validate_node(sch, node);
 	if (node < 0)
 		return node;