sched_ext: Inline create_dsq() into scx_bpf_create_dsq()

create_dsq() is only used by scx_bpf_create_dsq() and the separation gets in
the way of making dsq_hash per scx_sched. Inline it into
scx_bpf_create_dsq(). While at it, add unlikely() around
SCX_DSQ_FLAG_BUILTIN test.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Acked-by: Changwoo Min <changwoo@igalia.com>
This commit is contained in:
Tejun Heo
2025-04-29 08:40:10 -10:00
parent 17108735b4
commit 392b7e08de

View File

@@ -4196,29 +4196,6 @@ static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
dsq->id = dsq_id;
}
/*
 * Allocate and register a user dispatch queue (DSQ) with ID @dsq_id on NUMA
 * node @node, inserting it into the global dsq_hash rhashtable.
 *
 * Returns the new DSQ on success, or an ERR_PTR():
 *   -EINVAL if @dsq_id has SCX_DSQ_FLAG_BUILTIN set (builtin IDs are reserved
 *           and may not be created by callers),
 *   -ENOMEM on allocation failure,
 *   -EEXIST (from rhashtable_lookup_insert_fast()) if @dsq_id is already
 *           registered — NOTE(review): exact error propagated is whatever the
 *           rhashtable insert returns; confirm against rhashtable docs.
 *
 * Ownership of the returned DSQ transfers to the caller/hash table; on any
 * failure path the allocation is freed here.
 */
static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
{
struct scx_dispatch_q *dsq;
int ret;
/* Builtin DSQ IDs (flag bit set) are reserved — reject user creation. */
if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
return ERR_PTR(-EINVAL);
dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
if (!dsq)
return ERR_PTR(-ENOMEM);
init_dsq(dsq, dsq_id);
/* Atomic lookup+insert guards against duplicate dsq_id registration. */
ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node,
dsq_hash_params);
if (ret) {
/* Insert failed (e.g. duplicate ID) — undo the allocation. */
kfree(dsq);
return ERR_PTR(ret);
}
return dsq;
}
static void free_dsq_irq_workfn(struct irq_work *irq_work)
{
struct llist_node *to_free = llist_del_all(&dsqs_to_free);
@@ -6712,10 +6689,27 @@ __bpf_kfunc_start_defs();
*/
/*
 * scx_bpf_create_dsq - Create a custom dispatch queue (DSQ)
 * @dsq_id: DSQ to create
 * @node: NUMA node to allocate from
 *
 * BPF kfunc: create a DSQ identified by @dsq_id. With the former create_dsq()
 * helper inlined here, this function validates the arguments, allocates the
 * DSQ on @node and registers it in the global dsq_hash rhashtable.
 *
 * Returns 0 on success,
 *   -EINVAL if @node is out of range or @dsq_id carries SCX_DSQ_FLAG_BUILTIN
 *           (builtin DSQ IDs are reserved),
 *   -ENOMEM on allocation failure,
 *   or the error from rhashtable_lookup_insert_fast() (e.g. duplicate ID).
 *
 * NOTE(review): the rendered diff had lost its +/- markers, leaving the stale
 * pre-patch "return PTR_ERR_OR_ZERO(create_dsq(...))" line in place, which
 * made the inlined body below it unreachable. This is the intended
 * post-commit form with that line removed.
 */
__bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
{
	struct scx_dispatch_q *dsq;
	s32 ret;

	/* @node must be a valid node ID or NUMA_NO_NODE. */
	if (unlikely(node >= (int)nr_node_ids ||
		     (node < 0 && node != NUMA_NO_NODE)))
		return -EINVAL;

	/* Builtin DSQ IDs are reserved and may not be created by callers. */
	if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN))
		return -EINVAL;

	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
	if (!dsq)
		return -ENOMEM;

	init_dsq(dsq, dsq_id);

	/* Atomic lookup+insert rejects duplicate @dsq_id registration. */
	ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node,
					    dsq_hash_params);
	if (ret)
		kfree(dsq);
	return ret;
}
__bpf_kfunc_end_defs();