treewide: Replace kmalloc with kmalloc_obj for non-scalar types

This is the result of running the Coccinelle script from
scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to
avoid scalar types (which need careful case-by-case checking), and
instead replace kmalloc-family calls that allocate struct or union
object instances:

Single allocations:	kmalloc(sizeof(TYPE), ...)
are replaced with:	kmalloc_obj(TYPE, ...)

Array allocations:	kmalloc_array(COUNT, sizeof(TYPE), ...)
are replaced with:	kmalloc_objs(TYPE, COUNT, ...)

Flex array allocations:	kmalloc(struct_size(PTR, FAM, COUNT), ...)
are replaced with:	kmalloc_flex(*PTR, FAM, COUNT, ...)

(where TYPE may also be *VAR)
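
For example, one of the conversions below:

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);

becomes:

	name_node = kmalloc_obj(*name_node, GFP_KERNEL);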

The resulting allocations no longer return "void *"; they return
"TYPE *" instead.

Signed-off-by: Kees Cook <kees@kernel.org>
commit 69050f8d6d (parent d39a1d7486)
Author: Kees Cook
Date:   2026-02-20 23:49:23 -08:00

8016 changed files with 20055 additions and 20913 deletions


@@ -500,7 +500,7 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
 			nr_maps++;
 	}
 
-	diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
+	diag = kzalloc_flex(*diag, maps, nr_maps, GFP_KERNEL);
 	if (!diag)
 		return ERR_PTR(-ENOMEM);
 


@@ -273,7 +273,7 @@ static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
 {
 	struct netdev_name_node *name_node;
 
-	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
+	name_node = kmalloc_obj(*name_node, GFP_KERNEL);
 	if (!name_node)
 		return NULL;
 	INIT_HLIST_NODE(&name_node->hlist);
@@ -6510,8 +6510,7 @@ struct flush_backlogs {
 
 static struct flush_backlogs *flush_backlogs_alloc(void)
 {
-	return kmalloc(struct_size_t(struct flush_backlogs, w, nr_cpu_ids),
-		       GFP_KERNEL);
+	return kmalloc_flex(struct flush_backlogs, w, nr_cpu_ids, GFP_KERNEL);
 }
 
 static struct flush_backlogs *flush_backlogs_fallback;
@@ -8694,7 +8693,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 		return 0;
 	}
 
-	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
+	adj = kmalloc_obj(*adj, GFP_KERNEL);
 	if (!adj)
 		return -ENOMEM;
 
@@ -9134,8 +9133,8 @@ static int netdev_offload_xstats_enable_l3(struct net_device *dev,
 	int err;
 	int rc;
 
-	dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
-					 GFP_KERNEL);
+	dev->offload_xstats_l3 = kzalloc_obj(*dev->offload_xstats_l3,
+					     GFP_KERNEL);
 	if (!dev->offload_xstats_l3)
 		return -ENOMEM;
 
@@ -10660,7 +10659,7 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 		return -EINVAL;
 	}
 
-	link = kzalloc(sizeof(*link), GFP_USER);
+	link = kzalloc_obj(*link, GFP_USER);
 	if (!link) {
 		err = -ENOMEM;
 		goto unlock;
@@ -11941,7 +11940,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 #ifdef CONFIG_NET_CLS_ACT
 	if (queue)
 		return queue;
-	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+	queue = kzalloc_obj(*queue, GFP_KERNEL);
 	if (!queue)
 		return NULL;
 	netdev_init_one_queue(dev, queue, NULL);
@@ -12016,8 +12015,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	maxqs = max(txqs, rxqs);
 
-	dev = kvzalloc(struct_size(dev, priv, sizeof_priv),
-		       GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+	dev = kvzalloc_flex(*dev, priv, sizeof_priv,
+			    GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
 	if (!dev)
 		return NULL;
@@ -12088,11 +12087,11 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
 		goto free_all;
-	dev->ethtool = kzalloc(sizeof(*dev->ethtool), GFP_KERNEL_ACCOUNT);
+	dev->ethtool = kzalloc_obj(*dev->ethtool, GFP_KERNEL_ACCOUNT);
 	if (!dev->ethtool)
 		goto free_all;
 
-	dev->cfg = kzalloc(sizeof(*dev->cfg), GFP_KERNEL_ACCOUNT);
+	dev->cfg = kzalloc_obj(*dev->cfg, GFP_KERNEL_ACCOUNT);
 	if (!dev->cfg)
 		goto free_all;
 	dev->cfg_pending = dev->cfg;
@@ -12858,7 +12857,7 @@ static struct hlist_head * __net_init netdev_create_hash(void)
 	int i;
 	struct hlist_head *hash;
 
-	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
+	hash = kmalloc_objs(*hash, NETDEV_HASHENTRIES, GFP_KERNEL);
 	if (hash != NULL)
 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
 			INIT_HLIST_HEAD(&hash[i]);


@@ -241,9 +241,9 @@ net_devmem_bind_dmabuf(struct net_device *dev,
 	}
 
 	if (direction == DMA_TO_DEVICE) {
-		binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
-						 sizeof(struct net_iov *),
-						 GFP_KERNEL);
+		binding->tx_vec = kvmalloc_objs(struct net_iov *,
+						dmabuf->size / PAGE_SIZE,
+						GFP_KERNEL);
 		if (!binding->tx_vec) {
 			err = -ENOMEM;
 			goto err_unmap;
@@ -289,9 +289,9 @@ net_devmem_bind_dmabuf(struct net_device *dev,
 			goto err_free_chunks;
 		}
 
-		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
-						   sizeof(*owner->area.niovs),
-						   GFP_KERNEL);
+		owner->area.niovs = kvmalloc_objs(*owner->area.niovs,
+						  owner->area.num_niovs,
+						  GFP_KERNEL);
 		if (!owner->area.niovs) {
 			err = -ENOMEM;
 			goto err_free_chunks;


@@ -306,8 +306,8 @@ net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
 	struct net_dm_hw_entries *hw_entries;
 	unsigned long flags;
 
-	hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
-			     GFP_KERNEL);
+	hw_entries = kzalloc_flex(*hw_entries, entries, dm_hit_limit,
+				  GFP_KERNEL);
 	if (!hw_entries) {
 		/* If the memory allocation failed, we try to perform another
 		 * allocation in 1/10 second. Otherwise, the probe function
@@ -856,7 +856,7 @@ net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata)
 	const char *trap_group_name;
 	const char *trap_name;
 
-	hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC);
+	hw_metadata = kzalloc_obj(*hw_metadata, GFP_ATOMIC);
 	if (!hw_metadata)
 		return NULL;
 
@@ -1583,7 +1583,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 	case NETDEV_REGISTER:
 		if (WARN_ON_ONCE(rtnl_dereference(dev->dm_private)))
 			break;
-		stat = kzalloc(sizeof(*stat), GFP_KERNEL);
+		stat = kzalloc_obj(*stat, GFP_KERNEL);
 		if (!stat)
 			break;
 


@@ -191,7 +191,7 @@ EXPORT_SYMBOL(dst_release_immediate);
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
-	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
+	struct dst_metrics *p = kmalloc_obj(*p, GFP_ATOMIC);
 
 	if (p) {
 		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
@@ -295,8 +295,7 @@ struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
 {
 	struct metadata_dst *md_dst;
 
-	md_dst = kmalloc(struct_size(md_dst, u.tun_info.options, optslen),
-			 flags);
+	md_dst = kmalloc_flex(*md_dst, u.tun_info.options, optslen, flags);
 	if (!md_dst)
 		return NULL;
 


@@ -247,7 +247,7 @@ struct failover *failover_register(struct net_device *dev,
 	if (dev->type != ARPHRD_ETHER)
 		return ERR_PTR(-EINVAL);
 
-	failover = kzalloc(sizeof(*failover), GFP_KERNEL);
+	failover = kzalloc_obj(*failover, GFP_KERNEL);
 	if (!failover)
 		return ERR_PTR(-ENOMEM);
 


@@ -600,8 +600,7 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
 	if (new_prog) {
 		first_insn = new_prog->insnsi;
-		addrs = kcalloc(len, sizeof(*addrs),
-				GFP_KERNEL | __GFP_NOWARN);
+		addrs = kzalloc_objs(*addrs, len, GFP_KERNEL | __GFP_NOWARN);
 		if (!addrs)
 			return -ENOMEM;
 	}
 
@@ -1162,7 +1161,7 @@ static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
 	unsigned int fsize = bpf_classic_proglen(fprog);
 	struct sock_fprog_kern *fkprog;
 
-	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
+	fp->orig_prog = kmalloc_obj(*fkprog, GFP_KERNEL);
 	if (!fp->orig_prog)
 		return -ENOMEM;
 
@@ -1482,7 +1481,7 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
 {
 	struct sk_filter *fp, *old_fp;
 
-	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+	fp = kmalloc_obj(*fp, GFP_KERNEL);
 	if (!fp)
 		return -ENOMEM;
 


@@ -12,8 +12,7 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions)
 	struct flow_rule *rule;
 	int i;
 
-	rule = kzalloc(struct_size(rule, action.entries, num_actions),
-		       GFP_KERNEL);
+	rule = kzalloc_flex(*rule, action.entries, num_actions, GFP_KERNEL);
 	if (!rule)
 		return NULL;
 
@@ -33,8 +32,8 @@ struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
 	struct flow_offload_action *fl_action;
 	int i;
 
-	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
-			    GFP_KERNEL);
+	fl_action = kzalloc_flex(*fl_action, action.entries, num_actions,
+				 GFP_KERNEL);
 	if (!fl_action)
 		return NULL;
 
@@ -264,7 +263,7 @@ struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
 {
 	struct flow_block_cb *block_cb;
 
-	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
+	block_cb = kzalloc_obj(*block_cb, GFP_KERNEL);
 	if (!block_cb)
 		return ERR_PTR(-ENOMEM);
 
@@ -391,7 +390,7 @@ static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
 {
 	struct flow_indr_dev *indr_dev;
 
-	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
+	indr_dev = kmalloc_obj(*indr_dev, GFP_KERNEL);
 	if (!indr_dev)
 		return NULL;
 
@@ -571,7 +570,7 @@ static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
 	if (info)
 		return -EEXIST;
 
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = kzalloc_obj(*info, GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 


@@ -154,7 +154,7 @@ int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
 	if (parm->ewma_log == 0 || parm->ewma_log >= 31)
 		return -EINVAL;
 
-	est = kzalloc(sizeof(*est), GFP_KERNEL);
+	est = kzalloc_obj(*est, GFP_KERNEL);
 	if (!est)
 		return -ENOBUFS;
 


@@ -132,7 +132,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
 	 * because we might be called from cleanup_net(), and we
 	 * definitely do not want to block this critical task.
 	 */
-	defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
+	defer = kmalloc_obj(*defer, GFP_KERNEL | __GFP_NOWARN);
 	if (likely(defer)) {
 		defer->ptr = gcells->cells;
 		call_rcu(&defer->rcu, percpu_free_defer_callback);


@@ -562,7 +562,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
 	struct neigh_hash_table *ret;
 	int i;
 
-	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+	ret = kmalloc_obj(*ret, GFP_ATOMIC);
 	if (!ret)
 		return NULL;
 


@@ -492,7 +492,7 @@ static struct net *net_alloc(void)
 		goto out_free;
 
 #ifdef CONFIG_KEYS
-	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
+	net->key_domain = kzalloc_obj(struct key_tag, GFP_KERNEL);
 	if (!net->key_domain)
 		goto out_free_2;
 	refcount_set(&net->key_domain->usage, 1);


@@ -32,7 +32,7 @@ cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	struct cgroup_cls_state *cs;
 
-	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+	cs = kzalloc_obj(*cs, GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
 


@@ -565,7 +565,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	npinfo = rtnl_dereference(ndev->npinfo);
 	if (!npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		npinfo = kmalloc_obj(*npinfo, GFP_KERNEL);
 		if (!npinfo) {
 			err = -ENOMEM;
 			goto out;


@@ -135,7 +135,7 @@ cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	struct cgroup_subsys_state *css;
 
-	css = kzalloc(sizeof(*css), GFP_KERNEL);
+	css = kzalloc_obj(*css, GFP_KERNEL);
 	if (!css)
 		return ERR_PTR(-ENOMEM);
 


@@ -414,7 +414,7 @@ static int rtnl_register_internal(struct module *owner,
 		if (!link)
 			goto unlock;
 	} else {
-		link = kzalloc(sizeof(*link), GFP_KERNEL);
+		link = kzalloc_obj(*link, GFP_KERNEL);
 		if (!link)
 			goto unlock;
 	}
@@ -3969,7 +3969,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	int ops_srcu_index;
 	int ret;
 
-	tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
+	tbs = kmalloc_obj(*tbs, GFP_KERNEL);
 	if (!tbs)
 		return -ENOMEM;
 


@@ -83,7 +83,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 	if (!fpl)
 	{
-		fpl = kmalloc(sizeof(struct scm_fp_list), GFP_KERNEL_ACCOUNT);
+		fpl = kmalloc_obj(struct scm_fp_list, GFP_KERNEL_ACCOUNT);
 		if (!fpl)
 			return -ENOMEM;
 		*fplp = fpl;


@@ -237,7 +237,7 @@ static int __net_test_loopback(struct net_device *ndev,
 	struct sk_buff *skb = NULL;
 	int ret = 0;
 
-	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+	tpriv = kzalloc_obj(*tpriv, GFP_KERNEL);
 	if (!tpriv)
 		return -ENOMEM;
 


@@ -522,7 +522,7 @@ static struct sk_msg *alloc_sk_msg(gfp_t gfp)
 {
 	struct sk_msg *msg;
 
-	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
+	msg = kzalloc_obj(*msg, gfp | __GFP_NOWARN);
 	if (unlikely(!msg))
 		return NULL;
 	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);


@@ -1097,7 +1097,7 @@ sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
 		return -EINVAL;
 
 	num_tokens = optlen / sizeof(*tokens);
-	tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
+	tokens = kvmalloc_objs(*tokens, num_tokens, GFP_KERNEL);
 	if (!tokens)
 		return -ENOMEM;
 


@@ -177,7 +177,7 @@ void sock_diag_broadcast_destroy(struct sock *sk)
 {
 	/* Note, this function is often called from an interrupt context. */
 	struct broadcast_sk *bsk =
-		kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC);
+		kmalloc_obj(struct broadcast_sk, GFP_ATOMIC);
 	if (!bsk)
 		return sk_destruct(sk);
 	bsk->sk = sk;


@@ -1858,7 +1858,7 @@ int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
 		goto out;
 	}
 
-	sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER);
+	sockmap_link = kzalloc_obj(*sockmap_link, GFP_USER);
 	if (!sockmap_link) {
 		ret = -ENOMEM;
 		goto out;


@@ -175,7 +175,7 @@ static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
 {
 	struct sock_reuseport *reuse;
 
-	reuse = kzalloc(struct_size(reuse, socks, max_socks), GFP_ATOMIC);
+	reuse = kzalloc_flex(*reuse, socks, max_socks, GFP_ATOMIC);
 	if (!reuse)
 		return NULL;
 


@@ -214,7 +214,7 @@ static int __mem_id_init_hash_table(void)
 	if (unlikely(mem_id_init))
 		return 0;
 
-	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
+	rht = kzalloc_obj(*rht, GFP_KERNEL);
 	if (!rht)
 		return -ENOMEM;
 
@@ -297,7 +297,7 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
 		return ERR_PTR(ret);
 	}
 
-	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
+	xdp_alloc = kzalloc_obj(*xdp_alloc, gfp);
 	if (!xdp_alloc)
 		return ERR_PTR(-ENOMEM);