mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-04-01 22:37:41 -04:00
net: qrtr: replace qrtr_tx_flow radix_tree with xarray to fix memory leak
__radix_tree_create() allocates and links intermediate nodes into the
tree one by one. If a subsequent allocation fails, the already-linked
nodes remain in the tree with no corresponding leaf entry. These orphaned
internal nodes are never reclaimed because radix_tree_for_each_slot()
only visits slots containing leaf values.
The radix_tree API is deprecated in favor of xarray. As suggested by
Matthew Wilcox, migrate qrtr_tx_flow from radix_tree to xarray instead
of fixing the radix_tree itself [1]. xarray properly handles cleanup of
internal nodes — xa_destroy() frees all internal xarray nodes when the
qrtr_node is released, preventing the leak.
[1] https://lore.kernel.org/all/20260225071623.41275-1-jiayuan.chen@linux.dev/T/
Reported-by: syzbot+006987d1be3586e13555@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/000000000000bfba3a060bf4ffcf@google.com/T/
Fixes: 5fdeb0d372 ("net: qrtr: Implement outgoing flow control")
Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20260324080645.290197-1-jiayuan.chen@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
committed by
Jakub Kicinski
parent
04f272188f
commit
2428083101
@@ -118,7 +118,7 @@ static DEFINE_XARRAY_ALLOC(qrtr_ports);
  * @ep: endpoint
  * @ref: reference count for node
  * @nid: node id
- * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
+ * @qrtr_tx_flow: xarray of qrtr_tx_flow, keyed by node << 32 | port
  * @qrtr_tx_lock: lock for qrtr_tx_flow inserts
  * @rx_queue: receive queue
  * @item: list item for broadcast list
@@ -129,7 +129,7 @@ struct qrtr_node {
 	struct kref ref;
 	unsigned int nid;
 
-	struct radix_tree_root qrtr_tx_flow;
+	struct xarray qrtr_tx_flow;
 	struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */
 
 	struct sk_buff_head rx_queue;
@@ -172,6 +172,7 @@ static void __qrtr_node_release(struct kref *kref)
 	struct qrtr_tx_flow *flow;
 	unsigned long flags;
 	void __rcu **slot;
+	unsigned long index;
 
 	spin_lock_irqsave(&qrtr_nodes_lock, flags);
 	/* If the node is a bridge for other nodes, there are possibly
@@ -189,11 +190,9 @@ static void __qrtr_node_release(struct kref *kref)
 	skb_queue_purge(&node->rx_queue);
 
 	/* Free tx flow counters */
-	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
-		flow = *slot;
-		radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
+	xa_for_each(&node->qrtr_tx_flow, index, flow)
 		kfree(flow);
-	}
+	xa_destroy(&node->qrtr_tx_flow);
 	kfree(node);
 }
 
@@ -228,9 +227,7 @@ static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
 
 	key = remote_node << 32 | remote_port;
 
-	rcu_read_lock();
-	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
-	rcu_read_unlock();
+	flow = xa_load(&node->qrtr_tx_flow, key);
 	if (flow) {
 		spin_lock(&flow->resume_tx.lock);
 		flow->pending = 0;
@@ -269,12 +266,13 @@ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
 		return 0;
 
 	mutex_lock(&node->qrtr_tx_lock);
-	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
+	flow = xa_load(&node->qrtr_tx_flow, key);
 	if (!flow) {
 		flow = kzalloc_obj(*flow);
 		if (flow) {
 			init_waitqueue_head(&flow->resume_tx);
-			if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
+			if (xa_err(xa_store(&node->qrtr_tx_flow, key, flow,
+					    GFP_KERNEL))) {
 				kfree(flow);
 				flow = NULL;
 			}
@@ -326,9 +324,7 @@ static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
 	unsigned long key = (u64)dest_node << 32 | dest_port;
 	struct qrtr_tx_flow *flow;
 
-	rcu_read_lock();
-	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
-	rcu_read_unlock();
+	flow = xa_load(&node->qrtr_tx_flow, key);
 	if (flow) {
 		spin_lock_irq(&flow->resume_tx.lock);
 		flow->tx_failed = 1;
@@ -599,7 +595,7 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
 	node->nid = QRTR_EP_NID_AUTO;
 	node->ep = ep;
 
-	INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
+	xa_init(&node->qrtr_tx_flow);
 	mutex_init(&node->qrtr_tx_lock);
 
 	qrtr_node_assign(node, nid);
@@ -627,6 +623,7 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
 	struct qrtr_tx_flow *flow;
 	struct sk_buff *skb;
 	unsigned long flags;
+	unsigned long index;
 	void __rcu **slot;
 
 	mutex_lock(&node->ep_lock);
@@ -649,10 +646,8 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
 
 	/* Wake up any transmitters waiting for resume-tx from the node */
 	mutex_lock(&node->qrtr_tx_lock);
-	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
-		flow = *slot;
+	xa_for_each(&node->qrtr_tx_flow, index, flow)
 		wake_up_interruptible_all(&flow->resume_tx);
-	}
 	mutex_unlock(&node->qrtr_tx_lock);
 
 	qrtr_node_release(node);
Reference in New Issue
Block a user