mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-25 00:52:45 -04:00
svcrdma: Relieve contention on sc_send_lock.
/proc/lock_stat indicates that the sc_send_lock is heavily contended when the server is under load from a single client. To address this, convert the send_ctxt free list to an llist. Returning an item to the send_ctxt cache is now waitless, which reduces the instruction path length in the single-threaded Send handler (svc_rdma_wc_send). The goal is to enable the ib_comp_wq worker to handle a higher RPC/RDMA Send completion rate given the same CPU resources. This change reduces CPU utilization of Send completion by 2-3% on my server. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Reviewed-By: Tom Talpey <tom@talpey.com>
This commit is contained in:
@@ -90,7 +90,7 @@ struct svcxprt_rdma {
 	struct ib_pd *sc_pd;
 
 	spinlock_t sc_send_lock;
-	struct list_head sc_send_ctxts;
+	struct llist_head sc_send_ctxts;
 	spinlock_t sc_rw_ctxt_lock;
 	struct list_head sc_rw_ctxts;
 
@@ -150,7 +150,7 @@ struct svc_rdma_recv_ctxt {
 };
 
 struct svc_rdma_send_ctxt {
-	struct list_head sc_list;
+	struct llist_node sc_node;
 	struct rpc_rdma_cid sc_cid;
 
 	struct ib_send_wr sc_send_wr;
Reference in New Issue
Block a user