RDMA/ucma: Support writing an event into a CM channel

Enable user space to inject an event into a CM through its event
channel. Two new event types are added: RDMA_CM_EVENT_USER and
RDMA_CM_EVENT_INTERNAL. For these two events a new event parameter,
"arg", is supported; it is passed from sender to receiver transparently.

With this feature an application can write an event into a CM
channel through a new user-space rdmacm API. For example, thread T1
could write an event with:
    rdma_write_cm_event(cm_id, RDMA_CM_EVENT_USER, status, arg);
and thread T2 could receive it with rdma_get_cm_event().
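
A minimal user-space sketch of that flow, assuming the rdma_write_cm_event()
prototype implied by the example call above, and assuming the rdma-core side
of this change exposes the new "arg" member as event->param.arg (the receiver
uses the existing rdma_get_cm_event()/rdma_ack_cm_event() calls; the function
names below are illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <rdma/rdma_cma.h>

    /* T1: inject a user event carrying a 64-bit cookie on cm_id's channel. */
    static int notify_peer(struct rdma_cm_id *cm_id, uint64_t cookie)
    {
        /* status and arg are delivered unchanged to the reader of the channel */
        return rdma_write_cm_event(cm_id, RDMA_CM_EVENT_USER, 0, cookie);
    }

    /* T2: block on the event channel and consume the injected event. */
    static int wait_for_user_event(struct rdma_event_channel *channel)
    {
        struct rdma_cm_event *event;

        if (rdma_get_cm_event(channel, &event))
            return -1;
        if (event->event == RDMA_CM_EVENT_USER)
            printf("status %d arg 0x%llx\n", event->status,
                   (unsigned long long)event->param.arg);
        return rdma_ack_cm_event(event);
    }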

Signed-off-by: Mark Zhang <markzhang@nvidia.com>
Reviewed-by: Vlad Dumitrescu <vdumitrescu@nvidia.com>
Link: https://patch.msgid.link/fdf49d0b17a45933c5d8c1d90605c9447d9a3c73.1751279794.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
---
 3 files changed, 70 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c

@@ -1745,6 +1745,55 @@ err_unlock:
 	return ret;
 }
 
+static ssize_t ucma_write_cm_event(struct ucma_file *file,
+				   const char __user *inbuf, int in_len,
+				   int out_len)
+{
+	struct rdma_ucm_write_cm_event cmd;
+	struct rdma_cm_event event = {};
+	struct ucma_event *uevent;
+	struct ucma_context *ctx;
+	int ret = 0;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	if ((cmd.event != RDMA_CM_EVENT_USER) &&
+	    (cmd.event != RDMA_CM_EVENT_INTERNAL))
+		return -EINVAL;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	event.event = cmd.event;
+	event.status = cmd.status;
+	event.param.arg = cmd.param.arg;
+
+	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+	if (!uevent) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	uevent->ctx = ctx;
+	uevent->resp.uid = ctx->uid;
+	uevent->resp.id = ctx->id;
+	uevent->resp.event = event.event;
+	uevent->resp.status = event.status;
+	memcpy(uevent->resp.param.arg32, &event.param.arg,
+	       sizeof(event.param.arg));
+
+	mutex_lock(&ctx->file->mut);
+	list_add_tail(&uevent->list, &ctx->file->event_list);
+	mutex_unlock(&ctx->file->mut);
+	wake_up_interruptible(&ctx->file->poll_wait);
+
+out:
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
 static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
 				   const char __user *inbuf,
 				   int in_len, int out_len) = {
@@ -1771,7 +1820,8 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
 	[RDMA_USER_CM_CMD_BIND] = ucma_bind,
 	[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
 	[RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
-	[RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE] = ucma_resolve_ib_service
+	[RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE] = ucma_resolve_ib_service,
+	[RDMA_USER_CM_CMD_WRITE_CM_EVENT] = ucma_write_cm_event,
 };
 
 static ssize_t ucma_write(struct file *filp, const char __user *buf,

diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h

@@ -35,7 +35,9 @@ enum rdma_cm_event_type {
 	RDMA_CM_EVENT_ADDR_CHANGE,
 	RDMA_CM_EVENT_TIMEWAIT_EXIT,
 	RDMA_CM_EVENT_ADDRINFO_RESOLVED,
-	RDMA_CM_EVENT_ADDRINFO_ERROR
+	RDMA_CM_EVENT_ADDRINFO_ERROR,
+	RDMA_CM_EVENT_USER,
+	RDMA_CM_EVENT_INTERNAL,
 };
 
 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event);
@@ -98,6 +100,7 @@ struct rdma_cm_event {
 	union {
 		struct rdma_conn_param conn;
 		struct rdma_ud_param ud;
+		u64 arg;
 	} param;
 	struct rdma_ucm_ece ece;
 };

diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h

@@ -68,7 +68,8 @@ enum {
 	RDMA_USER_CM_CMD_BIND,
 	RDMA_USER_CM_CMD_RESOLVE_ADDR,
 	RDMA_USER_CM_CMD_JOIN_MCAST,
-	RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE
+	RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE,
+	RDMA_USER_CM_CMD_WRITE_CM_EVENT,
 };
 
 /* See IBTA Annex A11, servies ID bytes 4 & 5 */
@@ -304,6 +305,7 @@ struct rdma_ucm_event_resp {
 	union {
 		struct rdma_ucm_conn_param conn;
 		struct rdma_ucm_ud_param ud;
+		__u32 arg32[2];
 	} param;
 	__u32 reserved;
 	struct rdma_ucm_ece ece;
@@ -362,4 +364,16 @@ struct rdma_ucm_resolve_ib_service {
 	__u32 id;
 	struct rdma_ucm_ib_service ibs;
 };
+
+struct rdma_ucm_write_cm_event {
+	__u32 id;
+	__u32 reserved;
+	__u32 event;
+	__u32 status;
+	union {
+		struct rdma_ucm_conn_param conn;
+		struct rdma_ucm_ud_param ud;
+		__u64 arg;
+	} param;
+};
 #endif /* RDMA_USER_CM_H */
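
For completeness, a sketch of how the new command travels through the
rdma_cm write() ABI itself, assuming a ucma context id obtained earlier
when the CM ID was created over the same /dev/infiniband/rdma_cm fd, and
assuming updated headers that provide RDMA_CM_EVENT_USER; the
rdma_ucm_cmd_hdr framing is the one ucma_write() already dispatches on:

    #include <string.h>
    #include <unistd.h>
    #include <rdma/rdma_cma.h>      /* RDMA_CM_EVENT_USER (updated enum) */
    #include <rdma/rdma_user_cm.h>  /* rdma_ucm_cmd_hdr, rdma_ucm_write_cm_event */

    /* Queue a user event on the file's event list; a later
     * RDMA_USER_CM_CMD_GET_EVENT on the same fd will return it. */
    static int write_cm_event_raw(int rdma_cm_fd, __u32 ctx_id, __u64 arg)
    {
        struct {
            struct rdma_ucm_cmd_hdr hdr;
            struct rdma_ucm_write_cm_event cmd;
        } msg;

        memset(&msg, 0, sizeof(msg));
        msg.hdr.cmd = RDMA_USER_CM_CMD_WRITE_CM_EVENT;
        msg.hdr.in = sizeof(msg.cmd);        /* payload ucma_write_cm_event() copies in */
        msg.hdr.out = 0;                     /* no response payload */

        msg.cmd.id = ctx_id;                 /* assumed: context id of the CM ID */
        msg.cmd.event = RDMA_CM_EVENT_USER;  /* only USER/INTERNAL are accepted */
        msg.cmd.status = 0;
        msg.cmd.param.arg = arg;             /* read back via resp.param.arg32[2] */

        return write(rdma_cm_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
    }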