RDMA/core: Pass port to counter bind/unbind operations
This will be useful for the next patches in the series, since the port number is needed for optional-counter binding and unbinding. Note that this change is necessary because, at the time the operation runs, qp->port is not necessarily initialized yet and therefore cannot be used.

Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Link: https://patch.msgid.link/b6f6797844acbd517358e8d2a270ea9b3e6ecba1.1741875070.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent da3711074f
commit 88ae02feda
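In short: both driver callbacks gain an explicit port argument, so the port is supplied by the core at call time instead of being read from the QP. A minimal before/after sketch of the driver-facing API (signatures taken from the ib_device_ops hunk below):

	/* Before: drivers had to derive the port from the qp, but qp->port
	 * may not be initialized yet when the bind happens.
	 */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	int (*counter_unbind_qp)(struct ib_qp *qp);

	/* After: the core passes the port explicitly. */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp,
			       u32 port);
	int (*counter_unbind_qp)(struct ib_qp *qp, u32 port);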
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -93,7 +93,7 @@ static void auto_mode_init_counter(struct rdma_counter *counter,
 }
 
 static int __rdma_counter_bind_qp(struct rdma_counter *counter,
-				  struct ib_qp *qp)
+				  struct ib_qp *qp, u32 port)
 {
 	int ret;
 
@@ -104,7 +104,7 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter,
 		return -EOPNOTSUPP;
 
 	mutex_lock(&counter->lock);
-	ret = qp->device->ops.counter_bind_qp(counter, qp);
+	ret = qp->device->ops.counter_bind_qp(counter, qp, port);
 	mutex_unlock(&counter->lock);
 
 	return ret;
@@ -196,7 +196,7 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
 	kref_init(&counter->kref);
 	mutex_init(&counter->lock);
 
-	ret = __rdma_counter_bind_qp(counter, qp);
+	ret = __rdma_counter_bind_qp(counter, qp, port);
 	if (ret)
 		goto err_mode;
 
@@ -247,7 +247,7 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
 	return match;
 }
 
-static int __rdma_counter_unbind_qp(struct ib_qp *qp)
+static int __rdma_counter_unbind_qp(struct ib_qp *qp, u32 port)
 {
 	struct rdma_counter *counter = qp->counter;
 	int ret;
@@ -256,7 +256,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&counter->lock);
-	ret = qp->device->ops.counter_unbind_qp(qp);
+	ret = qp->device->ops.counter_unbind_qp(qp, port);
 	mutex_unlock(&counter->lock);
 
 	return ret;
@@ -348,7 +348,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port)
 
 	counter = rdma_get_counter_auto_mode(qp, port);
 	if (counter) {
-		ret = __rdma_counter_bind_qp(counter, qp);
+		ret = __rdma_counter_bind_qp(counter, qp, port);
 		if (ret) {
 			kref_put(&counter->kref, counter_release);
 			return ret;
@@ -368,7 +368,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port)
  * @force:
  *	true - Decrease the counter ref-count anyway (e.g., qp destroy)
  */
-int rdma_counter_unbind_qp(struct ib_qp *qp, bool force)
+int rdma_counter_unbind_qp(struct ib_qp *qp, u32 port, bool force)
 {
 	struct rdma_counter *counter = qp->counter;
 	int ret;
@@ -376,7 +376,7 @@ int rdma_counter_unbind_qp(struct ib_qp *qp, bool force)
 	if (!counter)
 		return -EINVAL;
 
-	ret = __rdma_counter_unbind_qp(qp);
+	ret = __rdma_counter_unbind_qp(qp, port);
 	if (ret && !force)
 		return ret;
 
@@ -523,7 +523,7 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
 		goto err_task;
 	}
 
-	ret = __rdma_counter_bind_qp(counter, qp);
+	ret = __rdma_counter_bind_qp(counter, qp, port);
 	if (ret)
 		goto err_task;
 
@@ -614,7 +614,7 @@ int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
 		goto out;
 	}
 
-	ret = rdma_counter_unbind_qp(qp, false);
+	ret = rdma_counter_unbind_qp(qp, port, false);
 
 out:
 	rdma_restrack_put(&qp->res);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2105,7 +2105,7 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
 	if (!qp->uobject)
 		rdma_rw_cleanup_mrs(qp);
 
-	rdma_counter_unbind_qp(qp, true);
+	rdma_counter_unbind_qp(qp, qp->port, true);
 	ret = qp->device->ops.destroy_qp(qp, udata);
 	if (ret) {
 		if (sec)
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -562,7 +562,7 @@ static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
 }
 
 static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
-				   struct ib_qp *qp)
+				   struct ib_qp *qp, u32 port)
 {
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
 	int err;
@@ -594,7 +594,7 @@ fail_set_counter:
 	return err;
 }
 
-static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
+static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
 {
 	return mlx5_ib_qp_set_counter(qp, NULL);
 }
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2644,12 +2644,13 @@ struct ib_device_ops {
 	 * @counter - The counter to be bound. If counter->id is zero then
 	 * the driver needs to allocate a new counter and set counter->id
 	 */
-	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
+	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp,
+			       u32 port);
 	/**
 	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
 	 * counter and bind it onto the default one
 	 */
-	int (*counter_unbind_qp)(struct ib_qp *qp);
+	int (*counter_unbind_qp)(struct ib_qp *qp, u32 port);
 	/**
 	 * counter_dealloc -De-allocate the hw counter
 	 */
diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h
--- a/include/rdma/rdma_counter.h
+++ b/include/rdma/rdma_counter.h
@@ -51,7 +51,7 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
 			       bool bind_opcnt,
 			       struct netlink_ext_ack *extack);
 int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port);
-int rdma_counter_unbind_qp(struct ib_qp *qp, bool force);
+int rdma_counter_unbind_qp(struct ib_qp *qp, u32 port, bool force);
 
 int rdma_counter_query_stats(struct rdma_counter *counter);
 u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index);
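For a driver implementing these ops, the update is mechanical. A hypothetical sketch (the driver name foo and the stub bodies are illustrative only, not part of this patch):

	#include <rdma/ib_verbs.h>
	#include <rdma/rdma_counter.h>

	/* Bind the QP to the counter on the given port. The port now arrives
	 * as an argument from the core rather than being read from qp->port,
	 * which may not be initialized yet at bind time.
	 */
	static int foo_counter_bind_qp(struct rdma_counter *counter,
				       struct ib_qp *qp, u32 port)
	{
		/* program hardware to attribute qp's traffic to counter ... */
		return 0;
	}

	/* Unbind the QP and fall back to the default counter of that port. */
	static int foo_counter_unbind_qp(struct ib_qp *qp, u32 port)
	{
		/* restore the default counter binding ... */
		return 0;
	}

	static const struct ib_device_ops foo_dev_ops = {
		.counter_bind_qp = foo_counter_bind_qp,
		.counter_unbind_qp = foo_counter_unbind_qp,
	};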