mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-21 04:53:46 -04:00
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"A quiet cycle after the larger 5.8 effort. Substantially cleanup and
driver work with a few smaller features this time.
- Driver updates for hfi1, rxe, mlx5, hns, qedr, usnic, bnxt_re
- Removal of dead or redundant code across the drivers
- RAW resource tracker dumps to include a device-specific data blob
for device objects to aid device debugging
- Further advance the IOCTL interface, remove the ability to turn it
off. Add QUERY_CONTEXT, QUERY_MR, and QUERY_PD commands
- Remove stubs related to devices with no pkey table
- A shared CQ scheme to allow multiple ULPs to share the CQ rings of
a device to give higher performance
- Several more static checker warnings, syzkaller reports and rare crashes fixed"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (121 commits)
RDMA/mlx5: Fix flow destination setting for RDMA TX flow table
RDMA/rxe: Remove pkey table
RDMA/umem: Add a schedule point in ib_umem_get()
RDMA/hns: Fix the unneeded process when getting a general type of CQE error
RDMA/hns: Fix error during modify qp RTS2RTS
RDMA/hns: Delete unnecessary memset when allocating VF resource
RDMA/hns: Remove redundant parameters in set_rc_wqe()
RDMA/hns: Remove support for HIP08_A
RDMA/hns: Refactor hns_roce_v2_set_hem()
RDMA/hns: Remove redundant hardware opcode definitions
RDMA/netlink: Remove CAP_NET_RAW check when dump a raw QP
RDMA/include: Replace license text with SPDX tags
RDMA/rtrs: remove WQ_MEM_RECLAIM for rtrs_wq
RDMA/rtrs-clt: add an additional random 8 seconds before reconnecting
RDMA/cma: Execute rdma_cm destruction from a handler properly
RDMA/cma: Remove unneeded locking for req paths
RDMA/cma: Using the standard locking pattern when delivering the removal event
RDMA/cma: Simplify DEVICE_REMOVAL for internal_id
RDMA/efa: Add EFA 0xefa1 PCI ID
RDMA/efa: User/kernel compatibility handshake mechanism
...
drivers/infiniband/hw/mlx5/devx.c
@@ -14,6 +14,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/fs.h>
 #include "mlx5_ib.h"
+#include "devx.h"
 #include "qp.h"
 #include <linux/xarray.h>
 
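The only functional change in this hunk is the new "devx.h" include. That header is not shown in this excerpt, but since the struct devx_obj definition is deleted from this file in the next hunk, it has presumably moved there. A minimal sketch of what devx.h would need to declare for this file to keep compiling, inferred purely from the removals below (the guard name and exact contents are assumptions):

/* devx.h -- sketch only; inferred as the new home of the definitions
 * removed from devx.c below. The guard name is an assumption. */
#ifndef _MLX5_IB_DEVX_H
#define _MLX5_IB_DEVX_H

#include "mlx5_ib.h"

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)

struct devx_obj {
	struct mlx5_ib_dev *ib_dev;
	u64 obj_id;
	u32 dinlen;	/* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32 flags;
	union {
		struct mlx5_ib_devx_mr devx_mr;
		struct mlx5_core_dct core_dct;
		struct mlx5_core_cq core_cq;
		u32 flow_counter_bulk_size;
	};
	struct list_head event_sub;	/* holds devx_event_subscription entries */
};

#endif /* _MLX5_IB_DEVX_H */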
@@ -89,22 +90,6 @@ struct devx_async_event_file {
 	u8 is_destroyed:1;
 };
 
-#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
-struct devx_obj {
-	struct mlx5_ib_dev *ib_dev;
-	u64 obj_id;
-	u32 dinlen; /* destroy inbox length */
-	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
-	u32 flags;
-	union {
-		struct mlx5_ib_devx_mr devx_mr;
-		struct mlx5_core_dct core_dct;
-		struct mlx5_core_cq core_cq;
-		u32 flow_counter_bulk_size;
-	};
-	struct list_head event_sub; /* holds devx_event_subscription entries */
-};
-
 struct devx_umem {
 	struct mlx5_core_dev *mdev;
 	struct ib_umem *umem;
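struct devx_obj caches a ready-to-fire destroy command for each DEVX object: dinbox holds the command inbox built at creation time and dinlen its length, so teardown never has to reconstruct the command. A minimal sketch of how such a cached inbox is consumed (the helper name is hypothetical; the actual cleanup path in this file is not shown in the excerpt):

/* Sketch only: issue the destroy command prepared at object-creation time.
 * devx_obj_destroy_hw() is an illustrative name, not a function from this diff. */
static int devx_obj_destroy_hw(struct devx_obj *obj)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};

	/* dinbox already carries the opcode and object id for this object */
	return mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen,
			     out, sizeof(out));
}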
@@ -171,48 +156,6 @@ void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
 	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 }
 
-bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
-{
-	struct devx_obj *devx_obj = obj;
-	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
-
-	switch (opcode) {
-	case MLX5_CMD_OP_DESTROY_TIR:
-		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
-				    obj_id);
-		return true;
-
-	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
-		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
-				    table_id);
-		return true;
-	default:
-		return false;
-	}
-}
-
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
-{
-	struct devx_obj *devx_obj = obj;
-	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
-
-	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
-
-		if (offset && offset >= devx_obj->flow_counter_bulk_size)
-			return false;
-
-		*counter_id = MLX5_GET(dealloc_flow_counter_in,
-				       devx_obj->dinbox,
-				       flow_counter_id);
-		*counter_id += offset;
-		return true;
-	}
-
-	return false;
-}
-
 static bool is_legacy_unaffiliated_event_num(u16 event_num)
 {
 	switch (event_num) {
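The two helpers removed here translate a DEVX-created object into something the flow-steering code can consume, by pulling the opcode and ids back out of the cached destroy inbox with MLX5_GET: either a destination (TIR or flow table) or a flow counter id. For counters, the returned id is the bulk's base id plus the requested offset; a non-zero offset outside flow_counter_bulk_size is rejected, while offset 0 is always accepted (the single-counter case). A caller-side sketch with illustrative names (the real call sites in the mlx5 flow code are not part of this excerpt):

/* Sketch: how a flow-creation path could resolve a DEVX object.
 * resolve_devx_dest() and its parameters are illustrative, not from this diff. */
static int resolve_devx_dest(struct devx_obj *obj, int *dest_id, int *dest_type,
			     u32 counter_offset, u32 *counter_id)
{
	if (mlx5_ib_devx_is_flow_dest(obj, dest_id, dest_type))
		return 0;	/* TIR number or flow table id in *dest_id */

	if (mlx5_ib_devx_is_flow_counter(obj, counter_offset, counter_id))
		return 0;	/* *counter_id = bulk base id + counter_offset */

	return -EINVAL;		/* object cannot be used in a flow rule */
}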
@@ -2419,17 +2362,24 @@ static int devx_event_notifier(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
+int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_devx_event_table *table = &dev->devx_event_table;
+	int uid;
 
-	xa_init(&table->event_xa);
-	mutex_init(&table->event_xa_lock);
-	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
-	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+	uid = mlx5_ib_devx_create(dev, false);
+	if (uid > 0) {
+		dev->devx_whitelist_uid = uid;
+		xa_init(&table->event_xa);
+		mutex_init(&table->event_xa_lock);
+		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
+		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+	}
+
+	return 0;
 }
 
-void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
+void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_devx_event_table *table = &dev->devx_event_table;
 	struct devx_event_subscription *sub, *tmp;
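The rename from mlx5_ib_devx_init_event_table() to mlx5_ib_devx_init() also folds the creation of the DEVX whitelist uid into the function itself, and the event table and EQ notifier are now only set up when a uid was actually obtained (DEVX is optional, so a missing uid is not an error and the function still returns 0). A sketch of how the caller side pairs the new API (the stage function names are illustrative, not taken from this diff):

/* Sketch only: wiring the new init/cleanup pair into a driver init path.
 * devx_stage_init()/devx_stage_cleanup() are hypothetical names. */
static int devx_stage_init(struct mlx5_ib_dev *dev)
{
	/* Creates the whitelist uid and, if one was granted, initializes the
	 * event table and registers the EQ notifier. */
	return mlx5_ib_devx_init(dev);
}

static void devx_stage_cleanup(struct mlx5_ib_dev *dev)
{
	/* Mirrors init: unregisters, drains subscriptions and frees the uid
	 * only if one was created (see the next hunk). */
	mlx5_ib_devx_cleanup(dev);
}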
@@ -2437,17 +2387,21 @@ void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
 	void *entry;
 	unsigned long id;
 
-	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
-	mutex_lock(&dev->devx_event_table.event_xa_lock);
-	xa_for_each(&table->event_xa, id, entry) {
-		event = entry;
-		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
-					 xa_list)
-			devx_cleanup_subscription(dev, sub);
-		kfree(entry);
+	if (dev->devx_whitelist_uid) {
+		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
+		mutex_lock(&dev->devx_event_table.event_xa_lock);
+		xa_for_each(&table->event_xa, id, entry) {
+			event = entry;
+			list_for_each_entry_safe(
+				sub, tmp, &event->unaffiliated_list, xa_list)
+				devx_cleanup_subscription(dev, sub);
+			kfree(entry);
+		}
+		mutex_unlock(&dev->devx_event_table.event_xa_lock);
+		xa_destroy(&table->event_xa);
+
+		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
 	}
-	mutex_unlock(&dev->devx_event_table.event_xa_lock);
-	xa_destroy(&table->event_xa);
 }
 
 static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,