   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
   4 */
   5
   6#include <rdma/ib_user_verbs.h>
   7#include <rdma/ib_verbs.h>
   8#include <rdma/uverbs_types.h>
   9#include <rdma/uverbs_ioctl.h>
  10#include <rdma/mlx5_user_ioctl_cmds.h>
  11#include <rdma/mlx5_user_ioctl_verbs.h>
  12#include <rdma/ib_umem.h>
  13#include <rdma/uverbs_std_types.h>
  14#include <linux/mlx5/driver.h>
  15#include <linux/mlx5/fs.h>
  16#include "mlx5_ib.h"
  17#include "devx.h"
  18#include "qp.h"
  19#include <linux/xarray.h>
  20
  21#define UVERBS_MODULE_NAME mlx5_ib
  22#include <rdma/uverbs_named_ioctl.h>
  23
  24static void dispatch_event_fd(struct list_head *fd_list, const void *data);
  25
  26enum devx_obj_flags {
  27	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
  28	DEVX_OBJ_FLAGS_DCT = 1 << 1,
  29	DEVX_OBJ_FLAGS_CQ = 1 << 2,
  30	DEVX_OBJ_FLAGS_HW_FREED = 1 << 3,
  31};
  32
  33#define MAX_ASYNC_CMDS 8
  34
  35struct mlx5_async_cmd {
  36	struct ib_uobject *uobject;
  37	void *in;
  38	int in_size;
  39	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
  40	int err;
  41	struct mlx5_async_work cb_work;
  42	struct completion comp;
  43};
  44
  45struct devx_async_data {
  46	struct mlx5_ib_dev *mdev;
  47	struct list_head list;
  48	struct devx_async_cmd_event_file *ev_file;
  49	struct mlx5_async_work cb_work;
  50	u16 cmd_out_len;
  51	/* must be last field in this structure */
  52	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
  53};
  54
  55struct devx_async_event_data {
  56	struct list_head list; /* headed in ev_file->event_list */
  57	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
  58};
  59
  60/* first level XA value data structure */
  61struct devx_event {
  62	struct xarray object_ids; /* second XA level, Key = object id */
  63	struct list_head unaffiliated_list;
  64};
  65
  66/* second level XA value data structure */
  67struct devx_obj_event {
  68	struct rcu_head rcu;
  69	struct list_head obj_sub_list;
  70};
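
/*
 * Editor's sketch (not driver code): event dispatch walks the two XA
 * levels above, roughly:
 *
 *	event = xa_load(&table->event_xa, event_type);      (level 1)
 *	obj_event = xa_load(&event->object_ids, obj_id);    (level 2)
 *	dispatch_event_fd(&obj_event->obj_sub_list, data);
 *
 * devx_cq_comp() below follows exactly this pattern under rcu_read_lock().
 */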
  71
  72struct devx_event_subscription {
  73	struct list_head file_list; /* headed in ev_file->
  74				     * subscribed_events_list
  75				     */
  76	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
  77				   * devx_obj_event->obj_sub_list
  78				   */
  79	struct list_head obj_list; /* headed in devx_object */
  80	struct list_head event_list; /* headed in ev_file->event_list or in
  81				      * temp list via subscription
  82				      */
  83
  84	u8 is_cleaned:1;
  85	u32 xa_key_level1;
  86	u32 xa_key_level2;
  87	struct rcu_head	rcu;
  88	u64 cookie;
  89	struct devx_async_event_file *ev_file;
  90	struct eventfd_ctx *eventfd;
  91};
  92
  93struct devx_async_event_file {
  94	struct ib_uobject uobj;
  95	/* Head of events that are subscribed to this FD */
  96	struct list_head subscribed_events_list;
  97	spinlock_t lock;
  98	wait_queue_head_t poll_wait;
  99	struct list_head event_list;
 100	struct mlx5_ib_dev *dev;
 101	u8 omit_data:1;
 102	u8 is_overflow_err:1;
 103	u8 is_destroyed:1;
 104};
 105
 106struct devx_umem {
 107	struct mlx5_core_dev		*mdev;
 108	struct ib_umem			*umem;
 109	u32				dinlen;
 110	u32				dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
 111};
 112
 113struct devx_umem_reg_cmd {
 114	void				*in;
 115	u32				inlen;
 116	u32				out[MLX5_ST_SZ_DW(create_umem_out)];
 117};
 118
 119static struct mlx5_ib_ucontext *
 120devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
 121{
 122	return to_mucontext(ib_uverbs_get_ucontext(attrs));
 123}
 124
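/*
 * Allocate a firmware user context (UCTX) and return its UID. Every devx
 * command issued on behalf of this ucontext is stamped with that UID so
 * firmware can track and validate object ownership. Returns a negative
 * errno on failure.
 */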
 125int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
 126{
 127	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
 128	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
 129	void *uctx;
 130	int err;
 131	u16 uid;
 132	u32 cap = 0;
 133
 134	/* 0 means not supported */
 135	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
 136		return -EINVAL;
 137
 138	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
 139	if (is_user && capable(CAP_NET_RAW) &&
 140	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
 141		cap |= MLX5_UCTX_CAP_RAW_TX;
 142	if (is_user && capable(CAP_SYS_RAWIO) &&
 143	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
 144	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
 145		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
 146
 147	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
 148	MLX5_SET(uctx, uctx, cap, cap);
 149
 150	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 151	if (err)
 152		return err;
 153
 154	uid = MLX5_GET(create_uctx_out, out, uid);
 155	return uid;
 156}
 157
 158void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
 159{
 160	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
 161	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
 162
 163	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
 164	MLX5_SET(destroy_uctx_in, in, uid, uid);
 165
 166	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 167}
 168
 169static bool is_legacy_unaffiliated_event_num(u16 event_num)
 170{
 171	switch (event_num) {
 172	case MLX5_EVENT_TYPE_PORT_CHANGE:
 173		return true;
 174	default:
 175		return false;
 176	}
 177}
 178
 179static bool is_legacy_obj_event_num(u16 event_num)
 180{
 181	switch (event_num) {
 182	case MLX5_EVENT_TYPE_PATH_MIG:
 183	case MLX5_EVENT_TYPE_COMM_EST:
 184	case MLX5_EVENT_TYPE_SQ_DRAINED:
 185	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
 186	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 187	case MLX5_EVENT_TYPE_CQ_ERROR:
 188	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
 189	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 190	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 191	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
 192	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
 193	case MLX5_EVENT_TYPE_DCT_DRAINED:
 194	case MLX5_EVENT_TYPE_COMP:
 195	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
 196	case MLX5_EVENT_TYPE_XRQ_ERROR:
 197		return true;
 198	default:
 199		return false;
 200	}
 201}
 202
 203static u16 get_legacy_obj_type(u16 opcode)
 204{
 205	switch (opcode) {
 206	case MLX5_CMD_OP_CREATE_RQ:
 207		return MLX5_EVENT_QUEUE_TYPE_RQ;
 208	case MLX5_CMD_OP_CREATE_QP:
 209		return MLX5_EVENT_QUEUE_TYPE_QP;
 210	case MLX5_CMD_OP_CREATE_SQ:
 211		return MLX5_EVENT_QUEUE_TYPE_SQ;
 212	case MLX5_CMD_OP_CREATE_DCT:
 213		return MLX5_EVENT_QUEUE_TYPE_DCT;
 214	default:
 215		return 0;
 216	}
 217}
 218
 219static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
 220{
 221	u16 opcode;
 222
 223	opcode = (obj->obj_id >> 32) & 0xffff;
 224
 225	if (is_legacy_obj_event_num(event_num))
 226		return get_legacy_obj_type(opcode);
 227
 228	switch (opcode) {
 229	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 230		return (obj->obj_id >> 48);
 231	case MLX5_CMD_OP_CREATE_RQ:
 232		return MLX5_OBJ_TYPE_RQ;
 233	case MLX5_CMD_OP_CREATE_QP:
 234		return MLX5_OBJ_TYPE_QP;
 235	case MLX5_CMD_OP_CREATE_SQ:
 236		return MLX5_OBJ_TYPE_SQ;
 237	case MLX5_CMD_OP_CREATE_DCT:
 238		return MLX5_OBJ_TYPE_DCT;
 239	case MLX5_CMD_OP_CREATE_TIR:
 240		return MLX5_OBJ_TYPE_TIR;
 241	case MLX5_CMD_OP_CREATE_TIS:
 242		return MLX5_OBJ_TYPE_TIS;
 243	case MLX5_CMD_OP_CREATE_PSV:
 244		return MLX5_OBJ_TYPE_PSV;
 245	case MLX5_OBJ_TYPE_MKEY:
 246		return MLX5_OBJ_TYPE_MKEY;
 247	case MLX5_CMD_OP_CREATE_RMP:
 248		return MLX5_OBJ_TYPE_RMP;
 249	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 250		return MLX5_OBJ_TYPE_XRC_SRQ;
 251	case MLX5_CMD_OP_CREATE_XRQ:
 252		return MLX5_OBJ_TYPE_XRQ;
 253	case MLX5_CMD_OP_CREATE_RQT:
 254		return MLX5_OBJ_TYPE_RQT;
 255	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 256		return MLX5_OBJ_TYPE_FLOW_COUNTER;
 257	case MLX5_CMD_OP_CREATE_CQ:
 258		return MLX5_OBJ_TYPE_CQ;
 259	default:
 260		return 0;
 261	}
 262}
 263
 264static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
 265{
 266	switch (event_type) {
 267	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
 268	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
 269	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 270	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
 271	case MLX5_EVENT_TYPE_PATH_MIG:
 272	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 273	case MLX5_EVENT_TYPE_COMM_EST:
 274	case MLX5_EVENT_TYPE_SQ_DRAINED:
 275	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 276	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
 277		return eqe->data.qp_srq.type;
 278	case MLX5_EVENT_TYPE_CQ_ERROR:
 279	case MLX5_EVENT_TYPE_XRQ_ERROR:
 280		return 0;
 281	case MLX5_EVENT_TYPE_DCT_DRAINED:
 282	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
 283		return MLX5_EVENT_QUEUE_TYPE_DCT;
 284	default:
 285		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
 286	}
 287}
 288
 289static u32 get_dec_obj_id(u64 obj_id)
 290{
 291	return (obj_id & 0xffffffff);
 292}
 293
  294/*
  295 * As the obj_id in the firmware is not globally unique, the object type
  296 * must be considered when checking for a valid object id. For that, the
  297 * opcode of the command that created the object is encoded in the obj_id.
  298 */
 299static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
 300{
 301	return ((u64)opcode << 32) | obj_id;
 302}
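
/*
 * Editor's illustration (not part of the driver): the encoding packs the
 * creating opcode into the high 32 bits. For a CQ with cqn == 0x2a:
 *
 *	u64 id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ, 0x2a);
 *	// id == ((u64)MLX5_CMD_OP_CREATE_CQ << 32) | 0x2a
 *	// get_dec_obj_id(id) == 0x2a; (id >> 32) & 0xffff recovers the opcode
 *
 * For general objects the obj_type is additionally packed into bits 48..63
 * (opcode | obj_type << 16 in the high word), matching the ">> 48"
 * extraction in get_dec_obj_type().
 */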
 303
 304static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
 305{
 306	switch (opcode) {
 307	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 308		return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
 309	case MLX5_CMD_OP_CREATE_UMEM:
 310		return MLX5_GET(create_umem_out, out, umem_id);
 311	case MLX5_CMD_OP_CREATE_MKEY:
 312		return MLX5_GET(create_mkey_out, out, mkey_index);
 313	case MLX5_CMD_OP_CREATE_CQ:
 314		return MLX5_GET(create_cq_out, out, cqn);
 315	case MLX5_CMD_OP_ALLOC_PD:
 316		return MLX5_GET(alloc_pd_out, out, pd);
 317	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
 318		return MLX5_GET(alloc_transport_domain_out, out,
 319				transport_domain);
 320	case MLX5_CMD_OP_CREATE_RMP:
 321		return MLX5_GET(create_rmp_out, out, rmpn);
 322	case MLX5_CMD_OP_CREATE_SQ:
 323		return MLX5_GET(create_sq_out, out, sqn);
 324	case MLX5_CMD_OP_CREATE_RQ:
 325		return MLX5_GET(create_rq_out, out, rqn);
 326	case MLX5_CMD_OP_CREATE_RQT:
 327		return MLX5_GET(create_rqt_out, out, rqtn);
 328	case MLX5_CMD_OP_CREATE_TIR:
 329		return MLX5_GET(create_tir_out, out, tirn);
 330	case MLX5_CMD_OP_CREATE_TIS:
 331		return MLX5_GET(create_tis_out, out, tisn);
 332	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 333		return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
 334	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
 335		return MLX5_GET(create_flow_table_out, out, table_id);
 336	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 337		return MLX5_GET(create_flow_group_out, out, group_id);
 338	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 339		return MLX5_GET(set_fte_in, in, flow_index);
 340	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 341		return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
 342	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
 343		return MLX5_GET(alloc_packet_reformat_context_out, out,
 344				packet_reformat_id);
 345	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
 346		return MLX5_GET(alloc_modify_header_context_out, out,
 347				modify_header_id);
 348	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
 349		return MLX5_GET(create_scheduling_element_out, out,
 350				scheduling_element_id);
 351	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 352		return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
 353	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 354		return MLX5_GET(set_l2_table_entry_in, in, table_index);
 355	case MLX5_CMD_OP_CREATE_QP:
 356		return MLX5_GET(create_qp_out, out, qpn);
 357	case MLX5_CMD_OP_CREATE_SRQ:
 358		return MLX5_GET(create_srq_out, out, srqn);
 359	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 360		return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
 361	case MLX5_CMD_OP_CREATE_DCT:
 362		return MLX5_GET(create_dct_out, out, dctn);
 363	case MLX5_CMD_OP_CREATE_XRQ:
 364		return MLX5_GET(create_xrq_out, out, xrqn);
 365	case MLX5_CMD_OP_ATTACH_TO_MCG:
 366		return MLX5_GET(attach_to_mcg_in, in, qpn);
 367	case MLX5_CMD_OP_ALLOC_XRCD:
 368		return MLX5_GET(alloc_xrcd_out, out, xrcd);
 369	case MLX5_CMD_OP_CREATE_PSV:
 370		return MLX5_GET(create_psv_out, out, psv0_index);
 371	default:
  372		/* The entry must match one of the cases in devx_is_obj_create_cmd() */
 373		WARN_ON(true);
 374		return 0;
 375	}
 376}
 377
 378static u64 devx_get_obj_id(const void *in)
 379{
 380	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 381	u64 obj_id;
 382
 383	switch (opcode) {
 384	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
 385	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
 386		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
 387					MLX5_GET(general_obj_in_cmd_hdr, in,
 388						 obj_type) << 16,
 389					MLX5_GET(general_obj_in_cmd_hdr, in,
 390						 obj_id));
 391		break;
 392	case MLX5_CMD_OP_QUERY_MKEY:
 393		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
 394					MLX5_GET(query_mkey_in, in,
 395						 mkey_index));
 396		break;
 397	case MLX5_CMD_OP_QUERY_CQ:
 398		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 399					MLX5_GET(query_cq_in, in, cqn));
 400		break;
 401	case MLX5_CMD_OP_MODIFY_CQ:
 402		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 403					MLX5_GET(modify_cq_in, in, cqn));
 404		break;
 405	case MLX5_CMD_OP_QUERY_SQ:
 406		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 407					MLX5_GET(query_sq_in, in, sqn));
 408		break;
 409	case MLX5_CMD_OP_MODIFY_SQ:
 410		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 411					MLX5_GET(modify_sq_in, in, sqn));
 412		break;
 413	case MLX5_CMD_OP_QUERY_RQ:
 414		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 415					MLX5_GET(query_rq_in, in, rqn));
 416		break;
 417	case MLX5_CMD_OP_MODIFY_RQ:
 418		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 419					MLX5_GET(modify_rq_in, in, rqn));
 420		break;
 421	case MLX5_CMD_OP_QUERY_RMP:
 422		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
 423					MLX5_GET(query_rmp_in, in, rmpn));
 424		break;
 425	case MLX5_CMD_OP_MODIFY_RMP:
 426		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
 427					MLX5_GET(modify_rmp_in, in, rmpn));
 428		break;
 429	case MLX5_CMD_OP_QUERY_RQT:
 430		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 431					MLX5_GET(query_rqt_in, in, rqtn));
 432		break;
 433	case MLX5_CMD_OP_MODIFY_RQT:
 434		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 435					MLX5_GET(modify_rqt_in, in, rqtn));
 436		break;
 437	case MLX5_CMD_OP_QUERY_TIR:
 438		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 439					MLX5_GET(query_tir_in, in, tirn));
 440		break;
 441	case MLX5_CMD_OP_MODIFY_TIR:
 442		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 443					MLX5_GET(modify_tir_in, in, tirn));
 444		break;
 445	case MLX5_CMD_OP_QUERY_TIS:
 446		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 447					MLX5_GET(query_tis_in, in, tisn));
 448		break;
 449	case MLX5_CMD_OP_MODIFY_TIS:
 450		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 451					MLX5_GET(modify_tis_in, in, tisn));
 452		break;
 453	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 454		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
 455					MLX5_GET(query_flow_table_in, in,
 456						 table_id));
 457		break;
 458	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
 459		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
 460					MLX5_GET(modify_flow_table_in, in,
 461						 table_id));
 462		break;
 463	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
 464		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
 465					MLX5_GET(query_flow_group_in, in,
 466						 group_id));
 467		break;
 468	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 469		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
 470					MLX5_GET(query_fte_in, in,
 471						 flow_index));
 472		break;
 473	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 474		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
 475					MLX5_GET(set_fte_in, in, flow_index));
 476		break;
 477	case MLX5_CMD_OP_QUERY_Q_COUNTER:
 478		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
 479					MLX5_GET(query_q_counter_in, in,
 480						 counter_set_id));
 481		break;
 482	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 483		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
 484					MLX5_GET(query_flow_counter_in, in,
 485						 flow_counter_id));
 486		break;
 487	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
 488		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
 489					MLX5_GET(query_modify_header_context_in,
 490						 in, modify_header_id));
 491		break;
 492	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
 493		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
 494					MLX5_GET(query_scheduling_element_in,
 495						 in, scheduling_element_id));
 496		break;
 497	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
 498		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
 499					MLX5_GET(modify_scheduling_element_in,
 500						 in, scheduling_element_id));
 501		break;
 502	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 503		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
 504					MLX5_GET(add_vxlan_udp_dport_in, in,
 505						 vxlan_udp_port));
 506		break;
 507	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
 508		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
 509					MLX5_GET(query_l2_table_entry_in, in,
 510						 table_index));
 511		break;
 512	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 513		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
 514					MLX5_GET(set_l2_table_entry_in, in,
 515						 table_index));
 516		break;
 517	case MLX5_CMD_OP_QUERY_QP:
 518		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 519					MLX5_GET(query_qp_in, in, qpn));
 520		break;
 521	case MLX5_CMD_OP_RST2INIT_QP:
 522		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 523					MLX5_GET(rst2init_qp_in, in, qpn));
 524		break;
 525	case MLX5_CMD_OP_INIT2INIT_QP:
 526		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 527					MLX5_GET(init2init_qp_in, in, qpn));
 528		break;
 529	case MLX5_CMD_OP_INIT2RTR_QP:
 530		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 531					MLX5_GET(init2rtr_qp_in, in, qpn));
 532		break;
 533	case MLX5_CMD_OP_RTR2RTS_QP:
 534		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 535					MLX5_GET(rtr2rts_qp_in, in, qpn));
 536		break;
 537	case MLX5_CMD_OP_RTS2RTS_QP:
 538		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 539					MLX5_GET(rts2rts_qp_in, in, qpn));
 540		break;
 541	case MLX5_CMD_OP_SQERR2RTS_QP:
 542		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 543					MLX5_GET(sqerr2rts_qp_in, in, qpn));
 544		break;
 545	case MLX5_CMD_OP_2ERR_QP:
 546		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 547					MLX5_GET(qp_2err_in, in, qpn));
 548		break;
 549	case MLX5_CMD_OP_2RST_QP:
 550		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 551					MLX5_GET(qp_2rst_in, in, qpn));
 552		break;
 553	case MLX5_CMD_OP_QUERY_DCT:
 554		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 555					MLX5_GET(query_dct_in, in, dctn));
 556		break;
 557	case MLX5_CMD_OP_QUERY_XRQ:
 558	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
 559	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
 560		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
 561					MLX5_GET(query_xrq_in, in, xrqn));
 562		break;
 563	case MLX5_CMD_OP_QUERY_XRC_SRQ:
 564		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
 565					MLX5_GET(query_xrc_srq_in, in,
 566						 xrc_srqn));
 567		break;
 568	case MLX5_CMD_OP_ARM_XRC_SRQ:
 569		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
 570					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
 571		break;
 572	case MLX5_CMD_OP_QUERY_SRQ:
 573		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
 574					MLX5_GET(query_srq_in, in, srqn));
 575		break;
 576	case MLX5_CMD_OP_ARM_RQ:
 577		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 578					MLX5_GET(arm_rq_in, in, srq_number));
 579		break;
 580	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
 581		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 582					MLX5_GET(drain_dct_in, in, dctn));
 583		break;
 584	case MLX5_CMD_OP_ARM_XRQ:
 585	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
 586	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
 587	case MLX5_CMD_OP_MODIFY_XRQ:
 588		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
 589					MLX5_GET(arm_xrq_in, in, xrqn));
 590		break;
 591	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
 592		obj_id = get_enc_obj_id
 593				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
 594				 MLX5_GET(query_packet_reformat_context_in,
 595					  in, packet_reformat_id));
 596		break;
 597	default:
 598		obj_id = 0;
 599	}
 600
 601	return obj_id;
 602}
 603
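/*
 * Check that the object id embedded in a modify/query command matches the
 * uobject the command was issued on, taking the creating opcode (and, for
 * flow counters, the bulk range) into account.
 */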
 604static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 605				 struct ib_uobject *uobj, const void *in)
 606{
 607	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
 608	u64 obj_id = devx_get_obj_id(in);
 609
 610	if (!obj_id)
 611		return false;
 612
 613	switch (uobj_get_object_id(uobj)) {
 614	case UVERBS_OBJECT_CQ:
 615		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 616				      to_mcq(uobj->object)->mcq.cqn) ==
 617				      obj_id;
 618
 619	case UVERBS_OBJECT_SRQ:
 620	{
 621		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
 622		u16 opcode;
 623
 624		switch (srq->common.res) {
 625		case MLX5_RES_XSRQ:
 626			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
 627			break;
 628		case MLX5_RES_XRQ:
 629			opcode = MLX5_CMD_OP_CREATE_XRQ;
 630			break;
 631		default:
 632			if (!dev->mdev->issi)
 633				opcode = MLX5_CMD_OP_CREATE_SRQ;
 634			else
 635				opcode = MLX5_CMD_OP_CREATE_RMP;
 636		}
 637
 638		return get_enc_obj_id(opcode,
 639				      to_msrq(uobj->object)->msrq.srqn) ==
 640				      obj_id;
 641	}
 642
 643	case UVERBS_OBJECT_QP:
 644	{
 645		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
 646
 647		if (qp->type == IB_QPT_RAW_PACKET ||
 648		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
 649			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
 650							 &qp->raw_packet_qp;
 651			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 652			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
 653
 654			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 655					       rq->base.mqp.qpn) == obj_id ||
 656				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 657					       sq->base.mqp.qpn) == obj_id ||
 658				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 659					       rq->tirn) == obj_id ||
 660				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 661					       sq->tisn) == obj_id);
 662		}
 663
 664		if (qp->type == MLX5_IB_QPT_DCT)
 665			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 666					      qp->dct.mdct.mqp.qpn) == obj_id;
 667		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 668				      qp->ibqp.qp_num) == obj_id;
 669	}
 670
 671	case UVERBS_OBJECT_WQ:
 672		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 673				      to_mrwq(uobj->object)->core_qp.qpn) ==
 674				      obj_id;
 675
 676	case UVERBS_OBJECT_RWQ_IND_TBL:
 677		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 678				      to_mrwq_ind_table(uobj->object)->rqtn) ==
 679				      obj_id;
 680
 681	case MLX5_IB_OBJECT_DEVX_OBJ:
 682	{
 683		u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 684		struct devx_obj *devx_uobj = uobj->object;
 685
 686		if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
 687		    devx_uobj->flow_counter_bulk_size) {
 688			u64 end;
 689
 690			end = devx_uobj->obj_id +
 691				devx_uobj->flow_counter_bulk_size;
 692			return devx_uobj->obj_id <= obj_id && end > obj_id;
 693		}
 694
 695		return devx_uobj->obj_id == obj_id;
 696	}
 697
 698	default:
 699		return false;
 700	}
 701}
 702
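/*
 * Set the *_umem_valid bits in a create command so firmware validates
 * that the queue and doorbell buffers are described by registered umem
 * objects rather than by raw addresses under user control.
 */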
 703static void devx_set_umem_valid(const void *in)
 704{
 705	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 706
 707	switch (opcode) {
 708	case MLX5_CMD_OP_CREATE_MKEY:
 709		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
 710		break;
 711	case MLX5_CMD_OP_CREATE_CQ:
 712	{
 713		void *cqc;
 714
 715		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
 716		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 717		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
 718		break;
 719	}
 720	case MLX5_CMD_OP_CREATE_QP:
 721	{
 722		void *qpc;
 723
 724		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 725		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
 726		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
 727		break;
 728	}
 729
 730	case MLX5_CMD_OP_CREATE_RQ:
 731	{
 732		void *rqc, *wq;
 733
 734		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
 735		wq  = MLX5_ADDR_OF(rqc, rqc, wq);
 736		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 737		MLX5_SET(wq, wq, wq_umem_valid, 1);
 738		break;
 739	}
 740
 741	case MLX5_CMD_OP_CREATE_SQ:
 742	{
 743		void *sqc, *wq;
 744
 745		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
 746		wq = MLX5_ADDR_OF(sqc, sqc, wq);
 747		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 748		MLX5_SET(wq, wq, wq_umem_valid, 1);
 749		break;
 750	}
 751
 752	case MLX5_CMD_OP_MODIFY_CQ:
 753		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
 754		break;
 755
 756	case MLX5_CMD_OP_CREATE_RMP:
 757	{
 758		void *rmpc, *wq;
 759
 760		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
 761		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
 762		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 763		MLX5_SET(wq, wq, wq_umem_valid, 1);
 764		break;
 765	}
 766
 767	case MLX5_CMD_OP_CREATE_XRQ:
 768	{
 769		void *xrqc, *wq;
 770
 771		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
 772		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
 773		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 774		MLX5_SET(wq, wq, wq_umem_valid, 1);
 775		break;
 776	}
 777
 778	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 779	{
 780		void *xrc_srqc;
 781
 782		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
 783		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
 784					xrc_srq_context_entry);
 785		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
 786		break;
 787	}
 788
 789	default:
 790		return;
 791	}
 792}
 793
 794static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
 795{
 796	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 797
 798	switch (*opcode) {
 799	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 800	case MLX5_CMD_OP_CREATE_MKEY:
 801	case MLX5_CMD_OP_CREATE_CQ:
 802	case MLX5_CMD_OP_ALLOC_PD:
 803	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
 804	case MLX5_CMD_OP_CREATE_RMP:
 805	case MLX5_CMD_OP_CREATE_SQ:
 806	case MLX5_CMD_OP_CREATE_RQ:
 807	case MLX5_CMD_OP_CREATE_RQT:
 808	case MLX5_CMD_OP_CREATE_TIR:
 809	case MLX5_CMD_OP_CREATE_TIS:
 810	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 811	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
 812	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 813	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 814	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
 815	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
 816	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
 817	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 818	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 819	case MLX5_CMD_OP_CREATE_QP:
 820	case MLX5_CMD_OP_CREATE_SRQ:
 821	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 822	case MLX5_CMD_OP_CREATE_DCT:
 823	case MLX5_CMD_OP_CREATE_XRQ:
 824	case MLX5_CMD_OP_ATTACH_TO_MCG:
 825	case MLX5_CMD_OP_ALLOC_XRCD:
 826		return true;
 827	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 828	{
 829		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
 830		if (op_mod == 0)
 831			return true;
 832		return false;
 833	}
 834	case MLX5_CMD_OP_CREATE_PSV:
 835	{
 836		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
 837
 838		if (num_psv == 1)
 839			return true;
 840		return false;
 841	}
 842	default:
 843		return false;
 844	}
 845}
 846
 847static bool devx_is_obj_modify_cmd(const void *in)
 848{
 849	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 850
 851	switch (opcode) {
 852	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
 853	case MLX5_CMD_OP_MODIFY_CQ:
 854	case MLX5_CMD_OP_MODIFY_RMP:
 855	case MLX5_CMD_OP_MODIFY_SQ:
 856	case MLX5_CMD_OP_MODIFY_RQ:
 857	case MLX5_CMD_OP_MODIFY_RQT:
 858	case MLX5_CMD_OP_MODIFY_TIR:
 859	case MLX5_CMD_OP_MODIFY_TIS:
 860	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
 861	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
 862	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 863	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 864	case MLX5_CMD_OP_RST2INIT_QP:
 865	case MLX5_CMD_OP_INIT2RTR_QP:
 866	case MLX5_CMD_OP_INIT2INIT_QP:
 867	case MLX5_CMD_OP_RTR2RTS_QP:
 868	case MLX5_CMD_OP_RTS2RTS_QP:
 869	case MLX5_CMD_OP_SQERR2RTS_QP:
 870	case MLX5_CMD_OP_2ERR_QP:
 871	case MLX5_CMD_OP_2RST_QP:
 872	case MLX5_CMD_OP_ARM_XRC_SRQ:
 873	case MLX5_CMD_OP_ARM_RQ:
 874	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
 875	case MLX5_CMD_OP_ARM_XRQ:
 876	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
 877	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
 878	case MLX5_CMD_OP_MODIFY_XRQ:
 879		return true;
 880	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 881	{
 882		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
 883
 884		if (op_mod == 1)
 885			return true;
 886		return false;
 887	}
 888	default:
 889		return false;
 890	}
 891}
 892
 893static bool devx_is_obj_query_cmd(const void *in)
 894{
 895	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 896
 897	switch (opcode) {
 898	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
 899	case MLX5_CMD_OP_QUERY_MKEY:
 900	case MLX5_CMD_OP_QUERY_CQ:
 901	case MLX5_CMD_OP_QUERY_RMP:
 902	case MLX5_CMD_OP_QUERY_SQ:
 903	case MLX5_CMD_OP_QUERY_RQ:
 904	case MLX5_CMD_OP_QUERY_RQT:
 905	case MLX5_CMD_OP_QUERY_TIR:
 906	case MLX5_CMD_OP_QUERY_TIS:
 907	case MLX5_CMD_OP_QUERY_Q_COUNTER:
 908	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 909	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
 910	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 911	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 912	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
 913	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
 914	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
 915	case MLX5_CMD_OP_QUERY_QP:
 916	case MLX5_CMD_OP_QUERY_SRQ:
 917	case MLX5_CMD_OP_QUERY_XRC_SRQ:
 918	case MLX5_CMD_OP_QUERY_DCT:
 919	case MLX5_CMD_OP_QUERY_XRQ:
 920	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
 921	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
 922	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
 923		return true;
 924	default:
 925		return false;
 926	}
 927}
 928
 929static bool devx_is_whitelist_cmd(void *in)
 930{
 931	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 932
 933	switch (opcode) {
 934	case MLX5_CMD_OP_QUERY_HCA_CAP:
 935	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
 936	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
 937	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 938		return true;
 939	default:
 940		return false;
 941	}
 942}
 943
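/*
 * Resolve the UID to stamp into the command header: whitelisted commands
 * may fall back to the device-wide devx_whitelist_uid when the ucontext
 * has no devx_uid of its own; all other commands require a devx_uid.
 */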
 944static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
 945{
 946	if (devx_is_whitelist_cmd(cmd_in)) {
 947		struct mlx5_ib_dev *dev;
 948
 949		if (c->devx_uid)
 950			return c->devx_uid;
 951
 952		dev = to_mdev(c->ibucontext.device);
 953		if (dev->devx_whitelist_uid)
 954			return dev->devx_whitelist_uid;
 955
 956		return -EOPNOTSUPP;
 957	}
 958
 959	if (!c->devx_uid)
 960		return -EINVAL;
 961
 962	return c->devx_uid;
 963}
 964
 965static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
 966{
 967	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 968
 969	/* Pass all vhca_tunnel commands as general; tracking is done in FW */
 970	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
 971	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
 972	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
 973	     opcode < MLX5_CMD_OP_GENERAL_END))
 974		return true;
 975
 976	switch (opcode) {
 977	case MLX5_CMD_OP_QUERY_HCA_CAP:
 978	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
 979	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
 980	case MLX5_CMD_OP_QUERY_VPORT_STATE:
 981	case MLX5_CMD_OP_QUERY_ADAPTER:
 982	case MLX5_CMD_OP_QUERY_ISSI:
 983	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
 984	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
 985	case MLX5_CMD_OP_QUERY_VNIC_ENV:
 986	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 987	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
 988	case MLX5_CMD_OP_NOP:
 989	case MLX5_CMD_OP_QUERY_CONG_STATUS:
 990	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
 991	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
 992	case MLX5_CMD_OP_QUERY_LAG:
 993	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 994		return true;
 995	default:
 996		return false;
 997	}
 998}
 999
1000static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
1001	struct uverbs_attr_bundle *attrs)
1002{
1003	struct mlx5_ib_ucontext *c;
1004	struct mlx5_ib_dev *dev;
1005	int user_vector;
1006	int dev_eqn;
1007	int err;
1008
1009	if (uverbs_copy_from(&user_vector, attrs,
1010			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
1011		return -EFAULT;
1012
1013	c = devx_ufile2uctx(attrs);
1014	if (IS_ERR(c))
1015		return PTR_ERR(c);
1016	dev = to_mdev(c->ibucontext.device);
1017
1018	err = mlx5_comp_eqn_get(dev->mdev, user_vector, &dev_eqn);
1019	if (err < 0)
1020		return err;
1021
1022	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
1023			   &dev_eqn, sizeof(dev_eqn)))
1024		return -EFAULT;
1025
1026	return 0;
1027}
1028
1029/*
1030 * Security note:
1031 * The hardware protection mechanism works like this: each device object
1032 * that is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called
1033 * uar_page in the device specification manual) upon its creation. Upon a
1034 * doorbell, hardware fetches the context of the object for which the
1035 * doorbell was rung and validates that the UAR through which it was rung
1036 * matches the UAR ID of the object; if they do not match, the doorbell is
1037 * silently ignored by the hardware. Of course, the user cannot ring a
1038 * doorbell on a UAR that was not mapped to it.
1039 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
1040 * mailboxes (except tagging them with UID), we expose the user's UAR ID so
1041 * it can be embedded in these objects in the expected specification
1042 * format. The only way users can hurt themselves is by creating a QP/SQ/CQ
1043 * with a UAR ID other than their own, in which case other users may ring a
1044 * doorbell on their objects.
1045 * The consequence would be that another user can schedule a QP/SQ of the
1046 * buggy user for execution (just insert it into the hardware schedule
1047 * queue or arm its CQ for event generation); no further harm is expected.
1048 */
1049static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
1050	struct uverbs_attr_bundle *attrs)
1051{
1052	struct mlx5_ib_ucontext *c;
1053	struct mlx5_ib_dev *dev;
1054	u32 user_idx;
1055	s32 dev_idx;
1056
1057	c = devx_ufile2uctx(attrs);
1058	if (IS_ERR(c))
1059		return PTR_ERR(c);
1060	dev = to_mdev(c->ibucontext.device);
1061
1062	if (uverbs_copy_from(&user_idx, attrs,
1063			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
1064		return -EFAULT;
1065
1066	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
1067	if (dev_idx < 0)
1068		return dev_idx;
1069
1070	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1071			   &dev_idx, sizeof(dev_idx)))
1072		return -EFAULT;
1073
1074	return 0;
1075}
1076
1077static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
1078	struct uverbs_attr_bundle *attrs)
1079{
1080	struct mlx5_ib_ucontext *c;
1081	struct mlx5_ib_dev *dev;
1082	void *cmd_in = uverbs_attr_get_alloced_ptr(
1083		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
1084	int cmd_out_len = uverbs_attr_get_len(attrs,
1085					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
1086	void *cmd_out;
1087	int err, err2;
1088	int uid;
1089
1090	c = devx_ufile2uctx(attrs);
1091	if (IS_ERR(c))
1092		return PTR_ERR(c);
1093	dev = to_mdev(c->ibucontext.device);
1094
1095	uid = devx_get_uid(c, cmd_in);
1096	if (uid < 0)
1097		return uid;
1098
1099	/* Only a whitelist of general HCA commands is allowed for this method. */
1100	if (!devx_is_general_cmd(cmd_in, dev))
1101		return -EINVAL;
1102
1103	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1104	if (IS_ERR(cmd_out))
1105		return PTR_ERR(cmd_out);
1106
1107	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1108	err = mlx5_cmd_do(dev->mdev, cmd_in,
1109			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
1110			  cmd_out, cmd_out_len);
1111	if (err && err != -EREMOTEIO)
1112		return err;
1113
1114	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
1115			      cmd_out_len);
1116
1117	return err2 ?: err;
1118}
1119
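/*
 * Pre-build the matching destroy/dealloc command for a just-created object
 * into din/dinlen, so the object can later be torn down (see
 * devx_obj_cleanup()) without re-parsing the create command.
 */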
1120static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1121				       u32 *dinlen,
1122				       u32 *obj_id)
1123{
1124	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
1125	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1126
1127	*obj_id = devx_get_created_obj_id(in, out, opcode);
1128	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1129	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1130
1131	switch (opcode) {
1132	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
1133		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
1134		MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1135		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
1136			 MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
1137		break;
1138
1139	case MLX5_CMD_OP_CREATE_UMEM:
1140		MLX5_SET(destroy_umem_in, din, opcode,
1141			 MLX5_CMD_OP_DESTROY_UMEM);
1142		MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
1143		break;
1144	case MLX5_CMD_OP_CREATE_MKEY:
1145		MLX5_SET(destroy_mkey_in, din, opcode,
1146			 MLX5_CMD_OP_DESTROY_MKEY);
1147		MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
1148		break;
1149	case MLX5_CMD_OP_CREATE_CQ:
1150		MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1151		MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
1152		break;
1153	case MLX5_CMD_OP_ALLOC_PD:
1154		MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1155		MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
1156		break;
1157	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1158		MLX5_SET(dealloc_transport_domain_in, din, opcode,
1159			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1160		MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
1161			 *obj_id);
1162		break;
1163	case MLX5_CMD_OP_CREATE_RMP:
1164		MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1165		MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
1166		break;
1167	case MLX5_CMD_OP_CREATE_SQ:
1168		MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1169		MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
1170		break;
1171	case MLX5_CMD_OP_CREATE_RQ:
1172		MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1173		MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
1174		break;
1175	case MLX5_CMD_OP_CREATE_RQT:
1176		MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1177		MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
1178		break;
1179	case MLX5_CMD_OP_CREATE_TIR:
1180		MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1181		MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
1182		break;
1183	case MLX5_CMD_OP_CREATE_TIS:
1184		MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1185		MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
1186		break;
1187	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1188		MLX5_SET(dealloc_q_counter_in, din, opcode,
1189			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1190		MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
1191		break;
1192	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1193		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1194		MLX5_SET(destroy_flow_table_in, din, other_vport,
1195			 MLX5_GET(create_flow_table_in,  in, other_vport));
1196		MLX5_SET(destroy_flow_table_in, din, vport_number,
1197			 MLX5_GET(create_flow_table_in,  in, vport_number));
1198		MLX5_SET(destroy_flow_table_in, din, table_type,
1199			 MLX5_GET(create_flow_table_in,  in, table_type));
1200		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1201		MLX5_SET(destroy_flow_table_in, din, opcode,
1202			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1203		break;
1204	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1205		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1206		MLX5_SET(destroy_flow_group_in, din, other_vport,
1207			 MLX5_GET(create_flow_group_in, in, other_vport));
1208		MLX5_SET(destroy_flow_group_in, din, vport_number,
1209			 MLX5_GET(create_flow_group_in, in, vport_number));
1210		MLX5_SET(destroy_flow_group_in, din, table_type,
1211			 MLX5_GET(create_flow_group_in, in, table_type));
1212		MLX5_SET(destroy_flow_group_in, din, table_id,
1213			 MLX5_GET(create_flow_group_in, in, table_id));
1214		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1215		MLX5_SET(destroy_flow_group_in, din, opcode,
1216			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1217		break;
1218	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1219		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1220		MLX5_SET(delete_fte_in, din, other_vport,
1221			 MLX5_GET(set_fte_in,  in, other_vport));
1222		MLX5_SET(delete_fte_in, din, vport_number,
1223			 MLX5_GET(set_fte_in, in, vport_number));
1224		MLX5_SET(delete_fte_in, din, table_type,
1225			 MLX5_GET(set_fte_in, in, table_type));
1226		MLX5_SET(delete_fte_in, din, table_id,
1227			 MLX5_GET(set_fte_in, in, table_id));
1228		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1229		MLX5_SET(delete_fte_in, din, opcode,
1230			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1231		break;
1232	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1233		MLX5_SET(dealloc_flow_counter_in, din, opcode,
1234			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1235		MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
1236			 *obj_id);
1237		break;
1238	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
1239		MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
1240			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
1241		MLX5_SET(dealloc_packet_reformat_context_in, din,
1242			 packet_reformat_id, *obj_id);
1243		break;
1244	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
1245		MLX5_SET(dealloc_modify_header_context_in, din, opcode,
1246			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
1247		MLX5_SET(dealloc_modify_header_context_in, din,
1248			 modify_header_id, *obj_id);
1249		break;
1250	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
1251		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
1252		MLX5_SET(destroy_scheduling_element_in, din,
1253			 scheduling_hierarchy,
1254			 MLX5_GET(create_scheduling_element_in, in,
1255				  scheduling_hierarchy));
1256		MLX5_SET(destroy_scheduling_element_in, din,
1257			 scheduling_element_id, *obj_id);
1258		MLX5_SET(destroy_scheduling_element_in, din, opcode,
1259			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
1260		break;
1261	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
1262		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
1263		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
1264		MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
1265			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
1266		break;
1267	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
1268		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
1269		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
1270		MLX5_SET(delete_l2_table_entry_in, din, opcode,
1271			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
1272		break;
1273	case MLX5_CMD_OP_CREATE_QP:
1274		MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
1275		MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
1276		break;
1277	case MLX5_CMD_OP_CREATE_SRQ:
1278		MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
1279		MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
1280		break;
1281	case MLX5_CMD_OP_CREATE_XRC_SRQ:
1282		MLX5_SET(destroy_xrc_srq_in, din, opcode,
1283			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
1284		MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
1285		break;
1286	case MLX5_CMD_OP_CREATE_DCT:
1287		MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
1288		MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
1289		break;
1290	case MLX5_CMD_OP_CREATE_XRQ:
1291		MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
1292		MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
1293		break;
1294	case MLX5_CMD_OP_ATTACH_TO_MCG:
1295		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
1296		MLX5_SET(detach_from_mcg_in, din, qpn,
1297			 MLX5_GET(attach_to_mcg_in, in, qpn));
1298		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
1299		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
1300		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
1301		MLX5_SET(detach_from_mcg_in, din, opcode,
1302			 MLX5_CMD_OP_DETACH_FROM_MCG);
1303		MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
1304		break;
1305	case MLX5_CMD_OP_ALLOC_XRCD:
1306		MLX5_SET(dealloc_xrcd_in, din, opcode,
1307			 MLX5_CMD_OP_DEALLOC_XRCD);
1308		MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
1309		break;
1310	case MLX5_CMD_OP_CREATE_PSV:
1311		MLX5_SET(destroy_psv_in, din, opcode,
1312			 MLX5_CMD_OP_DESTROY_PSV);
1313		MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
1314		break;
1315	default:
1316		/* The entry must match one of the cases in devx_is_obj_create_cmd() */
1317		WARN_ON(true);
1318		break;
1319	}
1320}
1321
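/*
 * For indirect (KLM/KSM) mkeys created through DEVX, mirror the key into
 * the ODP mkey xarray so the page-fault handler can resolve faults that
 * reference this mkey.
 */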
1322static int devx_handle_mkey_indirect(struct devx_obj *obj,
1323				     struct mlx5_ib_dev *dev,
1324				     void *in, void *out)
1325{
1326	struct mlx5_ib_mkey *mkey = &obj->mkey;
1327	void *mkc;
1328	u8 key;
1329
1330	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1331	key = MLX5_GET(mkc, mkc, mkey_7_0);
1332	mkey->key = mlx5_idx_to_mkey(
1333			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
1334	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
1335	mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
1336	init_waitqueue_head(&mkey->wait);
1337
1338	return mlx5r_store_odp_mkey(dev, mkey);
1339}
1340
1341static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1342				   struct devx_obj *obj,
1343				   void *in, int in_len)
1344{
1345	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1346			MLX5_FLD_SZ_BYTES(create_mkey_in,
1347			memory_key_mkey_entry);
1348	void *mkc;
1349	u8 access_mode;
1350
1351	if (in_len < min_len)
1352		return -EINVAL;
1353
1354	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1355
1356	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
1357	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1358
1359	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
1360		access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
1361		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1362			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
1363		return 0;
1364	}
1365
1366	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1367	return 0;
1368}
1369
1370static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
1371				      struct devx_event_subscription *sub)
1372{
1373	struct devx_event *event;
1374	struct devx_obj_event *xa_val_level2;
1375
1376	if (sub->is_cleaned)
1377		return;
1378
1379	sub->is_cleaned = 1;
1380	list_del_rcu(&sub->xa_list);
1381
1382	if (list_empty(&sub->obj_list))
1383		return;
1384
1385	list_del_rcu(&sub->obj_list);
 1386	/* remove the level-2 entry if this object's subscription list is now empty */
1387	event = xa_load(&dev->devx_event_table.event_xa,
1388			sub->xa_key_level1);
1389	WARN_ON(!event);
1390
1391	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
1392	if (list_empty(&xa_val_level2->obj_sub_list)) {
1393		xa_erase(&event->object_ids,
1394			 sub->xa_key_level2);
1395		kfree_rcu(xa_val_level2, rcu);
1396	}
1397}
1398
1399static int devx_obj_cleanup(struct ib_uobject *uobject,
1400			    enum rdma_remove_reason why,
1401			    struct uverbs_attr_bundle *attrs)
1402{
1403	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1404	struct mlx5_devx_event_table *devx_event_table;
1405	struct devx_obj *obj = uobject->object;
1406	struct devx_event_subscription *sub_entry, *tmp;
1407	struct mlx5_ib_dev *dev;
1408	int ret;
1409
1410	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1411	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
1412	    xa_erase(&obj->ib_dev->odp_mkeys,
1413		     mlx5_base_mkey(obj->mkey.key)))
1414		/*
1415		 * pagefault_single_data_segment() issues commands against the
1416		 * mkey; we must wait for those to stop before freeing the
1417		 * mkey, as another allocation could get the same mkey number.
1418		 */
1419		mlx5r_deref_wait_odp_mkey(&obj->mkey);
1420
1421	if (obj->flags & DEVX_OBJ_FLAGS_HW_FREED)
1422		ret = 0;
1423	else if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1424		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1425	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1426		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1427	else
1428		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
1429				    obj->dinlen, out, sizeof(out));
1430	if (ret)
1431		return ret;
1432
1433	devx_event_table = &dev->devx_event_table;
1434
1435	mutex_lock(&devx_event_table->event_xa_lock);
1436	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
1437		devx_cleanup_subscription(dev, sub_entry);
1438	mutex_unlock(&devx_event_table->event_xa_lock);
1439
1440	kfree(obj);
1441	return ret;
1442}
1443
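/*
 * Completion handler for DEVX-created CQs: look up subscribers in the
 * two-level event XA (MLX5_EVENT_TYPE_COMP, then the CQN) and forward the
 * EQE to every subscribed event FD.
 */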
1444static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
1445{
1446	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
1447	struct mlx5_devx_event_table *table;
1448	struct devx_event *event;
1449	struct devx_obj_event *obj_event;
1450	u32 obj_id = mcq->cqn;
1451
1452	table = &obj->ib_dev->devx_event_table;
1453	rcu_read_lock();
1454	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
1455	if (!event)
1456		goto out;
1457
1458	obj_event = xa_load(&event->object_ids, obj_id);
1459	if (!obj_event)
1460		goto out;
1461
1462	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
1463out:
1464	rcu_read_unlock();
1465}
1466
1467static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
1468{
1469	if (!MLX5_CAP_GEN(dev->mdev, apu) ||
1470	    !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
1471		return false;
1472
1473	return true;
1474}
1475
1476static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1477	struct uverbs_attr_bundle *attrs)
1478{
1479	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1480	int cmd_out_len =  uverbs_attr_get_len(attrs,
1481					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
1482	int cmd_in_len = uverbs_attr_get_len(attrs,
1483					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1484	void *cmd_out;
1485	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1486		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
1487	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1488		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1489	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1490	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1491	struct devx_obj *obj;
1492	u16 obj_type = 0;
1493	int err, err2 = 0;
1494	int uid;
1495	u32 obj_id;
1496	u16 opcode;
1497
1498	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1499		return -EINVAL;
1500
1501	uid = devx_get_uid(c, cmd_in);
1502	if (uid < 0)
1503		return uid;
1504
1505	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
1506		return -EINVAL;
1507
1508	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1509	if (IS_ERR(cmd_out))
1510		return PTR_ERR(cmd_out);
1511
1512	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
1513	if (!obj)
1514		return -ENOMEM;
1515
1516	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1517	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
1518		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
1519		if (err)
1520			goto obj_free;
1521	} else {
1522		devx_set_umem_valid(cmd_in);
1523	}
1524
1525	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1526		obj->flags |= DEVX_OBJ_FLAGS_DCT;
1527		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
1528					   cmd_in_len, cmd_out, cmd_out_len);
1529	} else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
1530		   !is_apu_cq(dev, cmd_in)) {
1531		obj->flags |= DEVX_OBJ_FLAGS_CQ;
1532		obj->core_cq.comp = devx_cq_comp;
1533		err = mlx5_create_cq(dev->mdev, &obj->core_cq,
1534				     cmd_in, cmd_in_len, cmd_out,
1535				     cmd_out_len);
1536	} else {
1537		err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
1538				  cmd_out, cmd_out_len);
1539	}
1540
1541	if (err == -EREMOTEIO)
1542		err2 = uverbs_copy_to(attrs,
1543				      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
1544				      cmd_out, cmd_out_len);
1545	if (err)
1546		goto obj_free;
1547
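	/*
	 * Record how many counters this object spans: either
	 * 2^flow_counter_bulk_log_size or, via the legacy field, a multiple
	 * of 128 counters. devx_is_valid_obj_id() uses this range when
	 * validating queries against the bulk.
	 */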
1548	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
1549		u32 bulk = MLX5_GET(alloc_flow_counter_in,
1550				    cmd_in,
1551				    flow_counter_bulk_log_size);
1552
1553		if (bulk)
1554			bulk = 1 << bulk;
1555		else
1556			bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
1557						cmd_in,
1558						flow_counter_bulk);
1559		obj->flow_counter_bulk_size = bulk;
1560	}
1561
1562	uobj->object = obj;
1563	INIT_LIST_HEAD(&obj->event_sub);
1564	obj->ib_dev = dev;
1565	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
1566				   &obj_id);
1567	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
1568
1569	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
1570	if (err)
1571		goto obj_destroy;
1572
1573	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
1574		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
1575	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
1576
1577	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1578		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
1579		if (err)
1580			goto obj_destroy;
1581	}
1582	return 0;
1583
1584obj_destroy:
1585	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1586		mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1587	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1588		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1589	else
1590		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
1591			      sizeof(out));
1592obj_free:
1593	kfree(obj);
1594	return err2 ?: err;
1595}
1596
1597static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
1598	struct uverbs_attr_bundle *attrs)
1599{
1600	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1601	int cmd_out_len = uverbs_attr_get_len(attrs,
1602					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1603	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1604							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
1605	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1606		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1607	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1608	void *cmd_out;
1609	int err, err2;
1610	int uid;
1611
1612	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1613		return -EINVAL;
1614
1615	uid = devx_get_uid(c, cmd_in);
1616	if (uid < 0)
1617		return uid;
1618
1619	if (!devx_is_obj_modify_cmd(cmd_in))
1620		return -EINVAL;
1621
1622	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1623		return -EINVAL;
1624
1625	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1626	if (IS_ERR(cmd_out))
1627		return PTR_ERR(cmd_out);
1628
1629	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1630	devx_set_umem_valid(cmd_in);
1631
1632	err = mlx5_cmd_do(mdev->mdev, cmd_in,
1633			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1634			  cmd_out, cmd_out_len);
1635	if (err && err != -EREMOTEIO)
1636		return err;
1637
1638	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1639			      cmd_out, cmd_out_len);
1640
1641	return err2 ?: err;
1642}
1643
1644static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
1645	struct uverbs_attr_bundle *attrs)
1646{
1647	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1648	int cmd_out_len = uverbs_attr_get_len(attrs,
1649					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1650	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1651							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
1652	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1653		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1654	void *cmd_out;
1655	int err, err2;
1656	int uid;
1657	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1658
1659	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1660		return -EINVAL;
1661
1662	uid = devx_get_uid(c, cmd_in);
1663	if (uid < 0)
1664		return uid;
1665
1666	if (!devx_is_obj_query_cmd(cmd_in))
1667		return -EINVAL;
1668
1669	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1670		return -EINVAL;
1671
1672	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1673	if (IS_ERR(cmd_out))
1674		return PTR_ERR(cmd_out);
1675
1676	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1677	err = mlx5_cmd_do(mdev->mdev, cmd_in,
1678			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1679			  cmd_out, cmd_out_len);
1680	if (err && err != -EREMOTEIO)
1681		return err;
1682
1683	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1684			      cmd_out, cmd_out_len);
1685
1686	return err2 ?: err;
1687}
1688
1689struct devx_async_event_queue {
1690	spinlock_t		lock;
1691	wait_queue_head_t	poll_wait;
1692	struct list_head	event_list;
1693	atomic_t		bytes_in_use;
1694	u8			is_destroyed:1;
1695};
1696
1697struct devx_async_cmd_event_file {
1698	struct ib_uobject		uobj;
1699	struct devx_async_event_queue	ev_queue;
1700	struct mlx5_async_ctx		async_ctx;
1701};
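
/*
 * Each async command FD owns one of these queues: devx_query_callback()
 * appends completed commands to event_list and wakes poll_wait, while the
 * FD's read side drains it; bytes_in_use throttles outstanding completions
 * against MAX_ASYNC_BYTES_IN_USE (defined below).
 */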
1702
1703static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1704{
1705	spin_lock_init(&ev_queue->lock);
1706	INIT_LIST_HEAD(&ev_queue->event_list);
1707	init_waitqueue_head(&ev_queue->poll_wait);
1708	atomic_set(&ev_queue->bytes_in_use, 0);
1709	ev_queue->is_destroyed = 0;
1710}
1711
1712static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1713	struct uverbs_attr_bundle *attrs)
1714{
1715	struct devx_async_cmd_event_file *ev_file;
1716
1717	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1718		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
1719	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
1720
1721	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1722			       uobj);
1723	devx_init_event_queue(&ev_file->ev_queue);
1724	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1725	return 0;
1726}
1727
1728static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
1729	struct uverbs_attr_bundle *attrs)
1730{
1731	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1732		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
1733	struct devx_async_event_file *ev_file;
1734	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1735		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1736	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1737	u32 flags;
1738	int err;
1739
1740	err = uverbs_get_flags32(&flags, attrs,
1741		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
1742		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
1743
1744	if (err)
1745		return err;
1746
1747	ev_file = container_of(uobj, struct devx_async_event_file,
1748			       uobj);
1749	spin_lock_init(&ev_file->lock);
1750	INIT_LIST_HEAD(&ev_file->event_list);
1751	init_waitqueue_head(&ev_file->poll_wait);
1752	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
1753		ev_file->omit_data = 1;
1754	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
1755	ev_file->dev = dev;
1756	get_device(&dev->ib_dev.dev);
1757	return 0;
1758}
1759
1760static void devx_query_callback(int status, struct mlx5_async_work *context)
1761{
1762	struct devx_async_data *async_data =
1763		container_of(context, struct devx_async_data, cb_work);
1764	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
1765	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
1766	unsigned long flags;
1767
1768	/*
1769	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
1770	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
1771	 * routine returns, ensuring that it always remains valid here.
1772	 */
1773	spin_lock_irqsave(&ev_queue->lock, flags);
1774	list_add_tail(&async_data->list, &ev_queue->event_list);
1775	spin_unlock_irqrestore(&ev_queue->lock, flags);
1776
1777	wake_up_interruptible(&ev_queue->poll_wait);
1778}
1779
1780#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1781
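/*
 * Issue a DEVX object query asynchronously: the output buffer is
 * accounted against the FD's bytes_in_use budget (-EAGAIN once the 1MB
 * limit is exceeded) and the completion is delivered through
 * devx_query_callback(), which queues the result for read().
 */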
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->ev_file = ev_file;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto free_async;

	return 0;

free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

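/*
 * Event subscriptions live in a two-level XArray: level 1 is keyed by
 * event_type | (obj_type << 16) and holds the unaffiliated list plus a
 * second XArray; level 2 is keyed by object id and holds the per-object
 * subscriber list. Level 1 entries are kept around for future use;
 * level 2 entries are freed (RCU-deferred) once their subscriber list
 * becomes empty.
 */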
static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err) {
			kfree(obj_event);
			return err;
		}
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

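/*
 * Validate the requested event numbers: when the device exposes the
 * event_cap capability, check them against the user_affiliated /
 * user_unaffiliated event bitmasks; otherwise fall back to the fixed
 * legacy lists. Event number 0 (CQ completion) is accepted
 * unconditionally for object-affiliated subscriptions in the
 * capability-based path.
 */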
static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}

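/*
 * Subscribe an event FD (or an eventfd, for data-less notification) to
 * up to MAX_NUM_EVENTS event types, optionally bound to one DEVX
 * object. Runs in two phases under event_xa_lock: first all XA slots
 * and subscription structs are allocated onto a temporary list, then,
 * once nothing can fail, each subscription is committed to the RCU
 * lists used by the notifier. On error the temporary list is fully
 * unwound.
 */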
#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
			       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Protect against concurrent subscriptions to the same XA entries so
	 * that both can succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		uverbs_uobject_get(&ev_file->uobj);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		/* May be needed upon cleanup of the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions are done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);
		uverbs_uobject_put(&event_sub->ev_file->uobj);
		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}

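/*
 * Pin the user memory backing a DEVX umem: either a dma-buf (when the
 * dmabuf fd attribute is present) or a regular ib_umem covering the
 * given VA range.
 */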
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj, u32 access_flags)
{
	u64 addr;
	size_t size;
	int err;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = ib_check_mr_access(&dev->ib_dev, access_flags);
	if (err)
		return err;

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
		struct ib_umem_dmabuf *umem_dmabuf;
		int dmabuf_fd;

		err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
					MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
		if (err)
			return -EFAULT;

		umem_dmabuf = ib_umem_dmabuf_get_pinned(
			&dev->ib_dev, addr, size, dmabuf_fd, access_flags);
		if (IS_ERR(umem_dmabuf))
			return PTR_ERR(umem_dmabuf);
		obj->umem = &umem_dmabuf->umem;
	} else {
		obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
		if (IS_ERR(obj->umem))
			return PTR_ERR(obj->umem);
	}
	return 0;
}

static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
					       unsigned long pgsz_bitmap)
{
	unsigned long page_size;

	/* Don't bother checking larger page sizes as offset must be zero and
	 * total DEVX umem length must be equal to total umem length.
	 */
	pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
					 PAGE_SHIFT),
				   MLX5_ADAPTER_PAGE_SHIFT);
	if (!pgsz_bitmap)
		return 0;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
	if (!page_size)
		return 0;

	/* If the page_size is less than the CPU page size then we can use the
	 * offset and create a umem which is a subset of the page list.
	 * For larger page sizes we can't be sure the DMA list reflects the
	 * VA so we must ensure that the umem extent is exactly equal to the
	 * page list. Reduce the page size until one of these cases is true.
	 */
	while ((ib_umem_dma_offset(umem, page_size) != 0 ||
		(umem->length % page_size) != 0) &&
		page_size > PAGE_SIZE)
		page_size /= 2;

	return page_size;
}

static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
				   struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd,
				   int access)
{
	unsigned long pgsz_bitmap;
	unsigned int page_size;
	__be64 *mtt;
	void *umem;
	int ret;

	/*
	 * If the user does not pass in pgsz_bitmap then the user promises not
	 * to use umem_offset!=0 in any commands that allocate on top of the
	 * umem.
	 *
	 * If the user wants to use a umem_offset then it must pass in
	 * pgsz_bitmap which guides the maximum page size and thus maximum
	 * object alignment inside the umem. See the PRM.
	 *
	 * Users are not allowed to use IOVA here, mkeys are not supported on
	 * umem.
	 */
	ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
			MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
			GENMASK_ULL(63,
				    min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
	if (ret)
		return ret;

	page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
	if (!page_size)
		return -EINVAL;

	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		     (MLX5_ST_SZ_BYTES(mtt) *
		      ib_umem_num_dma_blocks(obj->umem, page_size));
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	if (IS_ERR(cmd->in))
		return PTR_ERR(cmd->in);

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt,
		   ib_umem_num_dma_blocks(obj->umem, page_size));
	MLX5_SET(umem, umem, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset,
		 ib_umem_dma_offset(obj->umem, page_size));

	if (mlx5_umem_needs_ats(dev, obj->umem, access))
		MLX5_SET(umem, umem, ats, 1);

	mlx5_ib_populate_pas(obj->umem, page_size, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
				     MLX5_IB_MTT_READ);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int access_flags;
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	err = uverbs_get_flags32(&access_flags, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_RELAXED_ORDERING);
	if (err)
		return err;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
	if (err)
		goto err_umem_release;

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
			     sizeof(obj_id));
	return err;

err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (err)
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}

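/*
 * Extract the 24-bit object number carried in the EQE so an affiliated
 * event can be routed to the matching level-2 subscriber list.
 */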
static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

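/*
 * Queue one event for a subscriber. In omit_data mode the subscription
 * itself is linked onto the FD's event list (so duplicate pending
 * events coalesce and read() returns only the cookie); otherwise a full
 * EQE copy is allocated with GFP_ATOMIC, and allocation failure is
 * flagged as an overflow error reported by the next read().
 */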
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list) ||
		    ev_file->is_destroyed) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	if (!ev_file->is_destroyed)
		list_add_tail(&event_data->list, &ev_file->event_list);
	else
		kfree(event_data);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (item->eventfd)
			eventfd_signal(item->eventfd);
		else
			deliver_event(item, data);
	}
}

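/*
 * EQ notifier: runs under RCU, looks up the level-1 entry keyed by
 * event_type | (obj_type << 16), and dispatches either to the
 * unaffiliated list or, after decoding the object id from the EQE, to
 * the per-object subscriber list.
 */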
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicitly filter out kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}

int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	int uid;

	uid = mlx5_ib_devx_create(dev, false);
	if (uid > 0) {
		dev->devx_whitelist_uid = uid;
		xa_init(&table->event_xa);
		mutex_init(&table->event_xa_lock);
		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
	}

	return 0;
}

void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	if (dev->devx_whitelist_uid) {
		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
		mutex_lock(&dev->devx_event_table.event_xa_lock);
		xa_for_each(&table->event_xa, id, entry) {
			event = entry;
			list_for_each_entry_safe(
				sub, tmp, &event->unaffiliated_list, xa_list)
				devx_cleanup_subscription(dev, sub);
			kfree(entry);
		}
		mutex_unlock(&dev->devx_event_table.event_xa_lock);
		xa_destroy(&table->event_xa);

		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
	}
}

static void devx_async_destroy_cb(int status, struct mlx5_async_work *context)
{
	struct mlx5_async_cmd *devx_out = container_of(context,
					  struct mlx5_async_cmd, cb_work);
	struct devx_obj *obj = devx_out->uobject->object;

	if (!status)
		obj->flags |= DEVX_OBJ_FLAGS_HW_FREED;

	complete(&devx_out->comp);
}

static void devx_async_destroy(struct mlx5_ib_dev *dev,
			       struct mlx5_async_cmd *cmd)
{
	init_completion(&cmd->comp);
	cmd->err = mlx5_cmd_exec_cb(&dev->async_ctx, cmd->in, cmd->in_size,
				    &cmd->out, sizeof(cmd->out),
				    devx_async_destroy_cb, &cmd->cb_work);
}

static void devx_wait_async_destroy(struct mlx5_async_cmd *cmd)
{
	if (!cmd->err)
		wait_for_completion(&cmd->comp);
	atomic_set(&cmd->uobject->usecnt, 0);
}

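/*
 * Destroy the HW resources of the ufile's DEVX QP objects with a
 * pipeline of up to MAX_ASYNC_CMDS in-flight destroy commands: issue
 * into a ring of async_cmd slots and wait for the oldest entry whenever
 * the window fills, then drain the remainder.
 */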
void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
{
	struct mlx5_async_cmd async_cmd[MAX_ASYNC_CMDS];
	struct ib_ucontext *ucontext = ufile->ucontext;
	struct ib_device *device = ucontext->device;
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct ib_uobject *uobject;
	struct devx_obj *obj;
	int head = 0;
	int tail = 0;

	list_for_each_entry(uobject, &ufile->uobjects, list) {
		WARN_ON(uverbs_try_lock_object(uobject, UVERBS_LOOKUP_WRITE));

		/*
		 * Currently we only support QP destruction; if other objects
		 * are to be destroyed, type synchronization must be added to
		 * the cleanup algorithm, along with pre/post FW cleanup
		 * handling for the new types if needed.
		 */
		if (uobj_get_object_id(uobject) != MLX5_IB_OBJECT_DEVX_OBJ ||
		    (get_dec_obj_type(uobject->object, MLX5_EVENT_TYPE_MAX) !=
		     MLX5_OBJ_TYPE_QP)) {
			atomic_set(&uobject->usecnt, 0);
			continue;
		}

		obj = uobject->object;

		async_cmd[tail % MAX_ASYNC_CMDS].in = obj->dinbox;
		async_cmd[tail % MAX_ASYNC_CMDS].in_size = obj->dinlen;
		async_cmd[tail % MAX_ASYNC_CMDS].uobject = uobject;

		devx_async_destroy(dev, &async_cmd[tail % MAX_ASYNC_CMDS]);
		tail++;

		if (tail - head == MAX_ASYNC_CMDS) {
			devx_wait_async_destroy(&async_cmd[head % MAX_ASYNC_CMDS]);
			head++;
		}
	}

	while (head != tail) {
		devx_wait_async_destroy(&async_cmd[head % MAX_ASYNC_CMDS]);
		head++;
	}
}

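/*
 * read() for the async command FD: blocks (unless O_NONBLOCK) until a
 * completed query is queued, fails with -ENOSPC if the user buffer
 * cannot hold the whole event, and releases the event's bytes_in_use
 * accounting once consumed.
 */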
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_queue->lock);
		if (ev_queue->is_destroyed) {
			spin_unlock_irq(&ev_queue->lock);
			return -EIO;
		}
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					      struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = uverbs_uobject_fd_release,
};

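/*
 * read() for the async event FD: reports a pending overflow first, then
 * returns either the bare 64-bit cookie (omit_data mode) or a full
 * header-plus-EQE event, removing it from the per-file list.
 */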
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *event;
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					struct devx_event_subscription,
					event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
				      struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

2846
2847static __poll_t devx_async_event_poll(struct file *filp,
2848				      struct poll_table_struct *wait)
2849{
2850	struct devx_async_event_file *ev_file = filp->private_data;
2851	__poll_t pollflags = 0;
2852
2853	poll_wait(filp, &ev_file->poll_wait, wait);
2854
2855	spin_lock_irq(&ev_file->lock);
2856	if (ev_file->is_destroyed)
2857		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2858	else if (!list_empty(&ev_file->event_list))
2859		pollflags = EPOLLIN | EPOLLRDNORM;
2860	spin_unlock_irq(&ev_file->lock);
2861
2862	return pollflags;
2863}
2864
2865static void devx_free_subscription(struct rcu_head *rcu)
2866{
2867	struct devx_event_subscription *event_sub =
2868		container_of(rcu, struct devx_event_subscription, rcu);
2869
2870	if (event_sub->eventfd)
2871		eventfd_ctx_put(event_sub->eventfd);
2872	uverbs_uobject_put(&event_sub->ev_file->uobj);
2873	kfree(event_sub);
2874}
2875
2876static const struct file_operations devx_async_event_fops = {
2877	.owner	 = THIS_MODULE,
2878	.read	 = devx_async_event_read,
2879	.poll    = devx_async_event_poll,
2880	.release = uverbs_uobject_fd_release,
2881};
2882
2883static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
2884					      enum rdma_remove_reason why)
2885{
2886	struct devx_async_cmd_event_file *comp_ev_file =
2887		container_of(uobj, struct devx_async_cmd_event_file,
2888			     uobj);
2889	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2890	struct devx_async_data *entry, *tmp;
2891
2892	spin_lock_irq(&ev_queue->lock);
2893	ev_queue->is_destroyed = 1;
2894	spin_unlock_irq(&ev_queue->lock);
2895	wake_up_interruptible(&ev_queue->poll_wait);
2896
2897	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2898
2899	spin_lock_irq(&comp_ev_file->ev_queue.lock);
2900	list_for_each_entry_safe(entry, tmp,
2901				 &comp_ev_file->ev_queue.event_list, list) {
2902		list_del(&entry->list);
2903		kvfree(entry);
2904	}
2905	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2906};
2907
2908static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
2909					  enum rdma_remove_reason why)
2910{
2911	struct devx_async_event_file *ev_file =
2912		container_of(uobj, struct devx_async_event_file,
2913			     uobj);
2914	struct devx_event_subscription *event_sub, *event_sub_tmp;
2915	struct mlx5_ib_dev *dev = ev_file->dev;
2916
2917	spin_lock_irq(&ev_file->lock);
2918	ev_file->is_destroyed = 1;
2919
2920	/* free the pending events allocation */
2921	if (ev_file->omit_data) {
2922		struct devx_event_subscription *event_sub, *tmp;
2923
2924		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
2925					 event_list)
2926			list_del_init(&event_sub->event_list);
2927
2928	} else {
2929		struct devx_async_event_data *entry, *tmp;
2930
2931		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
2932					 list) {
2933			list_del(&entry->list);
2934			kfree(entry);
2935		}
2936	}
2937
2938	spin_unlock_irq(&ev_file->lock);
2939	wake_up_interruptible(&ev_file->poll_wait);
2940
2941	mutex_lock(&dev->devx_event_table.event_xa_lock);
2942	/* delete the subscriptions which are related to this FD */
2943	list_for_each_entry_safe(event_sub, event_sub_tmp,
2944				 &ev_file->subscribed_events_list, file_list) {
2945		devx_cleanup_subscription(dev, event_sub);
2946		list_del_rcu(&event_sub->file_list);
2947		/* subscription may not be used by the read API any more */
2948		call_rcu(&event_sub->rcu, devx_free_subscription);
2949	}
2950	mutex_unlock(&dev->devx_event_table.event_xa_lock);
2951
2952	put_device(&dev->ib_dev.dev);
2953};
2954
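/*
 * uverbs ioctl metadata: the DECLARE_UVERBS_* blocks below describe the
 * DEVX methods, their attributes and the object types they attach to;
 * mlx5_ib_devx_defs at the end wires them into the device's uapi tree
 * when devx_is_supported() (the log_max_uctx capability) holds.
 */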
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
			   UA_OPTIONAL),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
			     u64),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
		u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
		UVERBS_ATTR_TYPE(u64),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
		MLX5_IB_OBJECT_DEVX_OBJ,
		UVERBS_ACCESS_READ,
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
		UVERBS_ATTR_TYPE(u64),
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
		UVERBS_ATTR_TYPE(u32),
		UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_async_cmd_event_destroy_uobj,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			enum mlx5_ib_uapi_devx_create_event_channel_flags,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_async_event_destroy_uobj,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));

static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};