Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
   4 */
   5
   6#include <rdma/ib_user_verbs.h>
   7#include <rdma/ib_verbs.h>
   8#include <rdma/uverbs_types.h>
   9#include <rdma/uverbs_ioctl.h>
  10#include <rdma/mlx5_user_ioctl_cmds.h>
  11#include <rdma/mlx5_user_ioctl_verbs.h>
  12#include <rdma/ib_umem.h>
  13#include <rdma/uverbs_std_types.h>
  14#include <linux/mlx5/driver.h>
  15#include <linux/mlx5/fs.h>
  16#include "mlx5_ib.h"
  17#include "devx.h"
  18#include "qp.h"
  19#include <linux/xarray.h>
  20
  21#define UVERBS_MODULE_NAME mlx5_ib
  22#include <rdma/uverbs_named_ioctl.h>
  23
  24static void dispatch_event_fd(struct list_head *fd_list, const void *data);
  25
  26enum devx_obj_flags {
  27	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
  28	DEVX_OBJ_FLAGS_DCT = 1 << 1,
  29	DEVX_OBJ_FLAGS_CQ = 1 << 2,
  30};
  31
  32struct devx_async_data {
  33	struct mlx5_ib_dev *mdev;
  34	struct list_head list;
  35	struct devx_async_cmd_event_file *ev_file;
  36	struct mlx5_async_work cb_work;
  37	u16 cmd_out_len;
  38	/* must be last field in this structure */
  39	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
  40};
  41
  42struct devx_async_event_data {
  43	struct list_head list; /* headed in ev_file->event_list */
  44	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
  45};
  46
  47/* first level XA value data structure */
  48struct devx_event {
  49	struct xarray object_ids; /* second XA level, Key = object id */
  50	struct list_head unaffiliated_list;
  51};
  52
  53/* second level XA value data structure */
  54struct devx_obj_event {
  55	struct rcu_head rcu;
  56	struct list_head obj_sub_list;
  57};
  58
  59struct devx_event_subscription {
  60	struct list_head file_list; /* headed in ev_file->
  61				     * subscribed_events_list
  62				     */
  63	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
  64				   * devx_obj_event->obj_sub_list
  65				   */
  66	struct list_head obj_list; /* headed in devx_object */
  67	struct list_head event_list; /* headed in ev_file->event_list or in
  68				      * temp list via subscription
  69				      */
  70
  71	u8 is_cleaned:1;
  72	u32 xa_key_level1;
  73	u32 xa_key_level2;
  74	struct rcu_head	rcu;
  75	u64 cookie;
  76	struct devx_async_event_file *ev_file;
  77	struct eventfd_ctx *eventfd;
  78};
  79
  80struct devx_async_event_file {
  81	struct ib_uobject uobj;
  82	/* Head of events that are subscribed to this FD */
  83	struct list_head subscribed_events_list;
  84	spinlock_t lock;
  85	wait_queue_head_t poll_wait;
  86	struct list_head event_list;
  87	struct mlx5_ib_dev *dev;
  88	u8 omit_data:1;
  89	u8 is_overflow_err:1;
  90	u8 is_destroyed:1;
  91};
  92
  93struct devx_umem {
  94	struct mlx5_core_dev		*mdev;
  95	struct ib_umem			*umem;
  96	u32				dinlen;
  97	u32				dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
  98};
  99
 100struct devx_umem_reg_cmd {
 101	void				*in;
 102	u32				inlen;
 103	u32				out[MLX5_ST_SZ_DW(create_umem_out)];
 104};
 105
 106static struct mlx5_ib_ucontext *
 107devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
 108{
 109	return to_mucontext(ib_uverbs_get_ucontext(attrs));
 110}
 111
 112int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
 113{
 114	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
 115	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
 116	void *uctx;
 117	int err;
 118	u16 uid;
 119	u32 cap = 0;
 120
 121	/* 0 means not supported */
 122	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
 123		return -EINVAL;
 124
 125	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
 126	if (is_user && capable(CAP_NET_RAW) &&
 127	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
 128		cap |= MLX5_UCTX_CAP_RAW_TX;
 129	if (is_user && capable(CAP_SYS_RAWIO) &&
 130	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
 131	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
 132		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
 133
 134	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
 135	MLX5_SET(uctx, uctx, cap, cap);
 136
 137	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 138	if (err)
 139		return err;
 140
 141	uid = MLX5_GET(create_uctx_out, out, uid);
 142	return uid;
 143}
 144
 145void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
 146{
 147	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
 148	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
 149
 150	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
 151	MLX5_SET(destroy_uctx_in, in, uid, uid);
 152
 153	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 154}
 155
 156static bool is_legacy_unaffiliated_event_num(u16 event_num)
 157{
 158	switch (event_num) {
 159	case MLX5_EVENT_TYPE_PORT_CHANGE:
 160		return true;
 161	default:
 162		return false;
 163	}
 164}
 165
 166static bool is_legacy_obj_event_num(u16 event_num)
 167{
 168	switch (event_num) {
 169	case MLX5_EVENT_TYPE_PATH_MIG:
 170	case MLX5_EVENT_TYPE_COMM_EST:
 171	case MLX5_EVENT_TYPE_SQ_DRAINED:
 172	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
 173	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 174	case MLX5_EVENT_TYPE_CQ_ERROR:
 175	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
 176	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 177	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 178	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
 179	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
 180	case MLX5_EVENT_TYPE_DCT_DRAINED:
 181	case MLX5_EVENT_TYPE_COMP:
 182	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
 183	case MLX5_EVENT_TYPE_XRQ_ERROR:
 184		return true;
 185	default:
 186		return false;
 187	}
 188}
 189
 190static u16 get_legacy_obj_type(u16 opcode)
 191{
 192	switch (opcode) {
 193	case MLX5_CMD_OP_CREATE_RQ:
 194		return MLX5_EVENT_QUEUE_TYPE_RQ;
 195	case MLX5_CMD_OP_CREATE_QP:
 196		return MLX5_EVENT_QUEUE_TYPE_QP;
 197	case MLX5_CMD_OP_CREATE_SQ:
 198		return MLX5_EVENT_QUEUE_TYPE_SQ;
 199	case MLX5_CMD_OP_CREATE_DCT:
 200		return MLX5_EVENT_QUEUE_TYPE_DCT;
 201	default:
 202		return 0;
 203	}
 204}
 205
 206static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
 207{
 208	u16 opcode;
 209
 210	opcode = (obj->obj_id >> 32) & 0xffff;
 211
 212	if (is_legacy_obj_event_num(event_num))
 213		return get_legacy_obj_type(opcode);
 214
 215	switch (opcode) {
 216	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 217		return (obj->obj_id >> 48);
 218	case MLX5_CMD_OP_CREATE_RQ:
 219		return MLX5_OBJ_TYPE_RQ;
 220	case MLX5_CMD_OP_CREATE_QP:
 221		return MLX5_OBJ_TYPE_QP;
 222	case MLX5_CMD_OP_CREATE_SQ:
 223		return MLX5_OBJ_TYPE_SQ;
 224	case MLX5_CMD_OP_CREATE_DCT:
 225		return MLX5_OBJ_TYPE_DCT;
 226	case MLX5_CMD_OP_CREATE_TIR:
 227		return MLX5_OBJ_TYPE_TIR;
 228	case MLX5_CMD_OP_CREATE_TIS:
 229		return MLX5_OBJ_TYPE_TIS;
 230	case MLX5_CMD_OP_CREATE_PSV:
 231		return MLX5_OBJ_TYPE_PSV;
 232	case MLX5_OBJ_TYPE_MKEY:
 233		return MLX5_OBJ_TYPE_MKEY;
 234	case MLX5_CMD_OP_CREATE_RMP:
 235		return MLX5_OBJ_TYPE_RMP;
 236	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 237		return MLX5_OBJ_TYPE_XRC_SRQ;
 238	case MLX5_CMD_OP_CREATE_XRQ:
 239		return MLX5_OBJ_TYPE_XRQ;
 240	case MLX5_CMD_OP_CREATE_RQT:
 241		return MLX5_OBJ_TYPE_RQT;
 242	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 243		return MLX5_OBJ_TYPE_FLOW_COUNTER;
 244	case MLX5_CMD_OP_CREATE_CQ:
 245		return MLX5_OBJ_TYPE_CQ;
 246	default:
 247		return 0;
 248	}
 249}
 250
 251static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
 252{
 253	switch (event_type) {
 254	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
 255	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
 256	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 257	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
 258	case MLX5_EVENT_TYPE_PATH_MIG:
 259	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 260	case MLX5_EVENT_TYPE_COMM_EST:
 261	case MLX5_EVENT_TYPE_SQ_DRAINED:
 262	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 263	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
 264		return eqe->data.qp_srq.type;
 265	case MLX5_EVENT_TYPE_CQ_ERROR:
 266	case MLX5_EVENT_TYPE_XRQ_ERROR:
 267		return 0;
 268	case MLX5_EVENT_TYPE_DCT_DRAINED:
 269	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
 270		return MLX5_EVENT_QUEUE_TYPE_DCT;
 271	default:
 272		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
 273	}
 274}
 275
 276static u32 get_dec_obj_id(u64 obj_id)
 277{
 278	return (obj_id & 0xffffffff);
 279}
 280
 281/*
 282 * As the obj_id in the firmware is not globally unique the object type
 283 * must be considered upon checking for a valid object id.
 284 * For that the opcode of the creator command is encoded as part of the obj_id.
 285 */
 286static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
 287{
 288	return ((u64)opcode << 32) | obj_id;
 289}
 290
 291static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
 292{
 293	switch (opcode) {
 294	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 295		return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
 296	case MLX5_CMD_OP_CREATE_UMEM:
 297		return MLX5_GET(create_umem_out, out, umem_id);
 298	case MLX5_CMD_OP_CREATE_MKEY:
 299		return MLX5_GET(create_mkey_out, out, mkey_index);
 300	case MLX5_CMD_OP_CREATE_CQ:
 301		return MLX5_GET(create_cq_out, out, cqn);
 302	case MLX5_CMD_OP_ALLOC_PD:
 303		return MLX5_GET(alloc_pd_out, out, pd);
 304	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
 305		return MLX5_GET(alloc_transport_domain_out, out,
 306				transport_domain);
 307	case MLX5_CMD_OP_CREATE_RMP:
 308		return MLX5_GET(create_rmp_out, out, rmpn);
 309	case MLX5_CMD_OP_CREATE_SQ:
 310		return MLX5_GET(create_sq_out, out, sqn);
 311	case MLX5_CMD_OP_CREATE_RQ:
 312		return MLX5_GET(create_rq_out, out, rqn);
 313	case MLX5_CMD_OP_CREATE_RQT:
 314		return MLX5_GET(create_rqt_out, out, rqtn);
 315	case MLX5_CMD_OP_CREATE_TIR:
 316		return MLX5_GET(create_tir_out, out, tirn);
 317	case MLX5_CMD_OP_CREATE_TIS:
 318		return MLX5_GET(create_tis_out, out, tisn);
 319	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 320		return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
 321	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
 322		return MLX5_GET(create_flow_table_out, out, table_id);
 323	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 324		return MLX5_GET(create_flow_group_out, out, group_id);
 325	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 326		return MLX5_GET(set_fte_in, in, flow_index);
 327	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 328		return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
 329	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
 330		return MLX5_GET(alloc_packet_reformat_context_out, out,
 331				packet_reformat_id);
 332	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
 333		return MLX5_GET(alloc_modify_header_context_out, out,
 334				modify_header_id);
 335	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
 336		return MLX5_GET(create_scheduling_element_out, out,
 337				scheduling_element_id);
 338	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 339		return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
 340	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 341		return MLX5_GET(set_l2_table_entry_in, in, table_index);
 342	case MLX5_CMD_OP_CREATE_QP:
 343		return MLX5_GET(create_qp_out, out, qpn);
 344	case MLX5_CMD_OP_CREATE_SRQ:
 345		return MLX5_GET(create_srq_out, out, srqn);
 346	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 347		return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
 348	case MLX5_CMD_OP_CREATE_DCT:
 349		return MLX5_GET(create_dct_out, out, dctn);
 350	case MLX5_CMD_OP_CREATE_XRQ:
 351		return MLX5_GET(create_xrq_out, out, xrqn);
 352	case MLX5_CMD_OP_ATTACH_TO_MCG:
 353		return MLX5_GET(attach_to_mcg_in, in, qpn);
 354	case MLX5_CMD_OP_ALLOC_XRCD:
 355		return MLX5_GET(alloc_xrcd_out, out, xrcd);
 356	case MLX5_CMD_OP_CREATE_PSV:
 357		return MLX5_GET(create_psv_out, out, psv0_index);
 358	default:
 359		/* The entry must match to one of the devx_is_obj_create_cmd */
 360		WARN_ON(true);
 361		return 0;
 362	}
 363}
 364
 365static u64 devx_get_obj_id(const void *in)
 366{
 367	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 368	u64 obj_id;
 369
 370	switch (opcode) {
 371	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
 372	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
 373		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
 374					MLX5_GET(general_obj_in_cmd_hdr, in,
 375						 obj_type) << 16,
 376					MLX5_GET(general_obj_in_cmd_hdr, in,
 377						 obj_id));
 378		break;
 379	case MLX5_CMD_OP_QUERY_MKEY:
 380		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
 381					MLX5_GET(query_mkey_in, in,
 382						 mkey_index));
 383		break;
 384	case MLX5_CMD_OP_QUERY_CQ:
 385		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 386					MLX5_GET(query_cq_in, in, cqn));
 387		break;
 388	case MLX5_CMD_OP_MODIFY_CQ:
 389		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 390					MLX5_GET(modify_cq_in, in, cqn));
 391		break;
 392	case MLX5_CMD_OP_QUERY_SQ:
 393		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 394					MLX5_GET(query_sq_in, in, sqn));
 395		break;
 396	case MLX5_CMD_OP_MODIFY_SQ:
 397		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 398					MLX5_GET(modify_sq_in, in, sqn));
 399		break;
 400	case MLX5_CMD_OP_QUERY_RQ:
 401		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 402					MLX5_GET(query_rq_in, in, rqn));
 403		break;
 404	case MLX5_CMD_OP_MODIFY_RQ:
 405		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 406					MLX5_GET(modify_rq_in, in, rqn));
 407		break;
 408	case MLX5_CMD_OP_QUERY_RMP:
 409		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
 410					MLX5_GET(query_rmp_in, in, rmpn));
 411		break;
 412	case MLX5_CMD_OP_MODIFY_RMP:
 413		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
 414					MLX5_GET(modify_rmp_in, in, rmpn));
 415		break;
 416	case MLX5_CMD_OP_QUERY_RQT:
 417		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 418					MLX5_GET(query_rqt_in, in, rqtn));
 419		break;
 420	case MLX5_CMD_OP_MODIFY_RQT:
 421		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 422					MLX5_GET(modify_rqt_in, in, rqtn));
 423		break;
 424	case MLX5_CMD_OP_QUERY_TIR:
 425		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 426					MLX5_GET(query_tir_in, in, tirn));
 427		break;
 428	case MLX5_CMD_OP_MODIFY_TIR:
 429		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 430					MLX5_GET(modify_tir_in, in, tirn));
 431		break;
 432	case MLX5_CMD_OP_QUERY_TIS:
 433		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 434					MLX5_GET(query_tis_in, in, tisn));
 435		break;
 436	case MLX5_CMD_OP_MODIFY_TIS:
 437		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 438					MLX5_GET(modify_tis_in, in, tisn));
 439		break;
 440	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 441		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
 442					MLX5_GET(query_flow_table_in, in,
 443						 table_id));
 444		break;
 445	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
 446		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
 447					MLX5_GET(modify_flow_table_in, in,
 448						 table_id));
 449		break;
 450	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
 451		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
 452					MLX5_GET(query_flow_group_in, in,
 453						 group_id));
 454		break;
 455	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 456		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
 457					MLX5_GET(query_fte_in, in,
 458						 flow_index));
 459		break;
 460	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 461		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
 462					MLX5_GET(set_fte_in, in, flow_index));
 463		break;
 464	case MLX5_CMD_OP_QUERY_Q_COUNTER:
 465		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
 466					MLX5_GET(query_q_counter_in, in,
 467						 counter_set_id));
 468		break;
 469	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 470		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
 471					MLX5_GET(query_flow_counter_in, in,
 472						 flow_counter_id));
 473		break;
 474	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
 475		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
 476					MLX5_GET(query_modify_header_context_in,
 477						 in, modify_header_id));
 478		break;
 479	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
 480		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
 481					MLX5_GET(query_scheduling_element_in,
 482						 in, scheduling_element_id));
 483		break;
 484	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
 485		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
 486					MLX5_GET(modify_scheduling_element_in,
 487						 in, scheduling_element_id));
 488		break;
 489	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 490		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
 491					MLX5_GET(add_vxlan_udp_dport_in, in,
 492						 vxlan_udp_port));
 493		break;
 494	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
 495		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
 496					MLX5_GET(query_l2_table_entry_in, in,
 497						 table_index));
 498		break;
 499	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 500		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
 501					MLX5_GET(set_l2_table_entry_in, in,
 502						 table_index));
 503		break;
 504	case MLX5_CMD_OP_QUERY_QP:
 505		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 506					MLX5_GET(query_qp_in, in, qpn));
 507		break;
 508	case MLX5_CMD_OP_RST2INIT_QP:
 509		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 510					MLX5_GET(rst2init_qp_in, in, qpn));
 511		break;
 512	case MLX5_CMD_OP_INIT2INIT_QP:
 513		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 514					MLX5_GET(init2init_qp_in, in, qpn));
 515		break;
 516	case MLX5_CMD_OP_INIT2RTR_QP:
 517		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 518					MLX5_GET(init2rtr_qp_in, in, qpn));
 519		break;
 520	case MLX5_CMD_OP_RTR2RTS_QP:
 521		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 522					MLX5_GET(rtr2rts_qp_in, in, qpn));
 523		break;
 524	case MLX5_CMD_OP_RTS2RTS_QP:
 525		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 526					MLX5_GET(rts2rts_qp_in, in, qpn));
 527		break;
 528	case MLX5_CMD_OP_SQERR2RTS_QP:
 529		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 530					MLX5_GET(sqerr2rts_qp_in, in, qpn));
 531		break;
 532	case MLX5_CMD_OP_2ERR_QP:
 533		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 534					MLX5_GET(qp_2err_in, in, qpn));
 535		break;
 536	case MLX5_CMD_OP_2RST_QP:
 537		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 538					MLX5_GET(qp_2rst_in, in, qpn));
 539		break;
 540	case MLX5_CMD_OP_QUERY_DCT:
 541		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 542					MLX5_GET(query_dct_in, in, dctn));
 543		break;
 544	case MLX5_CMD_OP_QUERY_XRQ:
 545	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
 546	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
 547		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
 548					MLX5_GET(query_xrq_in, in, xrqn));
 549		break;
 550	case MLX5_CMD_OP_QUERY_XRC_SRQ:
 551		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
 552					MLX5_GET(query_xrc_srq_in, in,
 553						 xrc_srqn));
 554		break;
 555	case MLX5_CMD_OP_ARM_XRC_SRQ:
 556		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
 557					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
 558		break;
 559	case MLX5_CMD_OP_QUERY_SRQ:
 560		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
 561					MLX5_GET(query_srq_in, in, srqn));
 562		break;
 563	case MLX5_CMD_OP_ARM_RQ:
 564		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 565					MLX5_GET(arm_rq_in, in, srq_number));
 566		break;
 567	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
 568		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 569					MLX5_GET(drain_dct_in, in, dctn));
 570		break;
 571	case MLX5_CMD_OP_ARM_XRQ:
 572	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
 573	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
 574	case MLX5_CMD_OP_MODIFY_XRQ:
 575		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
 576					MLX5_GET(arm_xrq_in, in, xrqn));
 577		break;
 578	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
 579		obj_id = get_enc_obj_id
 580				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
 581				 MLX5_GET(query_packet_reformat_context_in,
 582					  in, packet_reformat_id));
 583		break;
 584	default:
 585		obj_id = 0;
 586	}
 587
 588	return obj_id;
 589}
 590
 591static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 592				 struct ib_uobject *uobj, const void *in)
 593{
 594	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
 595	u64 obj_id = devx_get_obj_id(in);
 596
 597	if (!obj_id)
 598		return false;
 599
 600	switch (uobj_get_object_id(uobj)) {
 601	case UVERBS_OBJECT_CQ:
 602		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 603				      to_mcq(uobj->object)->mcq.cqn) ==
 604				      obj_id;
 605
 606	case UVERBS_OBJECT_SRQ:
 607	{
 608		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
 609		u16 opcode;
 610
 611		switch (srq->common.res) {
 612		case MLX5_RES_XSRQ:
 613			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
 614			break;
 615		case MLX5_RES_XRQ:
 616			opcode = MLX5_CMD_OP_CREATE_XRQ;
 617			break;
 618		default:
 619			if (!dev->mdev->issi)
 620				opcode = MLX5_CMD_OP_CREATE_SRQ;
 621			else
 622				opcode = MLX5_CMD_OP_CREATE_RMP;
 623		}
 624
 625		return get_enc_obj_id(opcode,
 626				      to_msrq(uobj->object)->msrq.srqn) ==
 627				      obj_id;
 628	}
 629
 630	case UVERBS_OBJECT_QP:
 631	{
 632		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
 633
 634		if (qp->type == IB_QPT_RAW_PACKET ||
 635		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
 636			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
 637							 &qp->raw_packet_qp;
 638			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 639			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
 640
 641			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 642					       rq->base.mqp.qpn) == obj_id ||
 643				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 644					       sq->base.mqp.qpn) == obj_id ||
 645				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 646					       rq->tirn) == obj_id ||
 647				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 648					       sq->tisn) == obj_id);
 649		}
 650
 651		if (qp->type == MLX5_IB_QPT_DCT)
 652			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 653					      qp->dct.mdct.mqp.qpn) == obj_id;
 654		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 655				      qp->ibqp.qp_num) == obj_id;
 656	}
 657
 658	case UVERBS_OBJECT_WQ:
 659		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 660				      to_mrwq(uobj->object)->core_qp.qpn) ==
 661				      obj_id;
 662
 663	case UVERBS_OBJECT_RWQ_IND_TBL:
 664		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 665				      to_mrwq_ind_table(uobj->object)->rqtn) ==
 666				      obj_id;
 667
 668	case MLX5_IB_OBJECT_DEVX_OBJ:
 669	{
 670		u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 671		struct devx_obj *devx_uobj = uobj->object;
 672
 673		if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
 674		    devx_uobj->flow_counter_bulk_size) {
 675			u64 end;
 676
 677			end = devx_uobj->obj_id +
 678				devx_uobj->flow_counter_bulk_size;
 679			return devx_uobj->obj_id <= obj_id && end > obj_id;
 680		}
 681
 682		return devx_uobj->obj_id == obj_id;
 683	}
 684
 685	default:
 686		return false;
 687	}
 688}
 689
 690static void devx_set_umem_valid(const void *in)
 691{
 692	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 693
 694	switch (opcode) {
 695	case MLX5_CMD_OP_CREATE_MKEY:
 696		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
 697		break;
 698	case MLX5_CMD_OP_CREATE_CQ:
 699	{
 700		void *cqc;
 701
 702		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
 703		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 704		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
 705		break;
 706	}
 707	case MLX5_CMD_OP_CREATE_QP:
 708	{
 709		void *qpc;
 710
 711		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 712		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
 713		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
 714		break;
 715	}
 716
 717	case MLX5_CMD_OP_CREATE_RQ:
 718	{
 719		void *rqc, *wq;
 720
 721		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
 722		wq  = MLX5_ADDR_OF(rqc, rqc, wq);
 723		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 724		MLX5_SET(wq, wq, wq_umem_valid, 1);
 725		break;
 726	}
 727
 728	case MLX5_CMD_OP_CREATE_SQ:
 729	{
 730		void *sqc, *wq;
 731
 732		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
 733		wq = MLX5_ADDR_OF(sqc, sqc, wq);
 734		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 735		MLX5_SET(wq, wq, wq_umem_valid, 1);
 736		break;
 737	}
 738
 739	case MLX5_CMD_OP_MODIFY_CQ:
 740		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
 741		break;
 742
 743	case MLX5_CMD_OP_CREATE_RMP:
 744	{
 745		void *rmpc, *wq;
 746
 747		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
 748		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
 749		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 750		MLX5_SET(wq, wq, wq_umem_valid, 1);
 751		break;
 752	}
 753
 754	case MLX5_CMD_OP_CREATE_XRQ:
 755	{
 756		void *xrqc, *wq;
 757
 758		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
 759		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
 760		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 761		MLX5_SET(wq, wq, wq_umem_valid, 1);
 762		break;
 763	}
 764
 765	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 766	{
 767		void *xrc_srqc;
 768
 769		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
 770		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
 771					xrc_srq_context_entry);
 772		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
 773		break;
 774	}
 775
 776	default:
 777		return;
 778	}
 779}
 780
 781static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
 782{
 783	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 784
 785	switch (*opcode) {
 786	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 787	case MLX5_CMD_OP_CREATE_MKEY:
 788	case MLX5_CMD_OP_CREATE_CQ:
 789	case MLX5_CMD_OP_ALLOC_PD:
 790	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
 791	case MLX5_CMD_OP_CREATE_RMP:
 792	case MLX5_CMD_OP_CREATE_SQ:
 793	case MLX5_CMD_OP_CREATE_RQ:
 794	case MLX5_CMD_OP_CREATE_RQT:
 795	case MLX5_CMD_OP_CREATE_TIR:
 796	case MLX5_CMD_OP_CREATE_TIS:
 797	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 798	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
 799	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 800	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 801	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
 802	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
 803	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
 804	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 805	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 806	case MLX5_CMD_OP_CREATE_QP:
 807	case MLX5_CMD_OP_CREATE_SRQ:
 808	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 809	case MLX5_CMD_OP_CREATE_DCT:
 810	case MLX5_CMD_OP_CREATE_XRQ:
 811	case MLX5_CMD_OP_ATTACH_TO_MCG:
 812	case MLX5_CMD_OP_ALLOC_XRCD:
 813		return true;
 814	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 815	{
 816		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
 817		if (op_mod == 0)
 818			return true;
 819		return false;
 820	}
 821	case MLX5_CMD_OP_CREATE_PSV:
 822	{
 823		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
 824
 825		if (num_psv == 1)
 826			return true;
 827		return false;
 828	}
 829	default:
 830		return false;
 831	}
 832}
 833
 834static bool devx_is_obj_modify_cmd(const void *in)
 835{
 836	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 837
 838	switch (opcode) {
 839	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
 840	case MLX5_CMD_OP_MODIFY_CQ:
 841	case MLX5_CMD_OP_MODIFY_RMP:
 842	case MLX5_CMD_OP_MODIFY_SQ:
 843	case MLX5_CMD_OP_MODIFY_RQ:
 844	case MLX5_CMD_OP_MODIFY_RQT:
 845	case MLX5_CMD_OP_MODIFY_TIR:
 846	case MLX5_CMD_OP_MODIFY_TIS:
 847	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
 848	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
 849	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 850	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 851	case MLX5_CMD_OP_RST2INIT_QP:
 852	case MLX5_CMD_OP_INIT2RTR_QP:
 853	case MLX5_CMD_OP_INIT2INIT_QP:
 854	case MLX5_CMD_OP_RTR2RTS_QP:
 855	case MLX5_CMD_OP_RTS2RTS_QP:
 856	case MLX5_CMD_OP_SQERR2RTS_QP:
 857	case MLX5_CMD_OP_2ERR_QP:
 858	case MLX5_CMD_OP_2RST_QP:
 859	case MLX5_CMD_OP_ARM_XRC_SRQ:
 860	case MLX5_CMD_OP_ARM_RQ:
 861	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
 862	case MLX5_CMD_OP_ARM_XRQ:
 863	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
 864	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
 865	case MLX5_CMD_OP_MODIFY_XRQ:
 866		return true;
 867	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 868	{
 869		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
 870
 871		if (op_mod == 1)
 872			return true;
 873		return false;
 874	}
 875	default:
 876		return false;
 877	}
 878}
 879
 880static bool devx_is_obj_query_cmd(const void *in)
 881{
 882	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 883
 884	switch (opcode) {
 885	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
 886	case MLX5_CMD_OP_QUERY_MKEY:
 887	case MLX5_CMD_OP_QUERY_CQ:
 888	case MLX5_CMD_OP_QUERY_RMP:
 889	case MLX5_CMD_OP_QUERY_SQ:
 890	case MLX5_CMD_OP_QUERY_RQ:
 891	case MLX5_CMD_OP_QUERY_RQT:
 892	case MLX5_CMD_OP_QUERY_TIR:
 893	case MLX5_CMD_OP_QUERY_TIS:
 894	case MLX5_CMD_OP_QUERY_Q_COUNTER:
 895	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 896	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
 897	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 898	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 899	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
 900	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
 901	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
 902	case MLX5_CMD_OP_QUERY_QP:
 903	case MLX5_CMD_OP_QUERY_SRQ:
 904	case MLX5_CMD_OP_QUERY_XRC_SRQ:
 905	case MLX5_CMD_OP_QUERY_DCT:
 906	case MLX5_CMD_OP_QUERY_XRQ:
 907	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
 908	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
 909	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
 910		return true;
 911	default:
 912		return false;
 913	}
 914}
 915
 916static bool devx_is_whitelist_cmd(void *in)
 917{
 918	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 919
 920	switch (opcode) {
 921	case MLX5_CMD_OP_QUERY_HCA_CAP:
 922	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
 923	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
 924	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 925		return true;
 926	default:
 927		return false;
 928	}
 929}
 930
 931static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
 932{
 933	if (devx_is_whitelist_cmd(cmd_in)) {
 934		struct mlx5_ib_dev *dev;
 935
 936		if (c->devx_uid)
 937			return c->devx_uid;
 938
 939		dev = to_mdev(c->ibucontext.device);
 940		if (dev->devx_whitelist_uid)
 941			return dev->devx_whitelist_uid;
 942
 943		return -EOPNOTSUPP;
 944	}
 945
 946	if (!c->devx_uid)
 947		return -EINVAL;
 948
 949	return c->devx_uid;
 950}
 951
 952static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
 953{
 954	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 955
 956	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
 957	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
 958	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
 959	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
 960	     opcode < MLX5_CMD_OP_GENERAL_END))
 961		return true;
 962
 963	switch (opcode) {
 964	case MLX5_CMD_OP_QUERY_HCA_CAP:
 965	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
 966	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
 967	case MLX5_CMD_OP_QUERY_VPORT_STATE:
 968	case MLX5_CMD_OP_QUERY_ADAPTER:
 969	case MLX5_CMD_OP_QUERY_ISSI:
 970	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
 971	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
 972	case MLX5_CMD_OP_QUERY_VNIC_ENV:
 973	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 974	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
 975	case MLX5_CMD_OP_NOP:
 976	case MLX5_CMD_OP_QUERY_CONG_STATUS:
 977	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
 978	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
 979	case MLX5_CMD_OP_QUERY_LAG:
 980	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 981		return true;
 982	default:
 983		return false;
 984	}
 985}
 986
 987static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 988	struct uverbs_attr_bundle *attrs)
 989{
 990	struct mlx5_ib_ucontext *c;
 991	struct mlx5_ib_dev *dev;
 992	int user_vector;
 993	int dev_eqn;
 994	int err;
 995
 996	if (uverbs_copy_from(&user_vector, attrs,
 997			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
 998		return -EFAULT;
 999
1000	c = devx_ufile2uctx(attrs);
1001	if (IS_ERR(c))
1002		return PTR_ERR(c);
1003	dev = to_mdev(c->ibucontext.device);
1004
1005	err = mlx5_comp_eqn_get(dev->mdev, user_vector, &dev_eqn);
1006	if (err < 0)
1007		return err;
1008
1009	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
1010			   &dev_eqn, sizeof(dev_eqn)))
1011		return -EFAULT;
1012
1013	return 0;
1014}
1015
1016/*
1017 *Security note:
1018 * The hardware protection mechanism works like this: Each device object that
1019 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
1020 * the device specification manual) upon its creation. Then upon doorbell,
1021 * hardware fetches the object context for which the doorbell was rang, and
1022 * validates that the UAR through which the DB was rang matches the UAR ID
1023 * of the object.
1024 * If no match the doorbell is silently ignored by the hardware. Of course,
1025 * the user cannot ring a doorbell on a UAR that was not mapped to it.
1026 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
1027 * mailboxes (except tagging them with UID), we expose to the user its UAR
1028 * ID, so it can embed it in these objects in the expected specification
1029 * format. So the only thing the user can do is hurt itself by creating a
1030 * QP/SQ/CQ with a UAR ID other than his, and then in this case other users
1031 * may ring a doorbell on its objects.
1032 * The consequence of that will be that another user can schedule a QP/SQ
1033 * of the buggy user for execution (just insert it to the hardware schedule
1034 * queue or arm its CQ for event generation), no further harm is expected.
1035 */
1036static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
1037	struct uverbs_attr_bundle *attrs)
1038{
1039	struct mlx5_ib_ucontext *c;
1040	struct mlx5_ib_dev *dev;
1041	u32 user_idx;
1042	s32 dev_idx;
1043
1044	c = devx_ufile2uctx(attrs);
1045	if (IS_ERR(c))
1046		return PTR_ERR(c);
1047	dev = to_mdev(c->ibucontext.device);
1048
1049	if (uverbs_copy_from(&user_idx, attrs,
1050			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
1051		return -EFAULT;
1052
1053	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
1054	if (dev_idx < 0)
1055		return dev_idx;
1056
1057	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1058			   &dev_idx, sizeof(dev_idx)))
1059		return -EFAULT;
1060
1061	return 0;
1062}
1063
1064static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
1065	struct uverbs_attr_bundle *attrs)
1066{
1067	struct mlx5_ib_ucontext *c;
1068	struct mlx5_ib_dev *dev;
1069	void *cmd_in = uverbs_attr_get_alloced_ptr(
1070		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
1071	int cmd_out_len = uverbs_attr_get_len(attrs,
1072					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
1073	void *cmd_out;
1074	int err, err2;
1075	int uid;
1076
1077	c = devx_ufile2uctx(attrs);
1078	if (IS_ERR(c))
1079		return PTR_ERR(c);
1080	dev = to_mdev(c->ibucontext.device);
1081
1082	uid = devx_get_uid(c, cmd_in);
1083	if (uid < 0)
1084		return uid;
1085
1086	/* Only white list of some general HCA commands are allowed for this method. */
1087	if (!devx_is_general_cmd(cmd_in, dev))
1088		return -EINVAL;
1089
1090	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1091	if (IS_ERR(cmd_out))
1092		return PTR_ERR(cmd_out);
1093
1094	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1095	err = mlx5_cmd_do(dev->mdev, cmd_in,
1096			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
1097			  cmd_out, cmd_out_len);
1098	if (err && err != -EREMOTEIO)
1099		return err;
1100
1101	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
1102			      cmd_out_len);
1103
1104	return err2 ?: err;
1105}
1106
1107static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1108				       u32 *dinlen,
1109				       u32 *obj_id)
1110{
1111	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
1112	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1113
1114	*obj_id = devx_get_created_obj_id(in, out, opcode);
1115	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1116	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1117
1118	switch (opcode) {
1119	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
1120		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
1121		MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1122		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
1123			 MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
1124		break;
1125
1126	case MLX5_CMD_OP_CREATE_UMEM:
1127		MLX5_SET(destroy_umem_in, din, opcode,
1128			 MLX5_CMD_OP_DESTROY_UMEM);
1129		MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
1130		break;
1131	case MLX5_CMD_OP_CREATE_MKEY:
1132		MLX5_SET(destroy_mkey_in, din, opcode,
1133			 MLX5_CMD_OP_DESTROY_MKEY);
1134		MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
1135		break;
1136	case MLX5_CMD_OP_CREATE_CQ:
1137		MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1138		MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
1139		break;
1140	case MLX5_CMD_OP_ALLOC_PD:
1141		MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1142		MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
1143		break;
1144	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1145		MLX5_SET(dealloc_transport_domain_in, din, opcode,
1146			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1147		MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
1148			 *obj_id);
1149		break;
1150	case MLX5_CMD_OP_CREATE_RMP:
1151		MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1152		MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
1153		break;
1154	case MLX5_CMD_OP_CREATE_SQ:
1155		MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1156		MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
1157		break;
1158	case MLX5_CMD_OP_CREATE_RQ:
1159		MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1160		MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
1161		break;
1162	case MLX5_CMD_OP_CREATE_RQT:
1163		MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1164		MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
1165		break;
1166	case MLX5_CMD_OP_CREATE_TIR:
1167		MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1168		MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
1169		break;
1170	case MLX5_CMD_OP_CREATE_TIS:
1171		MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1172		MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
1173		break;
1174	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1175		MLX5_SET(dealloc_q_counter_in, din, opcode,
1176			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1177		MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
1178		break;
1179	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1180		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1181		MLX5_SET(destroy_flow_table_in, din, other_vport,
1182			 MLX5_GET(create_flow_table_in,  in, other_vport));
1183		MLX5_SET(destroy_flow_table_in, din, vport_number,
1184			 MLX5_GET(create_flow_table_in,  in, vport_number));
1185		MLX5_SET(destroy_flow_table_in, din, table_type,
1186			 MLX5_GET(create_flow_table_in,  in, table_type));
1187		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1188		MLX5_SET(destroy_flow_table_in, din, opcode,
1189			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1190		break;
1191	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1192		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1193		MLX5_SET(destroy_flow_group_in, din, other_vport,
1194			 MLX5_GET(create_flow_group_in, in, other_vport));
1195		MLX5_SET(destroy_flow_group_in, din, vport_number,
1196			 MLX5_GET(create_flow_group_in, in, vport_number));
1197		MLX5_SET(destroy_flow_group_in, din, table_type,
1198			 MLX5_GET(create_flow_group_in, in, table_type));
1199		MLX5_SET(destroy_flow_group_in, din, table_id,
1200			 MLX5_GET(create_flow_group_in, in, table_id));
1201		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1202		MLX5_SET(destroy_flow_group_in, din, opcode,
1203			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1204		break;
1205	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1206		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1207		MLX5_SET(delete_fte_in, din, other_vport,
1208			 MLX5_GET(set_fte_in,  in, other_vport));
1209		MLX5_SET(delete_fte_in, din, vport_number,
1210			 MLX5_GET(set_fte_in, in, vport_number));
1211		MLX5_SET(delete_fte_in, din, table_type,
1212			 MLX5_GET(set_fte_in, in, table_type));
1213		MLX5_SET(delete_fte_in, din, table_id,
1214			 MLX5_GET(set_fte_in, in, table_id));
1215		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1216		MLX5_SET(delete_fte_in, din, opcode,
1217			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1218		break;
1219	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1220		MLX5_SET(dealloc_flow_counter_in, din, opcode,
1221			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1222		MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
1223			 *obj_id);
1224		break;
1225	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
1226		MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
1227			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
1228		MLX5_SET(dealloc_packet_reformat_context_in, din,
1229			 packet_reformat_id, *obj_id);
1230		break;
1231	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
1232		MLX5_SET(dealloc_modify_header_context_in, din, opcode,
1233			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
1234		MLX5_SET(dealloc_modify_header_context_in, din,
1235			 modify_header_id, *obj_id);
1236		break;
1237	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
1238		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
1239		MLX5_SET(destroy_scheduling_element_in, din,
1240			 scheduling_hierarchy,
1241			 MLX5_GET(create_scheduling_element_in, in,
1242				  scheduling_hierarchy));
1243		MLX5_SET(destroy_scheduling_element_in, din,
1244			 scheduling_element_id, *obj_id);
1245		MLX5_SET(destroy_scheduling_element_in, din, opcode,
1246			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
1247		break;
1248	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
1249		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
1250		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
1251		MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
1252			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
1253		break;
1254	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
1255		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
1256		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
1257		MLX5_SET(delete_l2_table_entry_in, din, opcode,
1258			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
1259		break;
1260	case MLX5_CMD_OP_CREATE_QP:
1261		MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
1262		MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
1263		break;
1264	case MLX5_CMD_OP_CREATE_SRQ:
1265		MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
1266		MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
1267		break;
1268	case MLX5_CMD_OP_CREATE_XRC_SRQ:
1269		MLX5_SET(destroy_xrc_srq_in, din, opcode,
1270			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
1271		MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
1272		break;
1273	case MLX5_CMD_OP_CREATE_DCT:
1274		MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
1275		MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
1276		break;
1277	case MLX5_CMD_OP_CREATE_XRQ:
1278		MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
1279		MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
1280		break;
1281	case MLX5_CMD_OP_ATTACH_TO_MCG:
1282		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
1283		MLX5_SET(detach_from_mcg_in, din, qpn,
1284			 MLX5_GET(attach_to_mcg_in, in, qpn));
1285		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
1286		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
1287		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
1288		MLX5_SET(detach_from_mcg_in, din, opcode,
1289			 MLX5_CMD_OP_DETACH_FROM_MCG);
1290		MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
1291		break;
1292	case MLX5_CMD_OP_ALLOC_XRCD:
1293		MLX5_SET(dealloc_xrcd_in, din, opcode,
1294			 MLX5_CMD_OP_DEALLOC_XRCD);
1295		MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
1296		break;
1297	case MLX5_CMD_OP_CREATE_PSV:
1298		MLX5_SET(destroy_psv_in, din, opcode,
1299			 MLX5_CMD_OP_DESTROY_PSV);
1300		MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
1301		break;
1302	default:
1303		/* The entry must match to one of the devx_is_obj_create_cmd */
1304		WARN_ON(true);
1305		break;
1306	}
1307}
1308
1309static int devx_handle_mkey_indirect(struct devx_obj *obj,
1310				     struct mlx5_ib_dev *dev,
1311				     void *in, void *out)
1312{
1313	struct mlx5_ib_mkey *mkey = &obj->mkey;
1314	void *mkc;
1315	u8 key;
1316
1317	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1318	key = MLX5_GET(mkc, mkc, mkey_7_0);
1319	mkey->key = mlx5_idx_to_mkey(
1320			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
1321	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
1322	mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
1323	init_waitqueue_head(&mkey->wait);
1324
1325	return mlx5r_store_odp_mkey(dev, mkey);
1326}
1327
1328static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1329				   struct devx_obj *obj,
1330				   void *in, int in_len)
1331{
1332	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1333			MLX5_FLD_SZ_BYTES(create_mkey_in,
1334			memory_key_mkey_entry);
1335	void *mkc;
1336	u8 access_mode;
1337
1338	if (in_len < min_len)
1339		return -EINVAL;
1340
1341	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1342
1343	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
1344	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1345
1346	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
1347		access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
1348		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1349			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
1350		return 0;
1351	}
1352
1353	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1354	return 0;
1355}
1356
1357static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
1358				      struct devx_event_subscription *sub)
1359{
1360	struct devx_event *event;
1361	struct devx_obj_event *xa_val_level2;
1362
1363	if (sub->is_cleaned)
1364		return;
1365
1366	sub->is_cleaned = 1;
1367	list_del_rcu(&sub->xa_list);
1368
1369	if (list_empty(&sub->obj_list))
1370		return;
1371
1372	list_del_rcu(&sub->obj_list);
1373	/* check whether key level 1 for this obj_sub_list is empty */
1374	event = xa_load(&dev->devx_event_table.event_xa,
1375			sub->xa_key_level1);
1376	WARN_ON(!event);
1377
1378	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
1379	if (list_empty(&xa_val_level2->obj_sub_list)) {
1380		xa_erase(&event->object_ids,
1381			 sub->xa_key_level2);
1382		kfree_rcu(xa_val_level2, rcu);
1383	}
1384}
1385
1386static int devx_obj_cleanup(struct ib_uobject *uobject,
1387			    enum rdma_remove_reason why,
1388			    struct uverbs_attr_bundle *attrs)
1389{
1390	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1391	struct mlx5_devx_event_table *devx_event_table;
1392	struct devx_obj *obj = uobject->object;
1393	struct devx_event_subscription *sub_entry, *tmp;
1394	struct mlx5_ib_dev *dev;
1395	int ret;
1396
1397	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1398	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
1399	    xa_erase(&obj->ib_dev->odp_mkeys,
1400		     mlx5_base_mkey(obj->mkey.key)))
1401		/*
1402		 * The pagefault_single_data_segment() does commands against
1403		 * the mmkey, we must wait for that to stop before freeing the
1404		 * mkey, as another allocation could get the same mkey #.
1405		 */
1406		mlx5r_deref_wait_odp_mkey(&obj->mkey);
1407
1408	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1409		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1410	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1411		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1412	else
1413		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
1414				    obj->dinlen, out, sizeof(out));
1415	if (ret)
1416		return ret;
1417
1418	devx_event_table = &dev->devx_event_table;
1419
1420	mutex_lock(&devx_event_table->event_xa_lock);
1421	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
1422		devx_cleanup_subscription(dev, sub_entry);
1423	mutex_unlock(&devx_event_table->event_xa_lock);
1424
1425	kfree(obj);
1426	return ret;
1427}
1428
1429static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
1430{
1431	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
1432	struct mlx5_devx_event_table *table;
1433	struct devx_event *event;
1434	struct devx_obj_event *obj_event;
1435	u32 obj_id = mcq->cqn;
1436
1437	table = &obj->ib_dev->devx_event_table;
1438	rcu_read_lock();
1439	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
1440	if (!event)
1441		goto out;
1442
1443	obj_event = xa_load(&event->object_ids, obj_id);
1444	if (!obj_event)
1445		goto out;
1446
1447	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
1448out:
1449	rcu_read_unlock();
1450}
1451
1452static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
1453{
1454	if (!MLX5_CAP_GEN(dev->mdev, apu) ||
1455	    !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
1456		return false;
1457
1458	return true;
1459}
1460
1461static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1462	struct uverbs_attr_bundle *attrs)
1463{
1464	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1465	int cmd_out_len =  uverbs_attr_get_len(attrs,
1466					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
1467	int cmd_in_len = uverbs_attr_get_len(attrs,
1468					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1469	void *cmd_out;
1470	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1471		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
1472	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1473		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1474	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1475	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1476	struct devx_obj *obj;
1477	u16 obj_type = 0;
1478	int err, err2 = 0;
1479	int uid;
1480	u32 obj_id;
1481	u16 opcode;
1482
1483	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1484		return -EINVAL;
1485
1486	uid = devx_get_uid(c, cmd_in);
1487	if (uid < 0)
1488		return uid;
1489
1490	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
1491		return -EINVAL;
1492
1493	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1494	if (IS_ERR(cmd_out))
1495		return PTR_ERR(cmd_out);
1496
1497	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
1498	if (!obj)
1499		return -ENOMEM;
1500
1501	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1502	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
1503		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
1504		if (err)
1505			goto obj_free;
1506	} else {
1507		devx_set_umem_valid(cmd_in);
1508	}
1509
1510	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1511		obj->flags |= DEVX_OBJ_FLAGS_DCT;
1512		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
1513					   cmd_in_len, cmd_out, cmd_out_len);
1514	} else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
1515		   !is_apu_cq(dev, cmd_in)) {
1516		obj->flags |= DEVX_OBJ_FLAGS_CQ;
1517		obj->core_cq.comp = devx_cq_comp;
1518		err = mlx5_create_cq(dev->mdev, &obj->core_cq,
1519				     cmd_in, cmd_in_len, cmd_out,
1520				     cmd_out_len);
1521	} else {
1522		err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
1523				  cmd_out, cmd_out_len);
1524	}
1525
1526	if (err == -EREMOTEIO)
1527		err2 = uverbs_copy_to(attrs,
1528				      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
1529				      cmd_out, cmd_out_len);
1530	if (err)
1531		goto obj_free;
1532
1533	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
1534		u32 bulk = MLX5_GET(alloc_flow_counter_in,
1535				    cmd_in,
1536				    flow_counter_bulk_log_size);
1537
1538		if (bulk)
1539			bulk = 1 << bulk;
1540		else
1541			bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
1542						cmd_in,
1543						flow_counter_bulk);
1544		obj->flow_counter_bulk_size = bulk;
1545	}
1546
1547	uobj->object = obj;
1548	INIT_LIST_HEAD(&obj->event_sub);
1549	obj->ib_dev = dev;
1550	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
1551				   &obj_id);
1552	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
1553
1554	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
1555	if (err)
1556		goto obj_destroy;
1557
1558	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
1559		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
1560	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
1561
1562	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1563		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
1564		if (err)
1565			goto obj_destroy;
1566	}
1567	return 0;
1568
1569obj_destroy:
1570	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1571		mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1572	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1573		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1574	else
1575		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
1576			      sizeof(out));
1577obj_free:
1578	kfree(obj);
1579	return err2 ?: err;
1580}
1581
1582static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
1583	struct uverbs_attr_bundle *attrs)
1584{
1585	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1586	int cmd_out_len = uverbs_attr_get_len(attrs,
1587					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1588	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1589							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
1590	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1591		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1592	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1593	void *cmd_out;
1594	int err, err2;
1595	int uid;
1596
1597	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1598		return -EINVAL;
1599
1600	uid = devx_get_uid(c, cmd_in);
1601	if (uid < 0)
1602		return uid;
1603
1604	if (!devx_is_obj_modify_cmd(cmd_in))
1605		return -EINVAL;
1606
1607	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1608		return -EINVAL;
1609
1610	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1611	if (IS_ERR(cmd_out))
1612		return PTR_ERR(cmd_out);
1613
1614	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1615	devx_set_umem_valid(cmd_in);
1616
1617	err = mlx5_cmd_do(mdev->mdev, cmd_in,
1618			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1619			  cmd_out, cmd_out_len);
1620	if (err && err != -EREMOTEIO)
1621		return err;
1622
1623	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1624			      cmd_out, cmd_out_len);
1625
1626	return err2 ?: err;
1627}
1628
1629static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
1630	struct uverbs_attr_bundle *attrs)
1631{
1632	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1633	int cmd_out_len = uverbs_attr_get_len(attrs,
1634					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1635	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1636							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
1637	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1638		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1639	void *cmd_out;
1640	int err, err2;
1641	int uid;
1642	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1643
1644	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1645		return -EINVAL;
1646
1647	uid = devx_get_uid(c, cmd_in);
1648	if (uid < 0)
1649		return uid;
1650
1651	if (!devx_is_obj_query_cmd(cmd_in))
1652		return -EINVAL;
1653
1654	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1655		return -EINVAL;
1656
1657	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1658	if (IS_ERR(cmd_out))
1659		return PTR_ERR(cmd_out);
1660
1661	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1662	err = mlx5_cmd_do(mdev->mdev, cmd_in,
1663			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1664			  cmd_out, cmd_out_len);
1665	if (err && err != -EREMOTEIO)
1666		return err;
1667
1668	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1669			      cmd_out, cmd_out_len);
1670
1671	return err2 ?: err;
1672}
1673
1674struct devx_async_event_queue {
1675	spinlock_t		lock;
1676	wait_queue_head_t	poll_wait;
1677	struct list_head	event_list;
1678	atomic_t		bytes_in_use;
1679	u8			is_destroyed:1;
1680};
1681
1682struct devx_async_cmd_event_file {
1683	struct ib_uobject		uobj;
1684	struct devx_async_event_queue	ev_queue;
1685	struct mlx5_async_ctx		async_ctx;
1686};
1687
1688static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1689{
1690	spin_lock_init(&ev_queue->lock);
1691	INIT_LIST_HEAD(&ev_queue->event_list);
1692	init_waitqueue_head(&ev_queue->poll_wait);
1693	atomic_set(&ev_queue->bytes_in_use, 0);
1694	ev_queue->is_destroyed = 0;
1695}
1696
1697static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1698	struct uverbs_attr_bundle *attrs)
1699{
1700	struct devx_async_cmd_event_file *ev_file;
1701
1702	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1703		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
1704	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
1705
1706	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1707			       uobj);
1708	devx_init_event_queue(&ev_file->ev_queue);
1709	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1710	return 0;
1711}
1712
1713static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
1714	struct uverbs_attr_bundle *attrs)
1715{
1716	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1717		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
1718	struct devx_async_event_file *ev_file;
1719	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1720		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1721	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1722	u32 flags;
1723	int err;
1724
1725	err = uverbs_get_flags32(&flags, attrs,
1726		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
1727		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
1728
1729	if (err)
1730		return err;
1731
1732	ev_file = container_of(uobj, struct devx_async_event_file,
1733			       uobj);
1734	spin_lock_init(&ev_file->lock);
1735	INIT_LIST_HEAD(&ev_file->event_list);
1736	init_waitqueue_head(&ev_file->poll_wait);
1737	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
1738		ev_file->omit_data = 1;
1739	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
1740	ev_file->dev = dev;
1741	get_device(&dev->ib_dev.dev);
1742	return 0;
1743}
1744
1745static void devx_query_callback(int status, struct mlx5_async_work *context)
1746{
1747	struct devx_async_data *async_data =
1748		container_of(context, struct devx_async_data, cb_work);
1749	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
1750	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
1751	unsigned long flags;
1752
1753	/*
1754	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
1755	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
1756	 * routine returns, ensuring that it always remains valid here.
1757	 */
1758	spin_lock_irqsave(&ev_queue->lock, flags);
1759	list_add_tail(&async_data->list, &ev_queue->event_list);
1760	spin_unlock_irqrestore(&ev_queue->lock, flags);
1761
1762	wake_up_interruptible(&ev_queue->poll_wait);
1763}
1764
1765#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1766
1767static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
1768	struct uverbs_attr_bundle *attrs)
1769{
1770	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
1771				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
1772	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1773				attrs,
1774				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
1775	u16 cmd_out_len;
1776	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1777		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1778	struct ib_uobject *fd_uobj;
1779	int err;
1780	int uid;
1781	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1782	struct devx_async_cmd_event_file *ev_file;
1783	struct devx_async_data *async_data;
1784
1785	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1786		return -EINVAL;
1787
1788	uid = devx_get_uid(c, cmd_in);
1789	if (uid < 0)
1790		return uid;
1791
1792	if (!devx_is_obj_query_cmd(cmd_in))
1793		return -EINVAL;
1794
1795	err = uverbs_get_const(&cmd_out_len, attrs,
1796			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
1797	if (err)
1798		return err;
1799
1800	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1801		return -EINVAL;
1802
1803	fd_uobj = uverbs_attr_get_uobject(attrs,
1804				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
1805	if (IS_ERR(fd_uobj))
1806		return PTR_ERR(fd_uobj);
1807
1808	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1809			       uobj);
1810
1811	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
1812			MAX_ASYNC_BYTES_IN_USE) {
1813		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1814		return -EAGAIN;
1815	}
1816
1817	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
1818					  cmd_out_len), GFP_KERNEL);
1819	if (!async_data) {
1820		err = -ENOMEM;
1821		goto sub_bytes;
1822	}
1823
1824	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
1825			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
1826	if (err)
1827		goto free_async;
1828
1829	async_data->cmd_out_len = cmd_out_len;
1830	async_data->mdev = mdev;
1831	async_data->ev_file = ev_file;
1832
1833	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1834	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
1835		    uverbs_attr_get_len(attrs,
1836				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
1837		    async_data->hdr.out_data,
1838		    async_data->cmd_out_len,
1839		    devx_query_callback, &async_data->cb_work);
1840
1841	if (err)
1842		goto free_async;
1843
1844	return 0;
1845
1846free_async:
1847	kvfree(async_data);
1848sub_bytes:
1849	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1850	return err;
1851}
1852
1853static void
1854subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
1855			   u32 key_level1,
1856			   bool is_level2,
1857			   u32 key_level2)
1858{
1859	struct devx_event *event;
1860	struct devx_obj_event *xa_val_level2;
1861
1862	/* Level 1 is valid for future use, no need to free */
1863	if (!is_level2)
1864		return;
1865
1866	event = xa_load(&devx_event_table->event_xa, key_level1);
1867	WARN_ON(!event);
1868
1869	xa_val_level2 = xa_load(&event->object_ids,
1870				key_level2);
1871	if (list_empty(&xa_val_level2->obj_sub_list)) {
1872		xa_erase(&event->object_ids,
1873			 key_level2);
1874		kfree_rcu(xa_val_level2, rcu);
1875	}
1876}
1877
1878static int
1879subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
1880			 u32 key_level1,
1881			 bool is_level2,
1882			 u32 key_level2)
1883{
1884	struct devx_obj_event *obj_event;
1885	struct devx_event *event;
1886	int err;
1887
1888	event = xa_load(&devx_event_table->event_xa, key_level1);
1889	if (!event) {
1890		event = kzalloc(sizeof(*event), GFP_KERNEL);
1891		if (!event)
1892			return -ENOMEM;
1893
1894		INIT_LIST_HEAD(&event->unaffiliated_list);
1895		xa_init(&event->object_ids);
1896
1897		err = xa_insert(&devx_event_table->event_xa,
1898				key_level1,
1899				event,
1900				GFP_KERNEL);
1901		if (err) {
1902			kfree(event);
1903			return err;
1904		}
1905	}
1906
1907	if (!is_level2)
1908		return 0;
1909
1910	obj_event = xa_load(&event->object_ids, key_level2);
1911	if (!obj_event) {
1912		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
1913		if (!obj_event)
1914			/* Level1 is valid for future use, no need to free */
1915			return -ENOMEM;
1916
1917		err = xa_insert(&event->object_ids,
1918				key_level2,
1919				obj_event,
1920				GFP_KERNEL);
1921		if (err) {
1922			kfree(obj_event);
1923			return err;
1924		}
1925		INIT_LIST_HEAD(&obj_event->obj_sub_list);
1926	}
1927
1928	return 0;
1929}
1930
1931static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
1932				   struct devx_obj *obj)
1933{
1934	int i;
1935
1936	for (i = 0; i < num_events; i++) {
1937		if (obj) {
1938			if (!is_legacy_obj_event_num(event_type_num_list[i]))
1939				return false;
1940		} else if (!is_legacy_unaffiliated_event_num(
1941				event_type_num_list[i])) {
1942			return false;
1943		}
1944	}
1945
1946	return true;
1947}
1948
1949#define MAX_SUPP_EVENT_NUM 255
1950static bool is_valid_events(struct mlx5_core_dev *dev,
1951			    int num_events, u16 *event_type_num_list,
1952			    struct devx_obj *obj)
1953{
1954	__be64 *aff_events;
1955	__be64 *unaff_events;
1956	int mask_entry;
1957	int mask_bit;
1958	int i;
1959
1960	if (MLX5_CAP_GEN(dev, event_cap)) {
1961		aff_events = MLX5_CAP_DEV_EVENT(dev,
1962						user_affiliated_events);
1963		unaff_events = MLX5_CAP_DEV_EVENT(dev,
1964						  user_unaffiliated_events);
1965	} else {
1966		return is_valid_events_legacy(num_events, event_type_num_list,
1967					      obj);
1968	}
1969
1970	for (i = 0; i < num_events; i++) {
1971		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
1972			return false;
1973
1974		mask_entry = event_type_num_list[i] / 64;
1975		mask_bit = event_type_num_list[i] % 64;
1976
1977		if (obj) {
1978			/* CQ completion */
1979			if (event_type_num_list[i] == 0)
1980				continue;
1981
1982			if (!(be64_to_cpu(aff_events[mask_entry]) &
1983					(1ull << mask_bit)))
1984				return false;
1985
1986			continue;
1987		}
1988
1989		if (!(be64_to_cpu(unaff_events[mask_entry]) &
1990				(1ull << mask_bit)))
1991			return false;
1992	}
1993
1994	return true;
1995}
1996
1997#define MAX_NUM_EVENTS 16
1998static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1999	struct uverbs_attr_bundle *attrs)
2000{
2001	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
2002				attrs,
2003				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
2004	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2005		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2006	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2007	struct ib_uobject *fd_uobj;
2008	struct devx_obj *obj = NULL;
2009	struct devx_async_event_file *ev_file;
2010	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
2011	u16 *event_type_num_list;
2012	struct devx_event_subscription *event_sub, *tmp_sub;
2013	struct list_head sub_list;
2014	int redirect_fd;
2015	bool use_eventfd = false;
2016	int num_events;
 
2017	u16 obj_type = 0;
2018	u64 cookie = 0;
2019	u32 obj_id = 0;
2020	int err;
2021	int i;
2022
2023	if (!c->devx_uid)
2024		return -EINVAL;
2025
2026	if (!IS_ERR(devx_uobj)) {
2027		obj = (struct devx_obj *)devx_uobj->object;
2028		if (obj)
2029			obj_id = get_dec_obj_id(obj->obj_id);
2030	}
2031
2032	fd_uobj = uverbs_attr_get_uobject(attrs,
2033				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
2034	if (IS_ERR(fd_uobj))
2035		return PTR_ERR(fd_uobj);
2036
2037	ev_file = container_of(fd_uobj, struct devx_async_event_file,
2038			       uobj);
2039
2040	if (uverbs_attr_is_valid(attrs,
2041				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
2042		err = uverbs_copy_from(&redirect_fd, attrs,
2043			       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
2044		if (err)
2045			return err;
2046
2047		use_eventfd = true;
2048	}
2049
2050	if (uverbs_attr_is_valid(attrs,
2051				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
2052		if (use_eventfd)
2053			return -EINVAL;
2054
2055		err = uverbs_copy_from(&cookie, attrs,
2056				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
2057		if (err)
2058			return err;
2059	}
2060
2061	num_events = uverbs_attr_ptr_get_array_size(
2062		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2063		sizeof(u16));
2064
2065	if (num_events < 0)
2066		return num_events;
2067
2068	if (num_events > MAX_NUM_EVENTS)
2069		return -EINVAL;
2070
2071	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
2072			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
2073
2074	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
2075		return -EINVAL;
2076
2077	INIT_LIST_HEAD(&sub_list);
2078
2079	/* Protect from concurrent subscriptions to same XA entries to allow
2080	 * both to succeed
2081	 */
2082	mutex_lock(&devx_event_table->event_xa_lock);
2083	for (i = 0; i < num_events; i++) {
2084		u32 key_level1;
2085
2086		if (obj)
2087			obj_type = get_dec_obj_type(obj,
2088						    event_type_num_list[i]);
2089		key_level1 = event_type_num_list[i] | obj_type << 16;
2090
2091		err = subscribe_event_xa_alloc(devx_event_table,
2092					       key_level1,
2093					       obj,
2094					       obj_id);
2095		if (err)
2096			goto err;
2097
 
2098		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
2099		if (!event_sub) {
2100			err = -ENOMEM;
2101			goto err;
2102		}
2103
2104		list_add_tail(&event_sub->event_list, &sub_list);
2105		uverbs_uobject_get(&ev_file->uobj);
2106		if (use_eventfd) {
2107			event_sub->eventfd =
2108				eventfd_ctx_fdget(redirect_fd);
2109
2110			if (IS_ERR(event_sub->eventfd)) {
2111				err = PTR_ERR(event_sub->eventfd);
2112				event_sub->eventfd = NULL;
2113				goto err;
2114			}
2115		}
2116
2117		event_sub->cookie = cookie;
2118		event_sub->ev_file = ev_file;
2119		/* May be needed upon cleanup the devx object/subscription */
2120		event_sub->xa_key_level1 = key_level1;
2121		event_sub->xa_key_level2 = obj_id;
2122		INIT_LIST_HEAD(&event_sub->obj_list);
2123	}
2124
2125	/* Once all the allocations and the XA data insertions were done we
2126	 * can go ahead and add all the subscriptions to the relevant lists
2127	 * without concern of a failure.
2128	 */
2129	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2130		struct devx_event *event;
2131		struct devx_obj_event *obj_event;
2132
2133		list_del_init(&event_sub->event_list);
2134
2135		spin_lock_irq(&ev_file->lock);
2136		list_add_tail_rcu(&event_sub->file_list,
2137				  &ev_file->subscribed_events_list);
2138		spin_unlock_irq(&ev_file->lock);
2139
2140		event = xa_load(&devx_event_table->event_xa,
2141				event_sub->xa_key_level1);
2142		WARN_ON(!event);
2143
2144		if (!obj) {
2145			list_add_tail_rcu(&event_sub->xa_list,
2146					  &event->unaffiliated_list);
2147			continue;
2148		}
2149
2150		obj_event = xa_load(&event->object_ids, obj_id);
2151		WARN_ON(!obj_event);
2152		list_add_tail_rcu(&event_sub->xa_list,
2153				  &obj_event->obj_sub_list);
2154		list_add_tail_rcu(&event_sub->obj_list,
2155				  &obj->event_sub);
2156	}
2157
2158	mutex_unlock(&devx_event_table->event_xa_lock);
2159	return 0;
2160
2161err:
2162	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2163		list_del(&event_sub->event_list);
2164
2165		subscribe_event_xa_dealloc(devx_event_table,
2166					   event_sub->xa_key_level1,
2167					   obj,
2168					   obj_id);
2169
2170		if (event_sub->eventfd)
2171			eventfd_ctx_put(event_sub->eventfd);
2172		uverbs_uobject_put(&event_sub->ev_file->uobj);
2173		kfree(event_sub);
2174	}
2175
2176	mutex_unlock(&devx_event_table->event_xa_lock);
2177	return err;
2178}
2179
2180static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2181			 struct uverbs_attr_bundle *attrs,
2182			 struct devx_umem *obj, u32 access_flags)
2183{
2184	u64 addr;
2185	size_t size;
2186	int err;
2187
2188	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
2189	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
2190		return -EFAULT;
2191
2192	err = ib_check_mr_access(&dev->ib_dev, access_flags);
2193	if (err)
2194		return err;
2195
2196	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
2197		struct ib_umem_dmabuf *umem_dmabuf;
2198		int dmabuf_fd;
2199
2200		err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
2201					MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
2202		if (err)
2203			return -EFAULT;
2204
2205		umem_dmabuf = ib_umem_dmabuf_get_pinned(
2206			&dev->ib_dev, addr, size, dmabuf_fd, access_flags);
2207		if (IS_ERR(umem_dmabuf))
2208			return PTR_ERR(umem_dmabuf);
2209		obj->umem = &umem_dmabuf->umem;
2210	} else {
2211		obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
2212		if (IS_ERR(obj->umem))
2213			return PTR_ERR(obj->umem);
2214	}
2215	return 0;
2216}
2217
2218static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
2219					       unsigned long pgsz_bitmap)
2220{
2221	unsigned long page_size;
2222
2223	/* Don't bother checking larger page sizes as offset must be zero and
2224	 * total DEVX umem length must be equal to total umem length.
2225	 */
2226	pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
2227					 PAGE_SHIFT),
2228				   MLX5_ADAPTER_PAGE_SHIFT);
2229	if (!pgsz_bitmap)
2230		return 0;
2231
2232	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
2233	if (!page_size)
2234		return 0;
2235
2236	/* If the page_size is less than the CPU page size then we can use the
2237	 * offset and create a umem which is a subset of the page list.
2238	 * For larger page sizes we can't be sure the DMA  list reflects the
2239	 * VA so we must ensure that the umem extent is exactly equal to the
2240	 * page list. Reduce the page size until one of these cases is true.
2241	 */
2242	while ((ib_umem_dma_offset(umem, page_size) != 0 ||
2243		(umem->length % page_size) != 0) &&
2244		page_size > PAGE_SIZE)
2245		page_size /= 2;
2246
2247	return page_size;
2248}
2249
2250static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
2251				   struct uverbs_attr_bundle *attrs,
2252				   struct devx_umem *obj,
2253				   struct devx_umem_reg_cmd *cmd,
2254				   int access)
2255{
2256	unsigned long pgsz_bitmap;
2257	unsigned int page_size;
2258	__be64 *mtt;
2259	void *umem;
2260	int ret;
2261
2262	/*
2263	 * If the user does not pass in pgsz_bitmap then the user promises not
2264	 * to use umem_offset!=0 in any commands that allocate on top of the
2265	 * umem.
2266	 *
2267	 * If the user wants to use a umem_offset then it must pass in
2268	 * pgsz_bitmap which guides the maximum page size and thus maximum
2269	 * object alignment inside the umem. See the PRM.
2270	 *
2271	 * Users are not allowed to use IOVA here, mkeys are not supported on
2272	 * umem.
2273	 */
2274	ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
2275			MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2276			GENMASK_ULL(63,
2277				    min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
2278	if (ret)
2279		return ret;
2280
2281	page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
2282	if (!page_size)
2283		return -EINVAL;
2284
2285	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2286		     (MLX5_ST_SZ_BYTES(mtt) *
2287		      ib_umem_num_dma_blocks(obj->umem, page_size));
2288	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2289	if (IS_ERR(cmd->in))
2290		return PTR_ERR(cmd->in);
2291
2292	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2293	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2294
2295	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2296	MLX5_SET64(umem, umem, num_of_mtt,
2297		   ib_umem_num_dma_blocks(obj->umem, page_size));
2298	MLX5_SET(umem, umem, log_page_size,
2299		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
2300	MLX5_SET(umem, umem, page_offset,
2301		 ib_umem_dma_offset(obj->umem, page_size));
2302
2303	if (mlx5_umem_needs_ats(dev, obj->umem, access))
2304		MLX5_SET(umem, umem, ats, 1);
2305
2306	mlx5_ib_populate_pas(obj->umem, page_size, mtt,
2307			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
2308				     MLX5_IB_MTT_READ);
2309	return 0;
2310}
2311
2312static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2313	struct uverbs_attr_bundle *attrs)
2314{
2315	struct devx_umem_reg_cmd cmd;
2316	struct devx_umem *obj;
2317	struct ib_uobject *uobj = uverbs_attr_get_uobject(
2318		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2319	u32 obj_id;
2320	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2321		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2322	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2323	int access_flags;
2324	int err;
2325
2326	if (!c->devx_uid)
2327		return -EINVAL;
2328
2329	err = uverbs_get_flags32(&access_flags, attrs,
2330				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2331				 IB_ACCESS_LOCAL_WRITE |
2332				 IB_ACCESS_REMOTE_WRITE |
2333				 IB_ACCESS_REMOTE_READ |
2334				 IB_ACCESS_RELAXED_ORDERING);
2335	if (err)
2336		return err;
2337
2338	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
2339	if (!obj)
2340		return -ENOMEM;
2341
2342	err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
2343	if (err)
2344		goto err_obj_free;
2345
2346	err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
2347	if (err)
2348		goto err_umem_release;
2349
2350	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
2351	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
2352			    sizeof(cmd.out));
2353	if (err)
2354		goto err_umem_release;
2355
2356	obj->mdev = dev->mdev;
2357	uobj->object = obj;
2358	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
2359	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2360
2361	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
2362			     sizeof(obj_id));
2363	return err;
2364
2365err_umem_release:
2366	ib_umem_release(obj->umem);
2367err_obj_free:
2368	kfree(obj);
2369	return err;
2370}
2371
2372static int devx_umem_cleanup(struct ib_uobject *uobject,
2373			     enum rdma_remove_reason why,
2374			     struct uverbs_attr_bundle *attrs)
2375{
2376	struct devx_umem *obj = uobject->object;
2377	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
2378	int err;
2379
2380	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
2381	if (err)
2382		return err;
2383
2384	ib_umem_release(obj->umem);
2385	kfree(obj);
2386	return 0;
2387}
2388
2389static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2390				  unsigned long event_type)
2391{
2392	__be64 *unaff_events;
2393	int mask_entry;
2394	int mask_bit;
2395
2396	if (!MLX5_CAP_GEN(dev, event_cap))
2397		return is_legacy_unaffiliated_event_num(event_type);
2398
2399	unaff_events = MLX5_CAP_DEV_EVENT(dev,
2400					  user_unaffiliated_events);
2401	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2402
2403	mask_entry = event_type / 64;
2404	mask_bit = event_type % 64;
2405
2406	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
2407		return false;
2408
2409	return true;
2410}
2411
2412static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
2413{
2414	struct mlx5_eqe *eqe = data;
2415	u32 obj_id = 0;
2416
2417	switch (event_type) {
2418	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2419	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2420	case MLX5_EVENT_TYPE_PATH_MIG:
2421	case MLX5_EVENT_TYPE_COMM_EST:
2422	case MLX5_EVENT_TYPE_SQ_DRAINED:
2423	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2424	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2425	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2426	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2427	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
2428		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
2429		break;
2430	case MLX5_EVENT_TYPE_XRQ_ERROR:
2431		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
2432		break;
2433	case MLX5_EVENT_TYPE_DCT_DRAINED:
2434	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
2435		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
2436		break;
2437	case MLX5_EVENT_TYPE_CQ_ERROR:
2438		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2439		break;
2440	default:
2441		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
2442		break;
2443	}
2444
2445	return obj_id;
2446}
2447
2448static int deliver_event(struct devx_event_subscription *event_sub,
2449			 const void *data)
2450{
2451	struct devx_async_event_file *ev_file;
2452	struct devx_async_event_data *event_data;
2453	unsigned long flags;
2454
2455	ev_file = event_sub->ev_file;
2456
2457	if (ev_file->omit_data) {
2458		spin_lock_irqsave(&ev_file->lock, flags);
2459		if (!list_empty(&event_sub->event_list) ||
2460		    ev_file->is_destroyed) {
2461			spin_unlock_irqrestore(&ev_file->lock, flags);
2462			return 0;
2463		}
2464
2465		list_add_tail(&event_sub->event_list, &ev_file->event_list);
2466		spin_unlock_irqrestore(&ev_file->lock, flags);
2467		wake_up_interruptible(&ev_file->poll_wait);
2468		return 0;
2469	}
2470
2471	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
2472			     GFP_ATOMIC);
2473	if (!event_data) {
2474		spin_lock_irqsave(&ev_file->lock, flags);
2475		ev_file->is_overflow_err = 1;
2476		spin_unlock_irqrestore(&ev_file->lock, flags);
2477		return -ENOMEM;
2478	}
2479
2480	event_data->hdr.cookie = event_sub->cookie;
2481	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
2482
2483	spin_lock_irqsave(&ev_file->lock, flags);
2484	if (!ev_file->is_destroyed)
2485		list_add_tail(&event_data->list, &ev_file->event_list);
2486	else
2487		kfree(event_data);
2488	spin_unlock_irqrestore(&ev_file->lock, flags);
2489	wake_up_interruptible(&ev_file->poll_wait);
2490
2491	return 0;
2492}
2493
2494static void dispatch_event_fd(struct list_head *fd_list,
2495			      const void *data)
2496{
2497	struct devx_event_subscription *item;
2498
2499	list_for_each_entry_rcu(item, fd_list, xa_list) {
2500		if (item->eventfd)
2501			eventfd_signal(item->eventfd);
2502		else
2503			deliver_event(item, data);
2504	}
2505}
2506
2507static int devx_event_notifier(struct notifier_block *nb,
2508			       unsigned long event_type, void *data)
2509{
2510	struct mlx5_devx_event_table *table;
2511	struct mlx5_ib_dev *dev;
2512	struct devx_event *event;
2513	struct devx_obj_event *obj_event;
2514	u16 obj_type = 0;
2515	bool is_unaffiliated;
2516	u32 obj_id;
2517
2518	/* Explicit filtering to kernel events which may occur frequently */
2519	if (event_type == MLX5_EVENT_TYPE_CMD ||
2520	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
2521		return NOTIFY_OK;
2522
2523	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
2524	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
2525	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2526
2527	if (!is_unaffiliated)
2528		obj_type = get_event_obj_type(event_type, data);
2529
2530	rcu_read_lock();
2531	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
2532	if (!event) {
2533		rcu_read_unlock();
2534		return NOTIFY_DONE;
2535	}
2536
2537	if (is_unaffiliated) {
2538		dispatch_event_fd(&event->unaffiliated_list, data);
2539		rcu_read_unlock();
2540		return NOTIFY_OK;
2541	}
2542
2543	obj_id = devx_get_obj_id_from_event(event_type, data);
2544	obj_event = xa_load(&event->object_ids, obj_id);
2545	if (!obj_event) {
2546		rcu_read_unlock();
2547		return NOTIFY_DONE;
2548	}
2549
2550	dispatch_event_fd(&obj_event->obj_sub_list, data);
2551
2552	rcu_read_unlock();
2553	return NOTIFY_OK;
2554}
2555
2556int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
2557{
2558	struct mlx5_devx_event_table *table = &dev->devx_event_table;
2559	int uid;
2560
2561	uid = mlx5_ib_devx_create(dev, false);
2562	if (uid > 0) {
2563		dev->devx_whitelist_uid = uid;
2564		xa_init(&table->event_xa);
2565		mutex_init(&table->event_xa_lock);
2566		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
2567		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
2568	}
2569
2570	return 0;
2571}
2572
2573void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
2574{
2575	struct mlx5_devx_event_table *table = &dev->devx_event_table;
2576	struct devx_event_subscription *sub, *tmp;
2577	struct devx_event *event;
2578	void *entry;
2579	unsigned long id;
2580
2581	if (dev->devx_whitelist_uid) {
2582		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
2583		mutex_lock(&dev->devx_event_table.event_xa_lock);
2584		xa_for_each(&table->event_xa, id, entry) {
2585			event = entry;
2586			list_for_each_entry_safe(
2587				sub, tmp, &event->unaffiliated_list, xa_list)
2588				devx_cleanup_subscription(dev, sub);
2589			kfree(entry);
2590		}
2591		mutex_unlock(&dev->devx_event_table.event_xa_lock);
2592		xa_destroy(&table->event_xa);
2593
2594		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
2595	}
2596}
2597
2598static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2599					 size_t count, loff_t *pos)
2600{
2601	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2602	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2603	struct devx_async_data *event;
2604	int ret = 0;
2605	size_t eventsz;
2606
2607	spin_lock_irq(&ev_queue->lock);
2608
2609	while (list_empty(&ev_queue->event_list)) {
2610		spin_unlock_irq(&ev_queue->lock);
2611
2612		if (filp->f_flags & O_NONBLOCK)
2613			return -EAGAIN;
2614
2615		if (wait_event_interruptible(
2616			    ev_queue->poll_wait,
2617			    (!list_empty(&ev_queue->event_list) ||
2618			     ev_queue->is_destroyed))) {
2619			return -ERESTARTSYS;
2620		}
2621
2622		spin_lock_irq(&ev_queue->lock);
2623		if (ev_queue->is_destroyed) {
2624			spin_unlock_irq(&ev_queue->lock);
2625			return -EIO;
2626		}
2627	}
2628
2629	event = list_entry(ev_queue->event_list.next,
2630			   struct devx_async_data, list);
2631	eventsz = event->cmd_out_len +
2632			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
2633
2634	if (eventsz > count) {
2635		spin_unlock_irq(&ev_queue->lock);
2636		return -ENOSPC;
2637	}
2638
2639	list_del(ev_queue->event_list.next);
2640	spin_unlock_irq(&ev_queue->lock);
2641
2642	if (copy_to_user(buf, &event->hdr, eventsz))
2643		ret = -EFAULT;
2644	else
2645		ret = eventsz;
2646
2647	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
2648	kvfree(event);
2649	return ret;
2650}
2651
2652static __poll_t devx_async_cmd_event_poll(struct file *filp,
2653					      struct poll_table_struct *wait)
2654{
2655	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2656	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2657	__poll_t pollflags = 0;
2658
2659	poll_wait(filp, &ev_queue->poll_wait, wait);
2660
2661	spin_lock_irq(&ev_queue->lock);
2662	if (ev_queue->is_destroyed)
2663		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2664	else if (!list_empty(&ev_queue->event_list))
2665		pollflags = EPOLLIN | EPOLLRDNORM;
2666	spin_unlock_irq(&ev_queue->lock);
2667
2668	return pollflags;
2669}
2670
2671static const struct file_operations devx_async_cmd_event_fops = {
2672	.owner	 = THIS_MODULE,
2673	.read	 = devx_async_cmd_event_read,
2674	.poll    = devx_async_cmd_event_poll,
2675	.release = uverbs_uobject_fd_release,
2676	.llseek	 = no_llseek,
2677};
2678
2679static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2680				     size_t count, loff_t *pos)
2681{
2682	struct devx_async_event_file *ev_file = filp->private_data;
2683	struct devx_event_subscription *event_sub;
2684	struct devx_async_event_data *event;
2685	int ret = 0;
2686	size_t eventsz;
2687	bool omit_data;
2688	void *event_data;
2689
2690	omit_data = ev_file->omit_data;
2691
2692	spin_lock_irq(&ev_file->lock);
2693
2694	if (ev_file->is_overflow_err) {
2695		ev_file->is_overflow_err = 0;
2696		spin_unlock_irq(&ev_file->lock);
2697		return -EOVERFLOW;
2698	}
2699
2700
2701	while (list_empty(&ev_file->event_list)) {
2702		spin_unlock_irq(&ev_file->lock);
2703
2704		if (filp->f_flags & O_NONBLOCK)
2705			return -EAGAIN;
2706
2707		if (wait_event_interruptible(ev_file->poll_wait,
2708			    (!list_empty(&ev_file->event_list) ||
2709			     ev_file->is_destroyed))) {
2710			return -ERESTARTSYS;
2711		}
2712
2713		spin_lock_irq(&ev_file->lock);
2714		if (ev_file->is_destroyed) {
2715			spin_unlock_irq(&ev_file->lock);
2716			return -EIO;
2717		}
2718	}
2719
2720	if (omit_data) {
2721		event_sub = list_first_entry(&ev_file->event_list,
2722					struct devx_event_subscription,
2723					event_list);
2724		eventsz = sizeof(event_sub->cookie);
2725		event_data = &event_sub->cookie;
2726	} else {
2727		event = list_first_entry(&ev_file->event_list,
2728				      struct devx_async_event_data, list);
2729		eventsz = sizeof(struct mlx5_eqe) +
2730			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
2731		event_data = &event->hdr;
2732	}
2733
2734	if (eventsz > count) {
2735		spin_unlock_irq(&ev_file->lock);
2736		return -EINVAL;
2737	}
2738
2739	if (omit_data)
2740		list_del_init(&event_sub->event_list);
2741	else
2742		list_del(&event->list);
2743
2744	spin_unlock_irq(&ev_file->lock);
2745
2746	if (copy_to_user(buf, event_data, eventsz))
2747		/* This points to an application issue, not a kernel concern */
2748		ret = -EFAULT;
2749	else
2750		ret = eventsz;
2751
2752	if (!omit_data)
2753		kfree(event);
2754	return ret;
2755}
2756
2757static __poll_t devx_async_event_poll(struct file *filp,
2758				      struct poll_table_struct *wait)
2759{
2760	struct devx_async_event_file *ev_file = filp->private_data;
2761	__poll_t pollflags = 0;
2762
2763	poll_wait(filp, &ev_file->poll_wait, wait);
2764
2765	spin_lock_irq(&ev_file->lock);
2766	if (ev_file->is_destroyed)
2767		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2768	else if (!list_empty(&ev_file->event_list))
2769		pollflags = EPOLLIN | EPOLLRDNORM;
2770	spin_unlock_irq(&ev_file->lock);
2771
2772	return pollflags;
2773}
2774
2775static void devx_free_subscription(struct rcu_head *rcu)
2776{
2777	struct devx_event_subscription *event_sub =
2778		container_of(rcu, struct devx_event_subscription, rcu);
2779
2780	if (event_sub->eventfd)
2781		eventfd_ctx_put(event_sub->eventfd);
2782	uverbs_uobject_put(&event_sub->ev_file->uobj);
2783	kfree(event_sub);
2784}
2785
2786static const struct file_operations devx_async_event_fops = {
2787	.owner	 = THIS_MODULE,
2788	.read	 = devx_async_event_read,
2789	.poll    = devx_async_event_poll,
2790	.release = uverbs_uobject_fd_release,
2791	.llseek	 = no_llseek,
2792};
2793
2794static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
2795					      enum rdma_remove_reason why)
2796{
2797	struct devx_async_cmd_event_file *comp_ev_file =
2798		container_of(uobj, struct devx_async_cmd_event_file,
2799			     uobj);
2800	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2801	struct devx_async_data *entry, *tmp;
2802
2803	spin_lock_irq(&ev_queue->lock);
2804	ev_queue->is_destroyed = 1;
2805	spin_unlock_irq(&ev_queue->lock);
2806	wake_up_interruptible(&ev_queue->poll_wait);
2807
2808	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2809
2810	spin_lock_irq(&comp_ev_file->ev_queue.lock);
2811	list_for_each_entry_safe(entry, tmp,
2812				 &comp_ev_file->ev_queue.event_list, list) {
2813		list_del(&entry->list);
2814		kvfree(entry);
2815	}
2816	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2817};
2818
2819static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
2820					  enum rdma_remove_reason why)
2821{
2822	struct devx_async_event_file *ev_file =
2823		container_of(uobj, struct devx_async_event_file,
2824			     uobj);
2825	struct devx_event_subscription *event_sub, *event_sub_tmp;
2826	struct mlx5_ib_dev *dev = ev_file->dev;
2827
2828	spin_lock_irq(&ev_file->lock);
2829	ev_file->is_destroyed = 1;
2830
2831	/* free the pending events allocation */
2832	if (ev_file->omit_data) {
2833		struct devx_event_subscription *event_sub, *tmp;
2834
2835		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
2836					 event_list)
2837			list_del_init(&event_sub->event_list);
2838
2839	} else {
2840		struct devx_async_event_data *entry, *tmp;
2841
2842		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
2843					 list) {
2844			list_del(&entry->list);
2845			kfree(entry);
2846		}
2847	}
2848
2849	spin_unlock_irq(&ev_file->lock);
2850	wake_up_interruptible(&ev_file->poll_wait);
2851
2852	mutex_lock(&dev->devx_event_table.event_xa_lock);
2853	/* delete the subscriptions which are related to this FD */
2854	list_for_each_entry_safe(event_sub, event_sub_tmp,
2855				 &ev_file->subscribed_events_list, file_list) {
2856		devx_cleanup_subscription(dev, event_sub);
2857		list_del_rcu(&event_sub->file_list);
2858		/* subscription may not be used by the read API any more */
2859		call_rcu(&event_sub->rcu, devx_free_subscription);
2860	}
2861	mutex_unlock(&dev->devx_event_table.event_xa_lock);
2862
2863	put_device(&dev->ib_dev.dev);
2864};
2865
2866DECLARE_UVERBS_NAMED_METHOD(
2867	MLX5_IB_METHOD_DEVX_UMEM_REG,
2868	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2869			MLX5_IB_OBJECT_DEVX_UMEM,
2870			UVERBS_ACCESS_NEW,
2871			UA_MANDATORY),
2872	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2873			   UVERBS_ATTR_TYPE(u64),
2874			   UA_MANDATORY),
2875	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2876			   UVERBS_ATTR_TYPE(u64),
2877			   UA_MANDATORY),
2878	UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
2879			   UA_OPTIONAL),
2880	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2881			     enum ib_access_flags),
2882	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2883			     u64),
2884	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2885			    UVERBS_ATTR_TYPE(u32),
2886			    UA_MANDATORY));
2887
2888DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2889	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2890	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2891			MLX5_IB_OBJECT_DEVX_UMEM,
2892			UVERBS_ACCESS_DESTROY,
2893			UA_MANDATORY));
2894
2895DECLARE_UVERBS_NAMED_METHOD(
2896	MLX5_IB_METHOD_DEVX_QUERY_EQN,
2897	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2898			   UVERBS_ATTR_TYPE(u32),
2899			   UA_MANDATORY),
2900	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2901			    UVERBS_ATTR_TYPE(u32),
2902			    UA_MANDATORY));
2903
2904DECLARE_UVERBS_NAMED_METHOD(
2905	MLX5_IB_METHOD_DEVX_QUERY_UAR,
2906	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2907			   UVERBS_ATTR_TYPE(u32),
2908			   UA_MANDATORY),
2909	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2910			    UVERBS_ATTR_TYPE(u32),
2911			    UA_MANDATORY));
2912
2913DECLARE_UVERBS_NAMED_METHOD(
2914	MLX5_IB_METHOD_DEVX_OTHER,
2915	UVERBS_ATTR_PTR_IN(
2916		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2917		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2918		UA_MANDATORY,
2919		UA_ALLOC_AND_COPY),
2920	UVERBS_ATTR_PTR_OUT(
2921		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2922		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2923		UA_MANDATORY));
2924
2925DECLARE_UVERBS_NAMED_METHOD(
2926	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2927	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2928			MLX5_IB_OBJECT_DEVX_OBJ,
2929			UVERBS_ACCESS_NEW,
2930			UA_MANDATORY),
2931	UVERBS_ATTR_PTR_IN(
2932		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2933		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2934		UA_MANDATORY,
2935		UA_ALLOC_AND_COPY),
2936	UVERBS_ATTR_PTR_OUT(
2937		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2938		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2939		UA_MANDATORY));
2940
2941DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2942	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2943	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2944			MLX5_IB_OBJECT_DEVX_OBJ,
2945			UVERBS_ACCESS_DESTROY,
2946			UA_MANDATORY));
2947
2948DECLARE_UVERBS_NAMED_METHOD(
2949	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2950	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2951			UVERBS_IDR_ANY_OBJECT,
2952			UVERBS_ACCESS_READ,
2953			UA_MANDATORY),
2954	UVERBS_ATTR_PTR_IN(
2955		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2956		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2957		UA_MANDATORY,
2958		UA_ALLOC_AND_COPY),
2959	UVERBS_ATTR_PTR_OUT(
2960		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2961		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2962		UA_MANDATORY));
2963
2964DECLARE_UVERBS_NAMED_METHOD(
2965	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2966	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2967			UVERBS_IDR_ANY_OBJECT,
2968			UVERBS_ACCESS_READ,
2969			UA_MANDATORY),
2970	UVERBS_ATTR_PTR_IN(
2971		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2972		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2973		UA_MANDATORY,
2974		UA_ALLOC_AND_COPY),
2975	UVERBS_ATTR_PTR_OUT(
2976		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2977		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2978		UA_MANDATORY));
2979
2980DECLARE_UVERBS_NAMED_METHOD(
2981	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2982	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2983			UVERBS_IDR_ANY_OBJECT,
2984			UVERBS_ACCESS_READ,
2985			UA_MANDATORY),
2986	UVERBS_ATTR_PTR_IN(
2987		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2988		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2989		UA_MANDATORY,
2990		UA_ALLOC_AND_COPY),
2991	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2992		u16, UA_MANDATORY),
2993	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2994		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2995		UVERBS_ACCESS_READ,
2996		UA_MANDATORY),
2997	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2998		UVERBS_ATTR_TYPE(u64),
2999		UA_MANDATORY));
3000
3001DECLARE_UVERBS_NAMED_METHOD(
3002	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
3003	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
3004		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3005		UVERBS_ACCESS_READ,
3006		UA_MANDATORY),
3007	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
3008		MLX5_IB_OBJECT_DEVX_OBJ,
3009		UVERBS_ACCESS_READ,
3010		UA_OPTIONAL),
3011	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
3012		UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
3013		UA_MANDATORY,
3014		UA_ALLOC_AND_COPY),
3015	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
3016		UVERBS_ATTR_TYPE(u64),
3017		UA_OPTIONAL),
3018	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
3019		UVERBS_ATTR_TYPE(u32),
3020		UA_OPTIONAL));
3021
3022DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
3023			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
3024			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
3025			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
3026			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
3027
3028DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
3029			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
3030			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
3031			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
3032			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
3033			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
3034			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
3035
3036DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
3037			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
3038			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
3039			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
3040
3041
3042DECLARE_UVERBS_NAMED_METHOD(
3043	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
3044	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
3045			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3046			UVERBS_ACCESS_NEW,
3047			UA_MANDATORY));
3048
3049DECLARE_UVERBS_NAMED_OBJECT(
3050	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3051	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
3052			     devx_async_cmd_event_destroy_uobj,
3053			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
3054			     O_RDONLY),
3055	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
3056
3057DECLARE_UVERBS_NAMED_METHOD(
3058	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
3059	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
3060			MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3061			UVERBS_ACCESS_NEW,
3062			UA_MANDATORY),
3063	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
3064			enum mlx5_ib_uapi_devx_create_event_channel_flags,
3065			UA_MANDATORY));
3066
3067DECLARE_UVERBS_NAMED_OBJECT(
3068	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3069	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
3070			     devx_async_event_destroy_uobj,
3071			     &devx_async_event_fops, "[devx_async_event]",
3072			     O_RDONLY),
3073	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
3074
3075static bool devx_is_supported(struct ib_device *device)
3076{
3077	struct mlx5_ib_dev *dev = to_mdev(device);
3078
3079	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
3080}
3081
3082const struct uapi_definition mlx5_ib_devx_defs[] = {
3083	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3084		MLX5_IB_OBJECT_DEVX,
3085		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3086	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3087		MLX5_IB_OBJECT_DEVX_OBJ,
3088		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3089	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3090		MLX5_IB_OBJECT_DEVX_UMEM,
3091		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3092	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3093		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3094		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3095	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3096		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3097		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3098	{},
3099};
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2/*
   3 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
   4 */
   5
   6#include <rdma/ib_user_verbs.h>
   7#include <rdma/ib_verbs.h>
   8#include <rdma/uverbs_types.h>
   9#include <rdma/uverbs_ioctl.h>
  10#include <rdma/mlx5_user_ioctl_cmds.h>
  11#include <rdma/mlx5_user_ioctl_verbs.h>
  12#include <rdma/ib_umem.h>
  13#include <rdma/uverbs_std_types.h>
  14#include <linux/mlx5/driver.h>
  15#include <linux/mlx5/fs.h>
  16#include "mlx5_ib.h"
  17#include "devx.h"
  18#include "qp.h"
  19#include <linux/xarray.h>
  20
  21#define UVERBS_MODULE_NAME mlx5_ib
  22#include <rdma/uverbs_named_ioctl.h>
  23
  24static void dispatch_event_fd(struct list_head *fd_list, const void *data);
  25
  26enum devx_obj_flags {
  27	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
  28	DEVX_OBJ_FLAGS_DCT = 1 << 1,
  29	DEVX_OBJ_FLAGS_CQ = 1 << 2,
  30};
  31
  32struct devx_async_data {
  33	struct mlx5_ib_dev *mdev;
  34	struct list_head list;
  35	struct devx_async_cmd_event_file *ev_file;
  36	struct mlx5_async_work cb_work;
  37	u16 cmd_out_len;
  38	/* must be last field in this structure */
  39	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
  40};
  41
  42struct devx_async_event_data {
  43	struct list_head list; /* headed in ev_file->event_list */
  44	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
  45};
  46
  47/* first level XA value data structure */
  48struct devx_event {
  49	struct xarray object_ids; /* second XA level, Key = object id */
  50	struct list_head unaffiliated_list;
  51};
  52
  53/* second level XA value data structure */
  54struct devx_obj_event {
  55	struct rcu_head rcu;
  56	struct list_head obj_sub_list;
  57};
  58
  59struct devx_event_subscription {
  60	struct list_head file_list; /* headed in ev_file->
  61				     * subscribed_events_list
  62				     */
  63	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
  64				   * devx_obj_event->obj_sub_list
  65				   */
  66	struct list_head obj_list; /* headed in devx_object */
  67	struct list_head event_list; /* headed in ev_file->event_list or in
  68				      * temp list via subscription
  69				      */
  70
  71	u8 is_cleaned:1;
  72	u32 xa_key_level1;
  73	u32 xa_key_level2;
  74	struct rcu_head	rcu;
  75	u64 cookie;
  76	struct devx_async_event_file *ev_file;
  77	struct eventfd_ctx *eventfd;
  78};
  79
  80struct devx_async_event_file {
  81	struct ib_uobject uobj;
  82	/* Head of events that are subscribed to this FD */
  83	struct list_head subscribed_events_list;
  84	spinlock_t lock;
  85	wait_queue_head_t poll_wait;
  86	struct list_head event_list;
  87	struct mlx5_ib_dev *dev;
  88	u8 omit_data:1;
  89	u8 is_overflow_err:1;
  90	u8 is_destroyed:1;
  91};
  92
  93struct devx_umem {
  94	struct mlx5_core_dev		*mdev;
  95	struct ib_umem			*umem;
  96	u32				dinlen;
  97	u32				dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
  98};
  99
 100struct devx_umem_reg_cmd {
 101	void				*in;
 102	u32				inlen;
 103	u32				out[MLX5_ST_SZ_DW(create_umem_out)];
 104};
 105
 106static struct mlx5_ib_ucontext *
 107devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
 108{
 109	return to_mucontext(ib_uverbs_get_ucontext(attrs));
 110}
 111
 112int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
 113{
 114	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
 115	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
 116	void *uctx;
 117	int err;
 118	u16 uid;
 119	u32 cap = 0;
 120
 121	/* 0 means not supported */
 122	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
 123		return -EINVAL;
 124
 125	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
 126	if (is_user && capable(CAP_NET_RAW) &&
 127	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
 128		cap |= MLX5_UCTX_CAP_RAW_TX;
 129	if (is_user && capable(CAP_SYS_RAWIO) &&
 130	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
 131	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
 132		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
 133
 134	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
 135	MLX5_SET(uctx, uctx, cap, cap);
 136
 137	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 138	if (err)
 139		return err;
 140
 141	uid = MLX5_GET(create_uctx_out, out, uid);
 142	return uid;
 143}
 144
 145void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
 146{
 147	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
 148	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
 149
 150	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
 151	MLX5_SET(destroy_uctx_in, in, uid, uid);
 152
 153	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 154}
 155
 156static bool is_legacy_unaffiliated_event_num(u16 event_num)
 157{
 158	switch (event_num) {
 159	case MLX5_EVENT_TYPE_PORT_CHANGE:
 160		return true;
 161	default:
 162		return false;
 163	}
 164}
 165
 166static bool is_legacy_obj_event_num(u16 event_num)
 167{
 168	switch (event_num) {
 169	case MLX5_EVENT_TYPE_PATH_MIG:
 170	case MLX5_EVENT_TYPE_COMM_EST:
 171	case MLX5_EVENT_TYPE_SQ_DRAINED:
 172	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
 173	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 174	case MLX5_EVENT_TYPE_CQ_ERROR:
 175	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
 176	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 177	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 178	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
 179	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
 180	case MLX5_EVENT_TYPE_DCT_DRAINED:
 181	case MLX5_EVENT_TYPE_COMP:
 182	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
 183	case MLX5_EVENT_TYPE_XRQ_ERROR:
 184		return true;
 185	default:
 186		return false;
 187	}
 188}
 189
 190static u16 get_legacy_obj_type(u16 opcode)
 191{
 192	switch (opcode) {
 193	case MLX5_CMD_OP_CREATE_RQ:
 194		return MLX5_EVENT_QUEUE_TYPE_RQ;
 195	case MLX5_CMD_OP_CREATE_QP:
 196		return MLX5_EVENT_QUEUE_TYPE_QP;
 197	case MLX5_CMD_OP_CREATE_SQ:
 198		return MLX5_EVENT_QUEUE_TYPE_SQ;
 199	case MLX5_CMD_OP_CREATE_DCT:
 200		return MLX5_EVENT_QUEUE_TYPE_DCT;
 201	default:
 202		return 0;
 203	}
 204}
 205
 206static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
 207{
 208	u16 opcode;
 209
 210	opcode = (obj->obj_id >> 32) & 0xffff;
 211
 212	if (is_legacy_obj_event_num(event_num))
 213		return get_legacy_obj_type(opcode);
 214
 215	switch (opcode) {
 216	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 217		return (obj->obj_id >> 48);
 218	case MLX5_CMD_OP_CREATE_RQ:
 219		return MLX5_OBJ_TYPE_RQ;
 220	case MLX5_CMD_OP_CREATE_QP:
 221		return MLX5_OBJ_TYPE_QP;
 222	case MLX5_CMD_OP_CREATE_SQ:
 223		return MLX5_OBJ_TYPE_SQ;
 224	case MLX5_CMD_OP_CREATE_DCT:
 225		return MLX5_OBJ_TYPE_DCT;
 226	case MLX5_CMD_OP_CREATE_TIR:
 227		return MLX5_OBJ_TYPE_TIR;
 228	case MLX5_CMD_OP_CREATE_TIS:
 229		return MLX5_OBJ_TYPE_TIS;
 230	case MLX5_CMD_OP_CREATE_PSV:
 231		return MLX5_OBJ_TYPE_PSV;
 232	case MLX5_OBJ_TYPE_MKEY:
 233		return MLX5_OBJ_TYPE_MKEY;
 234	case MLX5_CMD_OP_CREATE_RMP:
 235		return MLX5_OBJ_TYPE_RMP;
 236	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 237		return MLX5_OBJ_TYPE_XRC_SRQ;
 238	case MLX5_CMD_OP_CREATE_XRQ:
 239		return MLX5_OBJ_TYPE_XRQ;
 240	case MLX5_CMD_OP_CREATE_RQT:
 241		return MLX5_OBJ_TYPE_RQT;
 242	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 243		return MLX5_OBJ_TYPE_FLOW_COUNTER;
 244	case MLX5_CMD_OP_CREATE_CQ:
 245		return MLX5_OBJ_TYPE_CQ;
 246	default:
 247		return 0;
 248	}
 249}
 250
 251static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
 252{
 253	switch (event_type) {
 254	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
 255	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
 256	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 257	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
 258	case MLX5_EVENT_TYPE_PATH_MIG:
 259	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 260	case MLX5_EVENT_TYPE_COMM_EST:
 261	case MLX5_EVENT_TYPE_SQ_DRAINED:
 262	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 263	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
 264		return eqe->data.qp_srq.type;
 265	case MLX5_EVENT_TYPE_CQ_ERROR:
 266	case MLX5_EVENT_TYPE_XRQ_ERROR:
 267		return 0;
 268	case MLX5_EVENT_TYPE_DCT_DRAINED:
 269	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
 270		return MLX5_EVENT_QUEUE_TYPE_DCT;
 271	default:
 272		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
 273	}
 274}
 275
 276static u32 get_dec_obj_id(u64 obj_id)
 277{
 278	return (obj_id & 0xffffffff);
 279}
 280
 281/*
 282 * As the obj_id in the firmware is not globally unique the object type
 283 * must be considered upon checking for a valid object id.
 284 * For that the opcode of the creator command is encoded as part of the obj_id.
 285 */
 286static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
 287{
 288	return ((u64)opcode << 32) | obj_id;
 289}
 290
 291static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
 292{
 293	switch (opcode) {
 294	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 295		return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
 296	case MLX5_CMD_OP_CREATE_UMEM:
 297		return MLX5_GET(create_umem_out, out, umem_id);
 298	case MLX5_CMD_OP_CREATE_MKEY:
 299		return MLX5_GET(create_mkey_out, out, mkey_index);
 300	case MLX5_CMD_OP_CREATE_CQ:
 301		return MLX5_GET(create_cq_out, out, cqn);
 302	case MLX5_CMD_OP_ALLOC_PD:
 303		return MLX5_GET(alloc_pd_out, out, pd);
 304	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
 305		return MLX5_GET(alloc_transport_domain_out, out,
 306				transport_domain);
 307	case MLX5_CMD_OP_CREATE_RMP:
 308		return MLX5_GET(create_rmp_out, out, rmpn);
 309	case MLX5_CMD_OP_CREATE_SQ:
 310		return MLX5_GET(create_sq_out, out, sqn);
 311	case MLX5_CMD_OP_CREATE_RQ:
 312		return MLX5_GET(create_rq_out, out, rqn);
 313	case MLX5_CMD_OP_CREATE_RQT:
 314		return MLX5_GET(create_rqt_out, out, rqtn);
 315	case MLX5_CMD_OP_CREATE_TIR:
 316		return MLX5_GET(create_tir_out, out, tirn);
 317	case MLX5_CMD_OP_CREATE_TIS:
 318		return MLX5_GET(create_tis_out, out, tisn);
 319	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 320		return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
 321	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
 322		return MLX5_GET(create_flow_table_out, out, table_id);
 323	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 324		return MLX5_GET(create_flow_group_out, out, group_id);
 325	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 326		return MLX5_GET(set_fte_in, in, flow_index);
 327	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 328		return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
 329	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
 330		return MLX5_GET(alloc_packet_reformat_context_out, out,
 331				packet_reformat_id);
 332	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
 333		return MLX5_GET(alloc_modify_header_context_out, out,
 334				modify_header_id);
 335	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
 336		return MLX5_GET(create_scheduling_element_out, out,
 337				scheduling_element_id);
 338	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 339		return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
 340	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 341		return MLX5_GET(set_l2_table_entry_in, in, table_index);
 342	case MLX5_CMD_OP_CREATE_QP:
 343		return MLX5_GET(create_qp_out, out, qpn);
 344	case MLX5_CMD_OP_CREATE_SRQ:
 345		return MLX5_GET(create_srq_out, out, srqn);
 346	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 347		return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
 348	case MLX5_CMD_OP_CREATE_DCT:
 349		return MLX5_GET(create_dct_out, out, dctn);
 350	case MLX5_CMD_OP_CREATE_XRQ:
 351		return MLX5_GET(create_xrq_out, out, xrqn);
 352	case MLX5_CMD_OP_ATTACH_TO_MCG:
 353		return MLX5_GET(attach_to_mcg_in, in, qpn);
 354	case MLX5_CMD_OP_ALLOC_XRCD:
 355		return MLX5_GET(alloc_xrcd_out, out, xrcd);
 356	case MLX5_CMD_OP_CREATE_PSV:
 357		return MLX5_GET(create_psv_out, out, psv0_index);
 358	default:
 359		/* The entry must match to one of the devx_is_obj_create_cmd */
 360		WARN_ON(true);
 361		return 0;
 362	}
 363}
 364
 365static u64 devx_get_obj_id(const void *in)
 366{
 367	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 368	u64 obj_id;
 369
 370	switch (opcode) {
 371	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
 372	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
 373		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
 374					MLX5_GET(general_obj_in_cmd_hdr, in,
 375						 obj_type) << 16,
 376					MLX5_GET(general_obj_in_cmd_hdr, in,
 377						 obj_id));
 378		break;
 379	case MLX5_CMD_OP_QUERY_MKEY:
 380		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
 381					MLX5_GET(query_mkey_in, in,
 382						 mkey_index));
 383		break;
 384	case MLX5_CMD_OP_QUERY_CQ:
 385		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 386					MLX5_GET(query_cq_in, in, cqn));
 387		break;
 388	case MLX5_CMD_OP_MODIFY_CQ:
 389		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 390					MLX5_GET(modify_cq_in, in, cqn));
 391		break;
 392	case MLX5_CMD_OP_QUERY_SQ:
 393		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 394					MLX5_GET(query_sq_in, in, sqn));
 395		break;
 396	case MLX5_CMD_OP_MODIFY_SQ:
 397		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 398					MLX5_GET(modify_sq_in, in, sqn));
 399		break;
 400	case MLX5_CMD_OP_QUERY_RQ:
 401		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 402					MLX5_GET(query_rq_in, in, rqn));
 403		break;
 404	case MLX5_CMD_OP_MODIFY_RQ:
 405		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 406					MLX5_GET(modify_rq_in, in, rqn));
 407		break;
 408	case MLX5_CMD_OP_QUERY_RMP:
 409		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
 410					MLX5_GET(query_rmp_in, in, rmpn));
 411		break;
 412	case MLX5_CMD_OP_MODIFY_RMP:
 413		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
 414					MLX5_GET(modify_rmp_in, in, rmpn));
 415		break;
 416	case MLX5_CMD_OP_QUERY_RQT:
 417		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 418					MLX5_GET(query_rqt_in, in, rqtn));
 419		break;
 420	case MLX5_CMD_OP_MODIFY_RQT:
 421		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 422					MLX5_GET(modify_rqt_in, in, rqtn));
 423		break;
 424	case MLX5_CMD_OP_QUERY_TIR:
 425		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 426					MLX5_GET(query_tir_in, in, tirn));
 427		break;
 428	case MLX5_CMD_OP_MODIFY_TIR:
 429		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 430					MLX5_GET(modify_tir_in, in, tirn));
 431		break;
 432	case MLX5_CMD_OP_QUERY_TIS:
 433		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 434					MLX5_GET(query_tis_in, in, tisn));
 435		break;
 436	case MLX5_CMD_OP_MODIFY_TIS:
 437		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 438					MLX5_GET(modify_tis_in, in, tisn));
 439		break;
 440	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 441		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
 442					MLX5_GET(query_flow_table_in, in,
 443						 table_id));
 444		break;
 445	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
 446		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
 447					MLX5_GET(modify_flow_table_in, in,
 448						 table_id));
 449		break;
 450	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
 451		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
 452					MLX5_GET(query_flow_group_in, in,
 453						 group_id));
 454		break;
 455	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 456		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
 457					MLX5_GET(query_fte_in, in,
 458						 flow_index));
 459		break;
 460	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 461		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
 462					MLX5_GET(set_fte_in, in, flow_index));
 463		break;
 464	case MLX5_CMD_OP_QUERY_Q_COUNTER:
 465		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
 466					MLX5_GET(query_q_counter_in, in,
 467						 counter_set_id));
 468		break;
 469	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 470		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
 471					MLX5_GET(query_flow_counter_in, in,
 472						 flow_counter_id));
 473		break;
 474	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
 475		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
 476					MLX5_GET(query_modify_header_context_in,
 477						 in, modify_header_id));
 478		break;
 479	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
 480		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
 481					MLX5_GET(query_scheduling_element_in,
 482						 in, scheduling_element_id));
 483		break;
 484	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
 485		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
 486					MLX5_GET(modify_scheduling_element_in,
 487						 in, scheduling_element_id));
 488		break;
 489	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 490		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
 491					MLX5_GET(add_vxlan_udp_dport_in, in,
 492						 vxlan_udp_port));
 493		break;
 494	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
 495		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
 496					MLX5_GET(query_l2_table_entry_in, in,
 497						 table_index));
 498		break;
 499	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 500		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
 501					MLX5_GET(set_l2_table_entry_in, in,
 502						 table_index));
 503		break;
 504	case MLX5_CMD_OP_QUERY_QP:
 505		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 506					MLX5_GET(query_qp_in, in, qpn));
 507		break;
 508	case MLX5_CMD_OP_RST2INIT_QP:
 509		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 510					MLX5_GET(rst2init_qp_in, in, qpn));
 511		break;
 512	case MLX5_CMD_OP_INIT2INIT_QP:
 513		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 514					MLX5_GET(init2init_qp_in, in, qpn));
 515		break;
 516	case MLX5_CMD_OP_INIT2RTR_QP:
 517		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 518					MLX5_GET(init2rtr_qp_in, in, qpn));
 519		break;
 520	case MLX5_CMD_OP_RTR2RTS_QP:
 521		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 522					MLX5_GET(rtr2rts_qp_in, in, qpn));
 523		break;
 524	case MLX5_CMD_OP_RTS2RTS_QP:
 525		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 526					MLX5_GET(rts2rts_qp_in, in, qpn));
 527		break;
 528	case MLX5_CMD_OP_SQERR2RTS_QP:
 529		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 530					MLX5_GET(sqerr2rts_qp_in, in, qpn));
 531		break;
 532	case MLX5_CMD_OP_2ERR_QP:
 533		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 534					MLX5_GET(qp_2err_in, in, qpn));
 535		break;
 536	case MLX5_CMD_OP_2RST_QP:
 537		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 538					MLX5_GET(qp_2rst_in, in, qpn));
 539		break;
 540	case MLX5_CMD_OP_QUERY_DCT:
 541		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 542					MLX5_GET(query_dct_in, in, dctn));
 543		break;
 544	case MLX5_CMD_OP_QUERY_XRQ:
 545	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
 546	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
 547		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
 548					MLX5_GET(query_xrq_in, in, xrqn));
 549		break;
 550	case MLX5_CMD_OP_QUERY_XRC_SRQ:
 551		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
 552					MLX5_GET(query_xrc_srq_in, in,
 553						 xrc_srqn));
 554		break;
 555	case MLX5_CMD_OP_ARM_XRC_SRQ:
 556		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
 557					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
 558		break;
 559	case MLX5_CMD_OP_QUERY_SRQ:
 560		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
 561					MLX5_GET(query_srq_in, in, srqn));
 562		break;
 563	case MLX5_CMD_OP_ARM_RQ:
 564		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 565					MLX5_GET(arm_rq_in, in, srq_number));
 566		break;
 567	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
 568		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 569					MLX5_GET(drain_dct_in, in, dctn));
 570		break;
 571	case MLX5_CMD_OP_ARM_XRQ:
 572	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
 573	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
 574	case MLX5_CMD_OP_MODIFY_XRQ:
 575		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
 576					MLX5_GET(arm_xrq_in, in, xrqn));
 577		break;
 578	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
 579		obj_id = get_enc_obj_id
 580				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
 581				 MLX5_GET(query_packet_reformat_context_in,
 582					  in, packet_reformat_id));
 583		break;
 584	default:
 585		obj_id = 0;
 586	}
 587
 588	return obj_id;
 589}
 590
 591static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 592				 struct ib_uobject *uobj, const void *in)
 593{
 594	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
 595	u64 obj_id = devx_get_obj_id(in);
 596
 597	if (!obj_id)
 598		return false;
 599
 600	switch (uobj_get_object_id(uobj)) {
 601	case UVERBS_OBJECT_CQ:
 602		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
 603				      to_mcq(uobj->object)->mcq.cqn) ==
 604				      obj_id;
 605
 606	case UVERBS_OBJECT_SRQ:
 607	{
 608		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
 609		u16 opcode;
 610
 611		switch (srq->common.res) {
 612		case MLX5_RES_XSRQ:
 613			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
 614			break;
 615		case MLX5_RES_XRQ:
 616			opcode = MLX5_CMD_OP_CREATE_XRQ;
 617			break;
 618		default:
 619			if (!dev->mdev->issi)
 620				opcode = MLX5_CMD_OP_CREATE_SRQ;
 621			else
 622				opcode = MLX5_CMD_OP_CREATE_RMP;
 623		}
 624
 625		return get_enc_obj_id(opcode,
 626				      to_msrq(uobj->object)->msrq.srqn) ==
 627				      obj_id;
 628	}
 629
 630	case UVERBS_OBJECT_QP:
 631	{
 632		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
 633
 634		if (qp->type == IB_QPT_RAW_PACKET ||
 635		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
 636			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
 637							 &qp->raw_packet_qp;
 638			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 639			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
 640
 641			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 642					       rq->base.mqp.qpn) == obj_id ||
 643				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
 644					       sq->base.mqp.qpn) == obj_id ||
 645				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
 646					       rq->tirn) == obj_id ||
 647				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
 648					       sq->tisn) == obj_id);
 649		}
 650
 651		if (qp->type == MLX5_IB_QPT_DCT)
 652			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 653					      qp->dct.mdct.mqp.qpn) == obj_id;
 654		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 655				      qp->ibqp.qp_num) == obj_id;
 656	}
 657
 658	case UVERBS_OBJECT_WQ:
 659		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
 660				      to_mrwq(uobj->object)->core_qp.qpn) ==
 661				      obj_id;
 662
 663	case UVERBS_OBJECT_RWQ_IND_TBL:
 664		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
 665				      to_mrwq_ind_table(uobj->object)->rqtn) ==
 666				      obj_id;
 667
 668	case MLX5_IB_OBJECT_DEVX_OBJ:
 669		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 670
 671	default:
 672		return false;
 673	}
 674}
 675
 676static void devx_set_umem_valid(const void *in)
 677{
 678	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 679
 680	switch (opcode) {
 681	case MLX5_CMD_OP_CREATE_MKEY:
 682		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
 683		break;
 684	case MLX5_CMD_OP_CREATE_CQ:
 685	{
 686		void *cqc;
 687
 688		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
 689		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 690		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
 691		break;
 692	}
 693	case MLX5_CMD_OP_CREATE_QP:
 694	{
 695		void *qpc;
 696
 697		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 698		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
 699		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
 700		break;
 701	}
 702
 703	case MLX5_CMD_OP_CREATE_RQ:
 704	{
 705		void *rqc, *wq;
 706
 707		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
 708		wq  = MLX5_ADDR_OF(rqc, rqc, wq);
 709		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 710		MLX5_SET(wq, wq, wq_umem_valid, 1);
 711		break;
 712	}
 713
 714	case MLX5_CMD_OP_CREATE_SQ:
 715	{
 716		void *sqc, *wq;
 717
 718		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
 719		wq = MLX5_ADDR_OF(sqc, sqc, wq);
 720		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 721		MLX5_SET(wq, wq, wq_umem_valid, 1);
 722		break;
 723	}
 724
 725	case MLX5_CMD_OP_MODIFY_CQ:
 726		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
 727		break;
 728
 729	case MLX5_CMD_OP_CREATE_RMP:
 730	{
 731		void *rmpc, *wq;
 732
 733		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
 734		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
 735		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 736		MLX5_SET(wq, wq, wq_umem_valid, 1);
 737		break;
 738	}
 739
 740	case MLX5_CMD_OP_CREATE_XRQ:
 741	{
 742		void *xrqc, *wq;
 743
 744		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
 745		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
 746		MLX5_SET(wq, wq, dbr_umem_valid, 1);
 747		MLX5_SET(wq, wq, wq_umem_valid, 1);
 748		break;
 749	}
 750
 751	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 752	{
 753		void *xrc_srqc;
 754
 755		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
 756		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
 757					xrc_srq_context_entry);
 758		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
 759		break;
 760	}
 761
 762	default:
 763		return;
 764	}
 765}
 766
 767static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
 768{
 769	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 770
 771	switch (*opcode) {
 772	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
 773	case MLX5_CMD_OP_CREATE_MKEY:
 774	case MLX5_CMD_OP_CREATE_CQ:
 775	case MLX5_CMD_OP_ALLOC_PD:
 776	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
 777	case MLX5_CMD_OP_CREATE_RMP:
 778	case MLX5_CMD_OP_CREATE_SQ:
 779	case MLX5_CMD_OP_CREATE_RQ:
 780	case MLX5_CMD_OP_CREATE_RQT:
 781	case MLX5_CMD_OP_CREATE_TIR:
 782	case MLX5_CMD_OP_CREATE_TIS:
 783	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 784	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
 785	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
 786	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
 787	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
 788	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
 789	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
 790	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 791	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 792	case MLX5_CMD_OP_CREATE_QP:
 793	case MLX5_CMD_OP_CREATE_SRQ:
 794	case MLX5_CMD_OP_CREATE_XRC_SRQ:
 795	case MLX5_CMD_OP_CREATE_DCT:
 796	case MLX5_CMD_OP_CREATE_XRQ:
 797	case MLX5_CMD_OP_ATTACH_TO_MCG:
 798	case MLX5_CMD_OP_ALLOC_XRCD:
 799		return true;
 800	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 801	{
 802		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
 803		if (op_mod == 0)
 804			return true;
 805		return false;
 806	}
 807	case MLX5_CMD_OP_CREATE_PSV:
 808	{
 809		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
 810
 811		if (num_psv == 1)
 812			return true;
 813		return false;
 814	}
 815	default:
 816		return false;
 817	}
 818}
 819
 820static bool devx_is_obj_modify_cmd(const void *in)
 821{
 822	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 823
 824	switch (opcode) {
 825	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
 826	case MLX5_CMD_OP_MODIFY_CQ:
 827	case MLX5_CMD_OP_MODIFY_RMP:
 828	case MLX5_CMD_OP_MODIFY_SQ:
 829	case MLX5_CMD_OP_MODIFY_RQ:
 830	case MLX5_CMD_OP_MODIFY_RQT:
 831	case MLX5_CMD_OP_MODIFY_TIR:
 832	case MLX5_CMD_OP_MODIFY_TIS:
 833	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
 834	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
 835	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
 836	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 837	case MLX5_CMD_OP_RST2INIT_QP:
 838	case MLX5_CMD_OP_INIT2RTR_QP:
 839	case MLX5_CMD_OP_INIT2INIT_QP:
 840	case MLX5_CMD_OP_RTR2RTS_QP:
 841	case MLX5_CMD_OP_RTS2RTS_QP:
 842	case MLX5_CMD_OP_SQERR2RTS_QP:
 843	case MLX5_CMD_OP_2ERR_QP:
 844	case MLX5_CMD_OP_2RST_QP:
 845	case MLX5_CMD_OP_ARM_XRC_SRQ:
 846	case MLX5_CMD_OP_ARM_RQ:
 847	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
 848	case MLX5_CMD_OP_ARM_XRQ:
 849	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
 850	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
 851	case MLX5_CMD_OP_MODIFY_XRQ:
 852		return true;
 853	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 854	{
 855		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
 856
 857		if (op_mod == 1)
 858			return true;
 859		return false;
 860	}
 861	default:
 862		return false;
 863	}
 864}
 865
 866static bool devx_is_obj_query_cmd(const void *in)
 867{
 868	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 869
 870	switch (opcode) {
 871	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
 872	case MLX5_CMD_OP_QUERY_MKEY:
 873	case MLX5_CMD_OP_QUERY_CQ:
 874	case MLX5_CMD_OP_QUERY_RMP:
 875	case MLX5_CMD_OP_QUERY_SQ:
 876	case MLX5_CMD_OP_QUERY_RQ:
 877	case MLX5_CMD_OP_QUERY_RQT:
 878	case MLX5_CMD_OP_QUERY_TIR:
 879	case MLX5_CMD_OP_QUERY_TIS:
 880	case MLX5_CMD_OP_QUERY_Q_COUNTER:
 881	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
 882	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
 883	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
 884	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 885	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
 886	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
 887	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
 888	case MLX5_CMD_OP_QUERY_QP:
 889	case MLX5_CMD_OP_QUERY_SRQ:
 890	case MLX5_CMD_OP_QUERY_XRC_SRQ:
 891	case MLX5_CMD_OP_QUERY_DCT:
 892	case MLX5_CMD_OP_QUERY_XRQ:
 893	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
 894	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
 895	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
 896		return true;
 897	default:
 898		return false;
 899	}
 900}
 901
 902static bool devx_is_whitelist_cmd(void *in)
 903{
 904	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 905
 906	switch (opcode) {
 907	case MLX5_CMD_OP_QUERY_HCA_CAP:
 908	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
 909	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
 910	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 911		return true;
 912	default:
 913		return false;
 914	}
 915}
 916
 917static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
 918{
 919	if (devx_is_whitelist_cmd(cmd_in)) {
 920		struct mlx5_ib_dev *dev;
 921
 922		if (c->devx_uid)
 923			return c->devx_uid;
 924
 925		dev = to_mdev(c->ibucontext.device);
 926		if (dev->devx_whitelist_uid)
 927			return dev->devx_whitelist_uid;
 928
 929		return -EOPNOTSUPP;
 930	}
 931
 932	if (!c->devx_uid)
 933		return -EINVAL;
 934
 935	return c->devx_uid;
 936}
 937
 938static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
 939{
 940	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
 941
 942	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
 943	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
 944	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
 945	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
 946	     opcode < MLX5_CMD_OP_GENERAL_END))
 947		return true;
 948
 949	switch (opcode) {
 950	case MLX5_CMD_OP_QUERY_HCA_CAP:
 951	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
 952	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
 953	case MLX5_CMD_OP_QUERY_VPORT_STATE:
 954	case MLX5_CMD_OP_QUERY_ADAPTER:
 955	case MLX5_CMD_OP_QUERY_ISSI:
 956	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
 957	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
 958	case MLX5_CMD_OP_QUERY_VNIC_ENV:
 959	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 960	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
 961	case MLX5_CMD_OP_NOP:
 962	case MLX5_CMD_OP_QUERY_CONG_STATUS:
 963	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
 964	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
 965	case MLX5_CMD_OP_QUERY_LAG:
 966	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 967		return true;
 968	default:
 969		return false;
 970	}
 971}
 972
 973static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
 974	struct uverbs_attr_bundle *attrs)
 975{
 976	struct mlx5_ib_ucontext *c;
 977	struct mlx5_ib_dev *dev;
 978	int user_vector;
 979	int dev_eqn;
 980	int err;
 981
 982	if (uverbs_copy_from(&user_vector, attrs,
 983			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
 984		return -EFAULT;
 985
 986	c = devx_ufile2uctx(attrs);
 987	if (IS_ERR(c))
 988		return PTR_ERR(c);
 989	dev = to_mdev(c->ibucontext.device);
 990
 991	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
 992	if (err < 0)
 993		return err;
 994
 995	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
 996			   &dev_eqn, sizeof(dev_eqn)))
 997		return -EFAULT;
 998
 999	return 0;
1000}
1001
1002/*
1003 *Security note:
1004 * The hardware protection mechanism works like this: Each device object that
1005 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
1006 * the device specification manual) upon its creation. Then upon doorbell,
1007 * hardware fetches the object context for which the doorbell was rang, and
1008 * validates that the UAR through which the DB was rang matches the UAR ID
1009 * of the object.
1010 * If no match the doorbell is silently ignored by the hardware. Of course,
1011 * the user cannot ring a doorbell on a UAR that was not mapped to it.
1012 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
1013 * mailboxes (except tagging them with UID), we expose to the user its UAR
1014 * ID, so it can embed it in these objects in the expected specification
1015 * format. So the only thing the user can do is hurt itself by creating a
1016 * QP/SQ/CQ with a UAR ID other than his, and then in this case other users
1017 * may ring a doorbell on its objects.
1018 * The consequence of that will be that another user can schedule a QP/SQ
1019 * of the buggy user for execution (just insert it to the hardware schedule
1020 * queue or arm its CQ for event generation), no further harm is expected.
1021 */
1022static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
1023	struct uverbs_attr_bundle *attrs)
1024{
1025	struct mlx5_ib_ucontext *c;
1026	struct mlx5_ib_dev *dev;
1027	u32 user_idx;
1028	s32 dev_idx;
1029
1030	c = devx_ufile2uctx(attrs);
1031	if (IS_ERR(c))
1032		return PTR_ERR(c);
1033	dev = to_mdev(c->ibucontext.device);
1034
1035	if (uverbs_copy_from(&user_idx, attrs,
1036			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
1037		return -EFAULT;
1038
1039	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
1040	if (dev_idx < 0)
1041		return dev_idx;
1042
1043	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1044			   &dev_idx, sizeof(dev_idx)))
1045		return -EFAULT;
1046
1047	return 0;
1048}
1049
1050static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
1051	struct uverbs_attr_bundle *attrs)
1052{
1053	struct mlx5_ib_ucontext *c;
1054	struct mlx5_ib_dev *dev;
1055	void *cmd_in = uverbs_attr_get_alloced_ptr(
1056		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
1057	int cmd_out_len = uverbs_attr_get_len(attrs,
1058					MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
1059	void *cmd_out;
1060	int err, err2;
1061	int uid;
1062
1063	c = devx_ufile2uctx(attrs);
1064	if (IS_ERR(c))
1065		return PTR_ERR(c);
1066	dev = to_mdev(c->ibucontext.device);
1067
1068	uid = devx_get_uid(c, cmd_in);
1069	if (uid < 0)
1070		return uid;
1071
1072	/* Only white list of some general HCA commands are allowed for this method. */
1073	if (!devx_is_general_cmd(cmd_in, dev))
1074		return -EINVAL;
1075
1076	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1077	if (IS_ERR(cmd_out))
1078		return PTR_ERR(cmd_out);
1079
1080	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1081	err = mlx5_cmd_do(dev->mdev, cmd_in,
1082			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
1083			  cmd_out, cmd_out_len);
1084	if (err && err != -EREMOTEIO)
1085		return err;
1086
1087	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
1088			      cmd_out_len);
1089
1090	return err2 ?: err;
1091}
1092
1093static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1094				       u32 *dinlen,
1095				       u32 *obj_id)
1096{
1097	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
1098	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1099
1100	*obj_id = devx_get_created_obj_id(in, out, opcode);
1101	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1102	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1103
1104	switch (opcode) {
1105	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
1106		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
1107		MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1108		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
1109			 MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
1110		break;
1111
1112	case MLX5_CMD_OP_CREATE_UMEM:
1113		MLX5_SET(destroy_umem_in, din, opcode,
1114			 MLX5_CMD_OP_DESTROY_UMEM);
1115		MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
1116		break;
1117	case MLX5_CMD_OP_CREATE_MKEY:
1118		MLX5_SET(destroy_mkey_in, din, opcode,
1119			 MLX5_CMD_OP_DESTROY_MKEY);
1120		MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
1121		break;
1122	case MLX5_CMD_OP_CREATE_CQ:
1123		MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1124		MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
1125		break;
1126	case MLX5_CMD_OP_ALLOC_PD:
1127		MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1128		MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
1129		break;
1130	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1131		MLX5_SET(dealloc_transport_domain_in, din, opcode,
1132			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1133		MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
1134			 *obj_id);
1135		break;
1136	case MLX5_CMD_OP_CREATE_RMP:
1137		MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1138		MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
1139		break;
1140	case MLX5_CMD_OP_CREATE_SQ:
1141		MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1142		MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
1143		break;
1144	case MLX5_CMD_OP_CREATE_RQ:
1145		MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1146		MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
1147		break;
1148	case MLX5_CMD_OP_CREATE_RQT:
1149		MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1150		MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
1151		break;
1152	case MLX5_CMD_OP_CREATE_TIR:
1153		MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1154		MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
1155		break;
1156	case MLX5_CMD_OP_CREATE_TIS:
1157		MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1158		MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
1159		break;
1160	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1161		MLX5_SET(dealloc_q_counter_in, din, opcode,
1162			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1163		MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
1164		break;
1165	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1166		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1167		MLX5_SET(destroy_flow_table_in, din, other_vport,
1168			 MLX5_GET(create_flow_table_in,  in, other_vport));
1169		MLX5_SET(destroy_flow_table_in, din, vport_number,
1170			 MLX5_GET(create_flow_table_in,  in, vport_number));
1171		MLX5_SET(destroy_flow_table_in, din, table_type,
1172			 MLX5_GET(create_flow_table_in,  in, table_type));
1173		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1174		MLX5_SET(destroy_flow_table_in, din, opcode,
1175			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1176		break;
1177	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1178		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1179		MLX5_SET(destroy_flow_group_in, din, other_vport,
1180			 MLX5_GET(create_flow_group_in, in, other_vport));
1181		MLX5_SET(destroy_flow_group_in, din, vport_number,
1182			 MLX5_GET(create_flow_group_in, in, vport_number));
1183		MLX5_SET(destroy_flow_group_in, din, table_type,
1184			 MLX5_GET(create_flow_group_in, in, table_type));
1185		MLX5_SET(destroy_flow_group_in, din, table_id,
1186			 MLX5_GET(create_flow_group_in, in, table_id));
1187		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1188		MLX5_SET(destroy_flow_group_in, din, opcode,
1189			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1190		break;
1191	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1192		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1193		MLX5_SET(delete_fte_in, din, other_vport,
1194			 MLX5_GET(set_fte_in,  in, other_vport));
1195		MLX5_SET(delete_fte_in, din, vport_number,
1196			 MLX5_GET(set_fte_in, in, vport_number));
1197		MLX5_SET(delete_fte_in, din, table_type,
1198			 MLX5_GET(set_fte_in, in, table_type));
1199		MLX5_SET(delete_fte_in, din, table_id,
1200			 MLX5_GET(set_fte_in, in, table_id));
1201		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1202		MLX5_SET(delete_fte_in, din, opcode,
1203			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1204		break;
1205	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1206		MLX5_SET(dealloc_flow_counter_in, din, opcode,
1207			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1208		MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
1209			 *obj_id);
1210		break;
1211	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
1212		MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
1213			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
1214		MLX5_SET(dealloc_packet_reformat_context_in, din,
1215			 packet_reformat_id, *obj_id);
1216		break;
1217	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
1218		MLX5_SET(dealloc_modify_header_context_in, din, opcode,
1219			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
1220		MLX5_SET(dealloc_modify_header_context_in, din,
1221			 modify_header_id, *obj_id);
1222		break;
1223	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
1224		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
1225		MLX5_SET(destroy_scheduling_element_in, din,
1226			 scheduling_hierarchy,
1227			 MLX5_GET(create_scheduling_element_in, in,
1228				  scheduling_hierarchy));
1229		MLX5_SET(destroy_scheduling_element_in, din,
1230			 scheduling_element_id, *obj_id);
1231		MLX5_SET(destroy_scheduling_element_in, din, opcode,
1232			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
1233		break;
1234	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
1235		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
1236		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
1237		MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
1238			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
1239		break;
1240	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
1241		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
1242		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
1243		MLX5_SET(delete_l2_table_entry_in, din, opcode,
1244			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
1245		break;
1246	case MLX5_CMD_OP_CREATE_QP:
1247		MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
1248		MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
1249		break;
1250	case MLX5_CMD_OP_CREATE_SRQ:
1251		MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
1252		MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
1253		break;
1254	case MLX5_CMD_OP_CREATE_XRC_SRQ:
1255		MLX5_SET(destroy_xrc_srq_in, din, opcode,
1256			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
1257		MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
1258		break;
1259	case MLX5_CMD_OP_CREATE_DCT:
1260		MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
1261		MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
1262		break;
1263	case MLX5_CMD_OP_CREATE_XRQ:
1264		MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
1265		MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
1266		break;
1267	case MLX5_CMD_OP_ATTACH_TO_MCG:
1268		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
1269		MLX5_SET(detach_from_mcg_in, din, qpn,
1270			 MLX5_GET(attach_to_mcg_in, in, qpn));
1271		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
1272		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
1273		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
1274		MLX5_SET(detach_from_mcg_in, din, opcode,
1275			 MLX5_CMD_OP_DETACH_FROM_MCG);
1276		MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
1277		break;
1278	case MLX5_CMD_OP_ALLOC_XRCD:
1279		MLX5_SET(dealloc_xrcd_in, din, opcode,
1280			 MLX5_CMD_OP_DEALLOC_XRCD);
1281		MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
1282		break;
1283	case MLX5_CMD_OP_CREATE_PSV:
1284		MLX5_SET(destroy_psv_in, din, opcode,
1285			 MLX5_CMD_OP_DESTROY_PSV);
1286		MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
1287		break;
1288	default:
1289		/* The entry must match to one of the devx_is_obj_create_cmd */
1290		WARN_ON(true);
1291		break;
1292	}
1293}
1294
1295static int devx_handle_mkey_indirect(struct devx_obj *obj,
1296				     struct mlx5_ib_dev *dev,
1297				     void *in, void *out)
1298{
1299	struct mlx5_ib_mkey *mkey = &obj->mkey;
1300	void *mkc;
1301	u8 key;
1302
1303	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1304	key = MLX5_GET(mkc, mkc, mkey_7_0);
1305	mkey->key = mlx5_idx_to_mkey(
1306			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
1307	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
1308	mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
1309	init_waitqueue_head(&mkey->wait);
1310
1311	return mlx5r_store_odp_mkey(dev, mkey);
1312}
1313
1314static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1315				   struct devx_obj *obj,
1316				   void *in, int in_len)
1317{
1318	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1319			MLX5_FLD_SZ_BYTES(create_mkey_in,
1320			memory_key_mkey_entry);
1321	void *mkc;
1322	u8 access_mode;
1323
1324	if (in_len < min_len)
1325		return -EINVAL;
1326
1327	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1328
1329	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
1330	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1331
1332	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
1333		access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
1334		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1335			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
1336		return 0;
1337	}
1338
1339	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1340	return 0;
1341}
1342
1343static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
1344				      struct devx_event_subscription *sub)
1345{
1346	struct devx_event *event;
1347	struct devx_obj_event *xa_val_level2;
1348
1349	if (sub->is_cleaned)
1350		return;
1351
1352	sub->is_cleaned = 1;
1353	list_del_rcu(&sub->xa_list);
1354
1355	if (list_empty(&sub->obj_list))
1356		return;
1357
1358	list_del_rcu(&sub->obj_list);
1359	/* check whether key level 1 for this obj_sub_list is empty */
1360	event = xa_load(&dev->devx_event_table.event_xa,
1361			sub->xa_key_level1);
1362	WARN_ON(!event);
1363
1364	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
1365	if (list_empty(&xa_val_level2->obj_sub_list)) {
1366		xa_erase(&event->object_ids,
1367			 sub->xa_key_level2);
1368		kfree_rcu(xa_val_level2, rcu);
1369	}
1370}
1371
1372static int devx_obj_cleanup(struct ib_uobject *uobject,
1373			    enum rdma_remove_reason why,
1374			    struct uverbs_attr_bundle *attrs)
1375{
1376	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1377	struct mlx5_devx_event_table *devx_event_table;
1378	struct devx_obj *obj = uobject->object;
1379	struct devx_event_subscription *sub_entry, *tmp;
1380	struct mlx5_ib_dev *dev;
1381	int ret;
1382
1383	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1384	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
1385	    xa_erase(&obj->ib_dev->odp_mkeys,
1386		     mlx5_base_mkey(obj->mkey.key)))
1387		/*
1388		 * The pagefault_single_data_segment() does commands against
1389		 * the mmkey, we must wait for that to stop before freeing the
1390		 * mkey, as another allocation could get the same mkey #.
1391		 */
1392		mlx5r_deref_wait_odp_mkey(&obj->mkey);
1393
1394	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1395		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1396	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1397		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1398	else
1399		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
1400				    obj->dinlen, out, sizeof(out));
1401	if (ret)
1402		return ret;
1403
1404	devx_event_table = &dev->devx_event_table;
1405
1406	mutex_lock(&devx_event_table->event_xa_lock);
1407	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
1408		devx_cleanup_subscription(dev, sub_entry);
1409	mutex_unlock(&devx_event_table->event_xa_lock);
1410
1411	kfree(obj);
1412	return ret;
1413}
1414
1415static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
1416{
1417	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
1418	struct mlx5_devx_event_table *table;
1419	struct devx_event *event;
1420	struct devx_obj_event *obj_event;
1421	u32 obj_id = mcq->cqn;
1422
1423	table = &obj->ib_dev->devx_event_table;
1424	rcu_read_lock();
1425	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
1426	if (!event)
1427		goto out;
1428
1429	obj_event = xa_load(&event->object_ids, obj_id);
1430	if (!obj_event)
1431		goto out;
1432
1433	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
1434out:
1435	rcu_read_unlock();
1436}
1437
1438static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
1439{
1440	if (!MLX5_CAP_GEN(dev->mdev, apu) ||
1441	    !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
1442		return false;
1443
1444	return true;
1445}
1446
1447static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1448	struct uverbs_attr_bundle *attrs)
1449{
1450	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1451	int cmd_out_len =  uverbs_attr_get_len(attrs,
1452					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
1453	int cmd_in_len = uverbs_attr_get_len(attrs,
1454					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1455	void *cmd_out;
1456	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1457		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
1458	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1459		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1460	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1461	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1462	struct devx_obj *obj;
1463	u16 obj_type = 0;
1464	int err, err2 = 0;
1465	int uid;
1466	u32 obj_id;
1467	u16 opcode;
1468
1469	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1470		return -EINVAL;
1471
1472	uid = devx_get_uid(c, cmd_in);
1473	if (uid < 0)
1474		return uid;
1475
1476	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
1477		return -EINVAL;
1478
1479	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1480	if (IS_ERR(cmd_out))
1481		return PTR_ERR(cmd_out);
1482
1483	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
1484	if (!obj)
1485		return -ENOMEM;
1486
1487	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1488	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
1489		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
1490		if (err)
1491			goto obj_free;
1492	} else {
1493		devx_set_umem_valid(cmd_in);
1494	}
1495
1496	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1497		obj->flags |= DEVX_OBJ_FLAGS_DCT;
1498		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
1499					   cmd_in_len, cmd_out, cmd_out_len);
1500	} else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
1501		   !is_apu_cq(dev, cmd_in)) {
1502		obj->flags |= DEVX_OBJ_FLAGS_CQ;
1503		obj->core_cq.comp = devx_cq_comp;
1504		err = mlx5_create_cq(dev->mdev, &obj->core_cq,
1505				     cmd_in, cmd_in_len, cmd_out,
1506				     cmd_out_len);
1507	} else {
1508		err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
1509				  cmd_out, cmd_out_len);
1510	}
1511
1512	if (err == -EREMOTEIO)
1513		err2 = uverbs_copy_to(attrs,
1514				      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
1515				      cmd_out, cmd_out_len);
1516	if (err)
1517		goto obj_free;
1518
1519	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
1520		u8 bulk = MLX5_GET(alloc_flow_counter_in,
1521				   cmd_in,
1522				   flow_counter_bulk);
1523		obj->flow_counter_bulk_size = 128UL * bulk;
 
 
 
 
 
 
 
1524	}
1525
1526	uobj->object = obj;
1527	INIT_LIST_HEAD(&obj->event_sub);
1528	obj->ib_dev = dev;
1529	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
1530				   &obj_id);
1531	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
1532
1533	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
1534	if (err)
1535		goto obj_destroy;
1536
1537	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
1538		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
1539	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
1540
1541	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1542		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
1543		if (err)
1544			goto obj_destroy;
1545	}
1546	return 0;
1547
1548obj_destroy:
1549	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1550		mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
1551	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1552		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1553	else
1554		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
1555			      sizeof(out));
1556obj_free:
1557	kfree(obj);
1558	return err2 ?: err;
1559}
1560
1561static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
1562	struct uverbs_attr_bundle *attrs)
1563{
1564	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1565	int cmd_out_len = uverbs_attr_get_len(attrs,
1566					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1567	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1568							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
1569	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1570		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1571	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1572	void *cmd_out;
1573	int err, err2;
1574	int uid;
1575
1576	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1577		return -EINVAL;
1578
1579	uid = devx_get_uid(c, cmd_in);
1580	if (uid < 0)
1581		return uid;
1582
1583	if (!devx_is_obj_modify_cmd(cmd_in))
1584		return -EINVAL;
1585
1586	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1587		return -EINVAL;
1588
1589	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1590	if (IS_ERR(cmd_out))
1591		return PTR_ERR(cmd_out);
1592
1593	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1594	devx_set_umem_valid(cmd_in);
1595
1596	err = mlx5_cmd_do(mdev->mdev, cmd_in,
1597			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1598			  cmd_out, cmd_out_len);
1599	if (err && err != -EREMOTEIO)
1600		return err;
1601
1602	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1603			      cmd_out, cmd_out_len);
1604
1605	return err2 ?: err;
1606}
1607
1608static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
1609	struct uverbs_attr_bundle *attrs)
1610{
1611	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1612	int cmd_out_len = uverbs_attr_get_len(attrs,
1613					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1614	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1615							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
1616	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1617		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1618	void *cmd_out;
1619	int err, err2;
1620	int uid;
1621	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1622
1623	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1624		return -EINVAL;
1625
1626	uid = devx_get_uid(c, cmd_in);
1627	if (uid < 0)
1628		return uid;
1629
1630	if (!devx_is_obj_query_cmd(cmd_in))
1631		return -EINVAL;
1632
1633	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1634		return -EINVAL;
1635
1636	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1637	if (IS_ERR(cmd_out))
1638		return PTR_ERR(cmd_out);
1639
1640	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1641	err = mlx5_cmd_do(mdev->mdev, cmd_in,
1642			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1643			  cmd_out, cmd_out_len);
1644	if (err && err != -EREMOTEIO)
1645		return err;
1646
1647	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1648			      cmd_out, cmd_out_len);
1649
1650	return err2 ?: err;
1651}
1652
1653struct devx_async_event_queue {
1654	spinlock_t		lock;
1655	wait_queue_head_t	poll_wait;
1656	struct list_head	event_list;
1657	atomic_t		bytes_in_use;
1658	u8			is_destroyed:1;
1659};
1660
1661struct devx_async_cmd_event_file {
1662	struct ib_uobject		uobj;
1663	struct devx_async_event_queue	ev_queue;
1664	struct mlx5_async_ctx		async_ctx;
1665};
1666
1667static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1668{
1669	spin_lock_init(&ev_queue->lock);
1670	INIT_LIST_HEAD(&ev_queue->event_list);
1671	init_waitqueue_head(&ev_queue->poll_wait);
1672	atomic_set(&ev_queue->bytes_in_use, 0);
1673	ev_queue->is_destroyed = 0;
1674}
1675
1676static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1677	struct uverbs_attr_bundle *attrs)
1678{
1679	struct devx_async_cmd_event_file *ev_file;
1680
1681	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1682		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
1683	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
1684
1685	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1686			       uobj);
1687	devx_init_event_queue(&ev_file->ev_queue);
1688	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1689	return 0;
1690}
1691
1692static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
1693	struct uverbs_attr_bundle *attrs)
1694{
1695	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1696		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
1697	struct devx_async_event_file *ev_file;
1698	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1699		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1700	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1701	u32 flags;
1702	int err;
1703
1704	err = uverbs_get_flags32(&flags, attrs,
1705		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
1706		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
1707
1708	if (err)
1709		return err;
1710
1711	ev_file = container_of(uobj, struct devx_async_event_file,
1712			       uobj);
1713	spin_lock_init(&ev_file->lock);
1714	INIT_LIST_HEAD(&ev_file->event_list);
1715	init_waitqueue_head(&ev_file->poll_wait);
1716	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
1717		ev_file->omit_data = 1;
1718	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
1719	ev_file->dev = dev;
1720	get_device(&dev->ib_dev.dev);
1721	return 0;
1722}
1723
1724static void devx_query_callback(int status, struct mlx5_async_work *context)
1725{
1726	struct devx_async_data *async_data =
1727		container_of(context, struct devx_async_data, cb_work);
1728	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
1729	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
1730	unsigned long flags;
1731
1732	/*
1733	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
1734	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
1735	 * routine returns, ensuring that it always remains valid here.
1736	 */
1737	spin_lock_irqsave(&ev_queue->lock, flags);
1738	list_add_tail(&async_data->list, &ev_queue->event_list);
1739	spin_unlock_irqrestore(&ev_queue->lock, flags);
1740
1741	wake_up_interruptible(&ev_queue->poll_wait);
1742}
1743
1744#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1745
1746static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
1747	struct uverbs_attr_bundle *attrs)
1748{
1749	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
1750				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
1751	struct ib_uobject *uobj = uverbs_attr_get_uobject(
1752				attrs,
1753				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
1754	u16 cmd_out_len;
1755	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1756		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1757	struct ib_uobject *fd_uobj;
1758	int err;
1759	int uid;
1760	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1761	struct devx_async_cmd_event_file *ev_file;
1762	struct devx_async_data *async_data;
1763
1764	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1765		return -EINVAL;
1766
1767	uid = devx_get_uid(c, cmd_in);
1768	if (uid < 0)
1769		return uid;
1770
1771	if (!devx_is_obj_query_cmd(cmd_in))
1772		return -EINVAL;
1773
1774	err = uverbs_get_const(&cmd_out_len, attrs,
1775			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
1776	if (err)
1777		return err;
1778
1779	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1780		return -EINVAL;
1781
1782	fd_uobj = uverbs_attr_get_uobject(attrs,
1783				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
1784	if (IS_ERR(fd_uobj))
1785		return PTR_ERR(fd_uobj);
1786
1787	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1788			       uobj);
1789
1790	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
1791			MAX_ASYNC_BYTES_IN_USE) {
1792		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1793		return -EAGAIN;
1794	}
1795
1796	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
1797					  cmd_out_len), GFP_KERNEL);
1798	if (!async_data) {
1799		err = -ENOMEM;
1800		goto sub_bytes;
1801	}
1802
1803	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
1804			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
1805	if (err)
1806		goto free_async;
1807
1808	async_data->cmd_out_len = cmd_out_len;
1809	async_data->mdev = mdev;
1810	async_data->ev_file = ev_file;
1811
1812	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1813	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
1814		    uverbs_attr_get_len(attrs,
1815				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
1816		    async_data->hdr.out_data,
1817		    async_data->cmd_out_len,
1818		    devx_query_callback, &async_data->cb_work);
1819
1820	if (err)
1821		goto free_async;
1822
1823	return 0;
1824
1825free_async:
1826	kvfree(async_data);
1827sub_bytes:
1828	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1829	return err;
1830}
1831
1832static void
1833subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
1834			   u32 key_level1,
1835			   bool is_level2,
1836			   u32 key_level2)
1837{
1838	struct devx_event *event;
1839	struct devx_obj_event *xa_val_level2;
1840
1841	/* Level 1 is valid for future use, no need to free */
1842	if (!is_level2)
1843		return;
1844
1845	event = xa_load(&devx_event_table->event_xa, key_level1);
1846	WARN_ON(!event);
1847
1848	xa_val_level2 = xa_load(&event->object_ids,
1849				key_level2);
1850	if (list_empty(&xa_val_level2->obj_sub_list)) {
1851		xa_erase(&event->object_ids,
1852			 key_level2);
1853		kfree_rcu(xa_val_level2, rcu);
1854	}
1855}
1856
1857static int
1858subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
1859			 u32 key_level1,
1860			 bool is_level2,
1861			 u32 key_level2)
1862{
1863	struct devx_obj_event *obj_event;
1864	struct devx_event *event;
1865	int err;
1866
1867	event = xa_load(&devx_event_table->event_xa, key_level1);
1868	if (!event) {
1869		event = kzalloc(sizeof(*event), GFP_KERNEL);
1870		if (!event)
1871			return -ENOMEM;
1872
1873		INIT_LIST_HEAD(&event->unaffiliated_list);
1874		xa_init(&event->object_ids);
1875
1876		err = xa_insert(&devx_event_table->event_xa,
1877				key_level1,
1878				event,
1879				GFP_KERNEL);
1880		if (err) {
1881			kfree(event);
1882			return err;
1883		}
1884	}
1885
1886	if (!is_level2)
1887		return 0;
1888
1889	obj_event = xa_load(&event->object_ids, key_level2);
1890	if (!obj_event) {
1891		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
1892		if (!obj_event)
1893			/* Level1 is valid for future use, no need to free */
1894			return -ENOMEM;
1895
1896		err = xa_insert(&event->object_ids,
1897				key_level2,
1898				obj_event,
1899				GFP_KERNEL);
1900		if (err) {
1901			kfree(obj_event);
1902			return err;
1903		}
1904		INIT_LIST_HEAD(&obj_event->obj_sub_list);
1905	}
1906
1907	return 0;
1908}
1909
1910static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
1911				   struct devx_obj *obj)
1912{
1913	int i;
1914
1915	for (i = 0; i < num_events; i++) {
1916		if (obj) {
1917			if (!is_legacy_obj_event_num(event_type_num_list[i]))
1918				return false;
1919		} else if (!is_legacy_unaffiliated_event_num(
1920				event_type_num_list[i])) {
1921			return false;
1922		}
1923	}
1924
1925	return true;
1926}
1927
1928#define MAX_SUPP_EVENT_NUM 255
1929static bool is_valid_events(struct mlx5_core_dev *dev,
1930			    int num_events, u16 *event_type_num_list,
1931			    struct devx_obj *obj)
1932{
1933	__be64 *aff_events;
1934	__be64 *unaff_events;
1935	int mask_entry;
1936	int mask_bit;
1937	int i;
1938
1939	if (MLX5_CAP_GEN(dev, event_cap)) {
1940		aff_events = MLX5_CAP_DEV_EVENT(dev,
1941						user_affiliated_events);
1942		unaff_events = MLX5_CAP_DEV_EVENT(dev,
1943						  user_unaffiliated_events);
1944	} else {
1945		return is_valid_events_legacy(num_events, event_type_num_list,
1946					      obj);
1947	}
1948
1949	for (i = 0; i < num_events; i++) {
1950		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
1951			return false;
1952
1953		mask_entry = event_type_num_list[i] / 64;
1954		mask_bit = event_type_num_list[i] % 64;
1955
1956		if (obj) {
1957			/* CQ completion */
1958			if (event_type_num_list[i] == 0)
1959				continue;
1960
1961			if (!(be64_to_cpu(aff_events[mask_entry]) &
1962					(1ull << mask_bit)))
1963				return false;
1964
1965			continue;
1966		}
1967
1968		if (!(be64_to_cpu(unaff_events[mask_entry]) &
1969				(1ull << mask_bit)))
1970			return false;
1971	}
1972
1973	return true;
1974}
1975
1976#define MAX_NUM_EVENTS 16
1977static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1978	struct uverbs_attr_bundle *attrs)
1979{
1980	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
1981				attrs,
1982				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
1983	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1984		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1985	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1986	struct ib_uobject *fd_uobj;
1987	struct devx_obj *obj = NULL;
1988	struct devx_async_event_file *ev_file;
1989	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
1990	u16 *event_type_num_list;
1991	struct devx_event_subscription *event_sub, *tmp_sub;
1992	struct list_head sub_list;
1993	int redirect_fd;
1994	bool use_eventfd = false;
1995	int num_events;
1996	int num_alloc_xa_entries = 0;
1997	u16 obj_type = 0;
1998	u64 cookie = 0;
1999	u32 obj_id = 0;
2000	int err;
2001	int i;
2002
2003	if (!c->devx_uid)
2004		return -EINVAL;
2005
2006	if (!IS_ERR(devx_uobj)) {
2007		obj = (struct devx_obj *)devx_uobj->object;
2008		if (obj)
2009			obj_id = get_dec_obj_id(obj->obj_id);
2010	}
2011
2012	fd_uobj = uverbs_attr_get_uobject(attrs,
2013				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
2014	if (IS_ERR(fd_uobj))
2015		return PTR_ERR(fd_uobj);
2016
2017	ev_file = container_of(fd_uobj, struct devx_async_event_file,
2018			       uobj);
2019
2020	if (uverbs_attr_is_valid(attrs,
2021				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
2022		err = uverbs_copy_from(&redirect_fd, attrs,
2023			       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
2024		if (err)
2025			return err;
2026
2027		use_eventfd = true;
2028	}
2029
2030	if (uverbs_attr_is_valid(attrs,
2031				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
2032		if (use_eventfd)
2033			return -EINVAL;
2034
2035		err = uverbs_copy_from(&cookie, attrs,
2036				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
2037		if (err)
2038			return err;
2039	}
2040
2041	num_events = uverbs_attr_ptr_get_array_size(
2042		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2043		sizeof(u16));
2044
2045	if (num_events < 0)
2046		return num_events;
2047
2048	if (num_events > MAX_NUM_EVENTS)
2049		return -EINVAL;
2050
2051	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
2052			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
2053
2054	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
2055		return -EINVAL;
2056
2057	INIT_LIST_HEAD(&sub_list);
2058
2059	/* Protect from concurrent subscriptions to same XA entries to allow
2060	 * both to succeed
2061	 */
2062	mutex_lock(&devx_event_table->event_xa_lock);
2063	for (i = 0; i < num_events; i++) {
2064		u32 key_level1;
2065
2066		if (obj)
2067			obj_type = get_dec_obj_type(obj,
2068						    event_type_num_list[i]);
2069		key_level1 = event_type_num_list[i] | obj_type << 16;
2070
2071		err = subscribe_event_xa_alloc(devx_event_table,
2072					       key_level1,
2073					       obj,
2074					       obj_id);
2075		if (err)
2076			goto err;
2077
2078		num_alloc_xa_entries++;
2079		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
2080		if (!event_sub) {
2081			err = -ENOMEM;
2082			goto err;
2083		}
2084
2085		list_add_tail(&event_sub->event_list, &sub_list);
2086		uverbs_uobject_get(&ev_file->uobj);
2087		if (use_eventfd) {
2088			event_sub->eventfd =
2089				eventfd_ctx_fdget(redirect_fd);
2090
2091			if (IS_ERR(event_sub->eventfd)) {
2092				err = PTR_ERR(event_sub->eventfd);
2093				event_sub->eventfd = NULL;
2094				goto err;
2095			}
2096		}
2097
2098		event_sub->cookie = cookie;
2099		event_sub->ev_file = ev_file;
2100		/* May be needed upon cleanup the devx object/subscription */
2101		event_sub->xa_key_level1 = key_level1;
2102		event_sub->xa_key_level2 = obj_id;
2103		INIT_LIST_HEAD(&event_sub->obj_list);
2104	}
2105
2106	/* Once all the allocations and the XA data insertions were done we
2107	 * can go ahead and add all the subscriptions to the relevant lists
2108	 * without concern of a failure.
2109	 */
2110	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2111		struct devx_event *event;
2112		struct devx_obj_event *obj_event;
2113
2114		list_del_init(&event_sub->event_list);
2115
2116		spin_lock_irq(&ev_file->lock);
2117		list_add_tail_rcu(&event_sub->file_list,
2118				  &ev_file->subscribed_events_list);
2119		spin_unlock_irq(&ev_file->lock);
2120
2121		event = xa_load(&devx_event_table->event_xa,
2122				event_sub->xa_key_level1);
2123		WARN_ON(!event);
2124
2125		if (!obj) {
2126			list_add_tail_rcu(&event_sub->xa_list,
2127					  &event->unaffiliated_list);
2128			continue;
2129		}
2130
2131		obj_event = xa_load(&event->object_ids, obj_id);
2132		WARN_ON(!obj_event);
2133		list_add_tail_rcu(&event_sub->xa_list,
2134				  &obj_event->obj_sub_list);
2135		list_add_tail_rcu(&event_sub->obj_list,
2136				  &obj->event_sub);
2137	}
2138
2139	mutex_unlock(&devx_event_table->event_xa_lock);
2140	return 0;
2141
2142err:
2143	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2144		list_del(&event_sub->event_list);
2145
2146		subscribe_event_xa_dealloc(devx_event_table,
2147					   event_sub->xa_key_level1,
2148					   obj,
2149					   obj_id);
2150
2151		if (event_sub->eventfd)
2152			eventfd_ctx_put(event_sub->eventfd);
2153		uverbs_uobject_put(&event_sub->ev_file->uobj);
2154		kfree(event_sub);
2155	}
2156
2157	mutex_unlock(&devx_event_table->event_xa_lock);
2158	return err;
2159}
2160
2161static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2162			 struct uverbs_attr_bundle *attrs,
2163			 struct devx_umem *obj, u32 access_flags)
2164{
2165	u64 addr;
2166	size_t size;
2167	int err;
2168
2169	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
2170	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
2171		return -EFAULT;
2172
2173	err = ib_check_mr_access(&dev->ib_dev, access_flags);
2174	if (err)
2175		return err;
2176
2177	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
2178		struct ib_umem_dmabuf *umem_dmabuf;
2179		int dmabuf_fd;
2180
2181		err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
2182					MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
2183		if (err)
2184			return -EFAULT;
2185
2186		umem_dmabuf = ib_umem_dmabuf_get_pinned(
2187			&dev->ib_dev, addr, size, dmabuf_fd, access_flags);
2188		if (IS_ERR(umem_dmabuf))
2189			return PTR_ERR(umem_dmabuf);
2190		obj->umem = &umem_dmabuf->umem;
2191	} else {
2192		obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
2193		if (IS_ERR(obj->umem))
2194			return PTR_ERR(obj->umem);
2195	}
2196	return 0;
2197}
2198
2199static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
2200					       unsigned long pgsz_bitmap)
2201{
2202	unsigned long page_size;
2203
2204	/* Don't bother checking larger page sizes as offset must be zero and
2205	 * total DEVX umem length must be equal to total umem length.
2206	 */
2207	pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
2208					 PAGE_SHIFT),
2209				   MLX5_ADAPTER_PAGE_SHIFT);
2210	if (!pgsz_bitmap)
2211		return 0;
2212
2213	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
2214	if (!page_size)
2215		return 0;
2216
2217	/* If the page_size is less than the CPU page size then we can use the
2218	 * offset and create a umem which is a subset of the page list.
2219	 * For larger page sizes we can't be sure the DMA  list reflects the
2220	 * VA so we must ensure that the umem extent is exactly equal to the
2221	 * page list. Reduce the page size until one of these cases is true.
2222	 */
2223	while ((ib_umem_dma_offset(umem, page_size) != 0 ||
2224		(umem->length % page_size) != 0) &&
2225		page_size > PAGE_SIZE)
2226		page_size /= 2;
2227
2228	return page_size;
2229}
2230
2231static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
2232				   struct uverbs_attr_bundle *attrs,
2233				   struct devx_umem *obj,
2234				   struct devx_umem_reg_cmd *cmd,
2235				   int access)
2236{
2237	unsigned long pgsz_bitmap;
2238	unsigned int page_size;
2239	__be64 *mtt;
2240	void *umem;
2241	int ret;
2242
2243	/*
2244	 * If the user does not pass in pgsz_bitmap then the user promises not
2245	 * to use umem_offset!=0 in any commands that allocate on top of the
2246	 * umem.
2247	 *
2248	 * If the user wants to use a umem_offset then it must pass in
2249	 * pgsz_bitmap which guides the maximum page size and thus maximum
2250	 * object alignment inside the umem. See the PRM.
2251	 *
2252	 * Users are not allowed to use IOVA here, mkeys are not supported on
2253	 * umem.
2254	 */
2255	ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
2256			MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2257			GENMASK_ULL(63,
2258				    min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
2259	if (ret)
2260		return ret;
2261
2262	page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
2263	if (!page_size)
2264		return -EINVAL;
2265
2266	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2267		     (MLX5_ST_SZ_BYTES(mtt) *
2268		      ib_umem_num_dma_blocks(obj->umem, page_size));
2269	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2270	if (IS_ERR(cmd->in))
2271		return PTR_ERR(cmd->in);
2272
2273	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2274	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2275
2276	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2277	MLX5_SET64(umem, umem, num_of_mtt,
2278		   ib_umem_num_dma_blocks(obj->umem, page_size));
2279	MLX5_SET(umem, umem, log_page_size,
2280		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
2281	MLX5_SET(umem, umem, page_offset,
2282		 ib_umem_dma_offset(obj->umem, page_size));
2283
2284	if (mlx5_umem_needs_ats(dev, obj->umem, access))
2285		MLX5_SET(umem, umem, ats, 1);
2286
2287	mlx5_ib_populate_pas(obj->umem, page_size, mtt,
2288			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
2289				     MLX5_IB_MTT_READ);
2290	return 0;
2291}
2292
2293static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2294	struct uverbs_attr_bundle *attrs)
2295{
2296	struct devx_umem_reg_cmd cmd;
2297	struct devx_umem *obj;
2298	struct ib_uobject *uobj = uverbs_attr_get_uobject(
2299		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2300	u32 obj_id;
2301	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2302		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2303	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2304	int access_flags;
2305	int err;
2306
2307	if (!c->devx_uid)
2308		return -EINVAL;
2309
2310	err = uverbs_get_flags32(&access_flags, attrs,
2311				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2312				 IB_ACCESS_LOCAL_WRITE |
2313				 IB_ACCESS_REMOTE_WRITE |
2314				 IB_ACCESS_REMOTE_READ |
2315				 IB_ACCESS_RELAXED_ORDERING);
2316	if (err)
2317		return err;
2318
2319	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
2320	if (!obj)
2321		return -ENOMEM;
2322
2323	err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
2324	if (err)
2325		goto err_obj_free;
2326
2327	err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
2328	if (err)
2329		goto err_umem_release;
2330
2331	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
2332	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
2333			    sizeof(cmd.out));
2334	if (err)
2335		goto err_umem_release;
2336
2337	obj->mdev = dev->mdev;
2338	uobj->object = obj;
2339	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
2340	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2341
2342	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
2343			     sizeof(obj_id));
2344	return err;
2345
2346err_umem_release:
2347	ib_umem_release(obj->umem);
2348err_obj_free:
2349	kfree(obj);
2350	return err;
2351}
2352
2353static int devx_umem_cleanup(struct ib_uobject *uobject,
2354			     enum rdma_remove_reason why,
2355			     struct uverbs_attr_bundle *attrs)
2356{
2357	struct devx_umem *obj = uobject->object;
2358	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
2359	int err;
2360
2361	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
2362	if (err)
2363		return err;
2364
2365	ib_umem_release(obj->umem);
2366	kfree(obj);
2367	return 0;
2368}
2369
2370static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2371				  unsigned long event_type)
2372{
2373	__be64 *unaff_events;
2374	int mask_entry;
2375	int mask_bit;
2376
2377	if (!MLX5_CAP_GEN(dev, event_cap))
2378		return is_legacy_unaffiliated_event_num(event_type);
2379
2380	unaff_events = MLX5_CAP_DEV_EVENT(dev,
2381					  user_unaffiliated_events);
2382	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2383
2384	mask_entry = event_type / 64;
2385	mask_bit = event_type % 64;
2386
2387	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
2388		return false;
2389
2390	return true;
2391}
2392
2393static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
2394{
2395	struct mlx5_eqe *eqe = data;
2396	u32 obj_id = 0;
2397
2398	switch (event_type) {
2399	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2400	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2401	case MLX5_EVENT_TYPE_PATH_MIG:
2402	case MLX5_EVENT_TYPE_COMM_EST:
2403	case MLX5_EVENT_TYPE_SQ_DRAINED:
2404	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2405	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2406	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2407	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2408	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
2409		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
2410		break;
2411	case MLX5_EVENT_TYPE_XRQ_ERROR:
2412		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
2413		break;
2414	case MLX5_EVENT_TYPE_DCT_DRAINED:
2415	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
2416		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
2417		break;
2418	case MLX5_EVENT_TYPE_CQ_ERROR:
2419		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2420		break;
2421	default:
2422		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
2423		break;
2424	}
2425
2426	return obj_id;
2427}
2428
2429static int deliver_event(struct devx_event_subscription *event_sub,
2430			 const void *data)
2431{
2432	struct devx_async_event_file *ev_file;
2433	struct devx_async_event_data *event_data;
2434	unsigned long flags;
2435
2436	ev_file = event_sub->ev_file;
2437
2438	if (ev_file->omit_data) {
2439		spin_lock_irqsave(&ev_file->lock, flags);
2440		if (!list_empty(&event_sub->event_list) ||
2441		    ev_file->is_destroyed) {
2442			spin_unlock_irqrestore(&ev_file->lock, flags);
2443			return 0;
2444		}
2445
2446		list_add_tail(&event_sub->event_list, &ev_file->event_list);
2447		spin_unlock_irqrestore(&ev_file->lock, flags);
2448		wake_up_interruptible(&ev_file->poll_wait);
2449		return 0;
2450	}
2451
2452	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
2453			     GFP_ATOMIC);
2454	if (!event_data) {
2455		spin_lock_irqsave(&ev_file->lock, flags);
2456		ev_file->is_overflow_err = 1;
2457		spin_unlock_irqrestore(&ev_file->lock, flags);
2458		return -ENOMEM;
2459	}
2460
2461	event_data->hdr.cookie = event_sub->cookie;
2462	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
2463
2464	spin_lock_irqsave(&ev_file->lock, flags);
2465	if (!ev_file->is_destroyed)
2466		list_add_tail(&event_data->list, &ev_file->event_list);
2467	else
2468		kfree(event_data);
2469	spin_unlock_irqrestore(&ev_file->lock, flags);
2470	wake_up_interruptible(&ev_file->poll_wait);
2471
2472	return 0;
2473}
2474
2475static void dispatch_event_fd(struct list_head *fd_list,
2476			      const void *data)
2477{
2478	struct devx_event_subscription *item;
2479
2480	list_for_each_entry_rcu(item, fd_list, xa_list) {
2481		if (item->eventfd)
2482			eventfd_signal(item->eventfd, 1);
2483		else
2484			deliver_event(item, data);
2485	}
2486}
2487
2488static int devx_event_notifier(struct notifier_block *nb,
2489			       unsigned long event_type, void *data)
2490{
2491	struct mlx5_devx_event_table *table;
2492	struct mlx5_ib_dev *dev;
2493	struct devx_event *event;
2494	struct devx_obj_event *obj_event;
2495	u16 obj_type = 0;
2496	bool is_unaffiliated;
2497	u32 obj_id;
2498
2499	/* Explicit filtering to kernel events which may occur frequently */
2500	if (event_type == MLX5_EVENT_TYPE_CMD ||
2501	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
2502		return NOTIFY_OK;
2503
2504	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
2505	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
2506	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2507
2508	if (!is_unaffiliated)
2509		obj_type = get_event_obj_type(event_type, data);
2510
2511	rcu_read_lock();
2512	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
2513	if (!event) {
2514		rcu_read_unlock();
2515		return NOTIFY_DONE;
2516	}
2517
2518	if (is_unaffiliated) {
2519		dispatch_event_fd(&event->unaffiliated_list, data);
2520		rcu_read_unlock();
2521		return NOTIFY_OK;
2522	}
2523
2524	obj_id = devx_get_obj_id_from_event(event_type, data);
2525	obj_event = xa_load(&event->object_ids, obj_id);
2526	if (!obj_event) {
2527		rcu_read_unlock();
2528		return NOTIFY_DONE;
2529	}
2530
2531	dispatch_event_fd(&obj_event->obj_sub_list, data);
2532
2533	rcu_read_unlock();
2534	return NOTIFY_OK;
2535}
2536
2537int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
2538{
2539	struct mlx5_devx_event_table *table = &dev->devx_event_table;
2540	int uid;
2541
2542	uid = mlx5_ib_devx_create(dev, false);
2543	if (uid > 0) {
2544		dev->devx_whitelist_uid = uid;
2545		xa_init(&table->event_xa);
2546		mutex_init(&table->event_xa_lock);
2547		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
2548		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
2549	}
2550
2551	return 0;
2552}
2553
2554void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
2555{
2556	struct mlx5_devx_event_table *table = &dev->devx_event_table;
2557	struct devx_event_subscription *sub, *tmp;
2558	struct devx_event *event;
2559	void *entry;
2560	unsigned long id;
2561
2562	if (dev->devx_whitelist_uid) {
2563		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
2564		mutex_lock(&dev->devx_event_table.event_xa_lock);
2565		xa_for_each(&table->event_xa, id, entry) {
2566			event = entry;
2567			list_for_each_entry_safe(
2568				sub, tmp, &event->unaffiliated_list, xa_list)
2569				devx_cleanup_subscription(dev, sub);
2570			kfree(entry);
2571		}
2572		mutex_unlock(&dev->devx_event_table.event_xa_lock);
2573		xa_destroy(&table->event_xa);
2574
2575		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
2576	}
2577}
2578
2579static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2580					 size_t count, loff_t *pos)
2581{
2582	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2583	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2584	struct devx_async_data *event;
2585	int ret = 0;
2586	size_t eventsz;
2587
2588	spin_lock_irq(&ev_queue->lock);
2589
2590	while (list_empty(&ev_queue->event_list)) {
2591		spin_unlock_irq(&ev_queue->lock);
2592
2593		if (filp->f_flags & O_NONBLOCK)
2594			return -EAGAIN;
2595
2596		if (wait_event_interruptible(
2597			    ev_queue->poll_wait,
2598			    (!list_empty(&ev_queue->event_list) ||
2599			     ev_queue->is_destroyed))) {
2600			return -ERESTARTSYS;
2601		}
2602
2603		spin_lock_irq(&ev_queue->lock);
2604		if (ev_queue->is_destroyed) {
2605			spin_unlock_irq(&ev_queue->lock);
2606			return -EIO;
2607		}
2608	}
2609
2610	event = list_entry(ev_queue->event_list.next,
2611			   struct devx_async_data, list);
2612	eventsz = event->cmd_out_len +
2613			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
2614
2615	if (eventsz > count) {
2616		spin_unlock_irq(&ev_queue->lock);
2617		return -ENOSPC;
2618	}
2619
2620	list_del(ev_queue->event_list.next);
2621	spin_unlock_irq(&ev_queue->lock);
2622
2623	if (copy_to_user(buf, &event->hdr, eventsz))
2624		ret = -EFAULT;
2625	else
2626		ret = eventsz;
2627
2628	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
2629	kvfree(event);
2630	return ret;
2631}
2632
2633static __poll_t devx_async_cmd_event_poll(struct file *filp,
2634					      struct poll_table_struct *wait)
2635{
2636	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2637	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2638	__poll_t pollflags = 0;
2639
2640	poll_wait(filp, &ev_queue->poll_wait, wait);
2641
2642	spin_lock_irq(&ev_queue->lock);
2643	if (ev_queue->is_destroyed)
2644		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2645	else if (!list_empty(&ev_queue->event_list))
2646		pollflags = EPOLLIN | EPOLLRDNORM;
2647	spin_unlock_irq(&ev_queue->lock);
2648
2649	return pollflags;
2650}
2651
2652static const struct file_operations devx_async_cmd_event_fops = {
2653	.owner	 = THIS_MODULE,
2654	.read	 = devx_async_cmd_event_read,
2655	.poll    = devx_async_cmd_event_poll,
2656	.release = uverbs_uobject_fd_release,
2657	.llseek	 = no_llseek,
2658};
2659
2660static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2661				     size_t count, loff_t *pos)
2662{
2663	struct devx_async_event_file *ev_file = filp->private_data;
2664	struct devx_event_subscription *event_sub;
2665	struct devx_async_event_data *event;
2666	int ret = 0;
2667	size_t eventsz;
2668	bool omit_data;
2669	void *event_data;
2670
2671	omit_data = ev_file->omit_data;
2672
2673	spin_lock_irq(&ev_file->lock);
2674
2675	if (ev_file->is_overflow_err) {
2676		ev_file->is_overflow_err = 0;
2677		spin_unlock_irq(&ev_file->lock);
2678		return -EOVERFLOW;
2679	}
2680
2681
2682	while (list_empty(&ev_file->event_list)) {
2683		spin_unlock_irq(&ev_file->lock);
2684
2685		if (filp->f_flags & O_NONBLOCK)
2686			return -EAGAIN;
2687
2688		if (wait_event_interruptible(ev_file->poll_wait,
2689			    (!list_empty(&ev_file->event_list) ||
2690			     ev_file->is_destroyed))) {
2691			return -ERESTARTSYS;
2692		}
2693
2694		spin_lock_irq(&ev_file->lock);
2695		if (ev_file->is_destroyed) {
2696			spin_unlock_irq(&ev_file->lock);
2697			return -EIO;
2698		}
2699	}
2700
2701	if (omit_data) {
2702		event_sub = list_first_entry(&ev_file->event_list,
2703					struct devx_event_subscription,
2704					event_list);
2705		eventsz = sizeof(event_sub->cookie);
2706		event_data = &event_sub->cookie;
2707	} else {
2708		event = list_first_entry(&ev_file->event_list,
2709				      struct devx_async_event_data, list);
2710		eventsz = sizeof(struct mlx5_eqe) +
2711			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
2712		event_data = &event->hdr;
2713	}
2714
2715	if (eventsz > count) {
2716		spin_unlock_irq(&ev_file->lock);
2717		return -EINVAL;
2718	}
2719
2720	if (omit_data)
2721		list_del_init(&event_sub->event_list);
2722	else
2723		list_del(&event->list);
2724
2725	spin_unlock_irq(&ev_file->lock);
2726
2727	if (copy_to_user(buf, event_data, eventsz))
2728		/* This points to an application issue, not a kernel concern */
2729		ret = -EFAULT;
2730	else
2731		ret = eventsz;
2732
2733	if (!omit_data)
2734		kfree(event);
2735	return ret;
2736}
2737
2738static __poll_t devx_async_event_poll(struct file *filp,
2739				      struct poll_table_struct *wait)
2740{
2741	struct devx_async_event_file *ev_file = filp->private_data;
2742	__poll_t pollflags = 0;
2743
2744	poll_wait(filp, &ev_file->poll_wait, wait);
2745
2746	spin_lock_irq(&ev_file->lock);
2747	if (ev_file->is_destroyed)
2748		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2749	else if (!list_empty(&ev_file->event_list))
2750		pollflags = EPOLLIN | EPOLLRDNORM;
2751	spin_unlock_irq(&ev_file->lock);
2752
2753	return pollflags;
2754}
2755
2756static void devx_free_subscription(struct rcu_head *rcu)
2757{
2758	struct devx_event_subscription *event_sub =
2759		container_of(rcu, struct devx_event_subscription, rcu);
2760
2761	if (event_sub->eventfd)
2762		eventfd_ctx_put(event_sub->eventfd);
2763	uverbs_uobject_put(&event_sub->ev_file->uobj);
2764	kfree(event_sub);
2765}
2766
2767static const struct file_operations devx_async_event_fops = {
2768	.owner	 = THIS_MODULE,
2769	.read	 = devx_async_event_read,
2770	.poll    = devx_async_event_poll,
2771	.release = uverbs_uobject_fd_release,
2772	.llseek	 = no_llseek,
2773};
2774
2775static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
2776					      enum rdma_remove_reason why)
2777{
2778	struct devx_async_cmd_event_file *comp_ev_file =
2779		container_of(uobj, struct devx_async_cmd_event_file,
2780			     uobj);
2781	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2782	struct devx_async_data *entry, *tmp;
2783
2784	spin_lock_irq(&ev_queue->lock);
2785	ev_queue->is_destroyed = 1;
2786	spin_unlock_irq(&ev_queue->lock);
2787	wake_up_interruptible(&ev_queue->poll_wait);
2788
2789	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2790
2791	spin_lock_irq(&comp_ev_file->ev_queue.lock);
2792	list_for_each_entry_safe(entry, tmp,
2793				 &comp_ev_file->ev_queue.event_list, list) {
2794		list_del(&entry->list);
2795		kvfree(entry);
2796	}
2797	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2798};
2799
2800static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
2801					  enum rdma_remove_reason why)
2802{
2803	struct devx_async_event_file *ev_file =
2804		container_of(uobj, struct devx_async_event_file,
2805			     uobj);
2806	struct devx_event_subscription *event_sub, *event_sub_tmp;
2807	struct mlx5_ib_dev *dev = ev_file->dev;
2808
2809	spin_lock_irq(&ev_file->lock);
2810	ev_file->is_destroyed = 1;
2811
2812	/* free the pending events allocation */
2813	if (ev_file->omit_data) {
2814		struct devx_event_subscription *event_sub, *tmp;
2815
2816		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
2817					 event_list)
2818			list_del_init(&event_sub->event_list);
2819
2820	} else {
2821		struct devx_async_event_data *entry, *tmp;
2822
2823		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
2824					 list) {
2825			list_del(&entry->list);
2826			kfree(entry);
2827		}
2828	}
2829
2830	spin_unlock_irq(&ev_file->lock);
2831	wake_up_interruptible(&ev_file->poll_wait);
2832
2833	mutex_lock(&dev->devx_event_table.event_xa_lock);
2834	/* delete the subscriptions which are related to this FD */
2835	list_for_each_entry_safe(event_sub, event_sub_tmp,
2836				 &ev_file->subscribed_events_list, file_list) {
2837		devx_cleanup_subscription(dev, event_sub);
2838		list_del_rcu(&event_sub->file_list);
2839		/* subscription may not be used by the read API any more */
2840		call_rcu(&event_sub->rcu, devx_free_subscription);
2841	}
2842	mutex_unlock(&dev->devx_event_table.event_xa_lock);
2843
2844	put_device(&dev->ib_dev.dev);
2845};
2846
2847DECLARE_UVERBS_NAMED_METHOD(
2848	MLX5_IB_METHOD_DEVX_UMEM_REG,
2849	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2850			MLX5_IB_OBJECT_DEVX_UMEM,
2851			UVERBS_ACCESS_NEW,
2852			UA_MANDATORY),
2853	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2854			   UVERBS_ATTR_TYPE(u64),
2855			   UA_MANDATORY),
2856	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2857			   UVERBS_ATTR_TYPE(u64),
2858			   UA_MANDATORY),
2859	UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
2860			   UA_OPTIONAL),
2861	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2862			     enum ib_access_flags),
2863	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
2864			     u64),
2865	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2866			    UVERBS_ATTR_TYPE(u32),
2867			    UA_MANDATORY));
2868
2869DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2870	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2871	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2872			MLX5_IB_OBJECT_DEVX_UMEM,
2873			UVERBS_ACCESS_DESTROY,
2874			UA_MANDATORY));
2875
2876DECLARE_UVERBS_NAMED_METHOD(
2877	MLX5_IB_METHOD_DEVX_QUERY_EQN,
2878	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2879			   UVERBS_ATTR_TYPE(u32),
2880			   UA_MANDATORY),
2881	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2882			    UVERBS_ATTR_TYPE(u32),
2883			    UA_MANDATORY));
2884
2885DECLARE_UVERBS_NAMED_METHOD(
2886	MLX5_IB_METHOD_DEVX_QUERY_UAR,
2887	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2888			   UVERBS_ATTR_TYPE(u32),
2889			   UA_MANDATORY),
2890	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2891			    UVERBS_ATTR_TYPE(u32),
2892			    UA_MANDATORY));
2893
2894DECLARE_UVERBS_NAMED_METHOD(
2895	MLX5_IB_METHOD_DEVX_OTHER,
2896	UVERBS_ATTR_PTR_IN(
2897		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2898		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2899		UA_MANDATORY,
2900		UA_ALLOC_AND_COPY),
2901	UVERBS_ATTR_PTR_OUT(
2902		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2903		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2904		UA_MANDATORY));
2905
2906DECLARE_UVERBS_NAMED_METHOD(
2907	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2908	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2909			MLX5_IB_OBJECT_DEVX_OBJ,
2910			UVERBS_ACCESS_NEW,
2911			UA_MANDATORY),
2912	UVERBS_ATTR_PTR_IN(
2913		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2914		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2915		UA_MANDATORY,
2916		UA_ALLOC_AND_COPY),
2917	UVERBS_ATTR_PTR_OUT(
2918		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2919		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2920		UA_MANDATORY));
2921
2922DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2923	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2924	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2925			MLX5_IB_OBJECT_DEVX_OBJ,
2926			UVERBS_ACCESS_DESTROY,
2927			UA_MANDATORY));
2928
2929DECLARE_UVERBS_NAMED_METHOD(
2930	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2931	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2932			UVERBS_IDR_ANY_OBJECT,
2933			UVERBS_ACCESS_WRITE,
2934			UA_MANDATORY),
2935	UVERBS_ATTR_PTR_IN(
2936		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2937		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2938		UA_MANDATORY,
2939		UA_ALLOC_AND_COPY),
2940	UVERBS_ATTR_PTR_OUT(
2941		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2942		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2943		UA_MANDATORY));
2944
2945DECLARE_UVERBS_NAMED_METHOD(
2946	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2947	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2948			UVERBS_IDR_ANY_OBJECT,
2949			UVERBS_ACCESS_READ,
2950			UA_MANDATORY),
2951	UVERBS_ATTR_PTR_IN(
2952		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2953		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2954		UA_MANDATORY,
2955		UA_ALLOC_AND_COPY),
2956	UVERBS_ATTR_PTR_OUT(
2957		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2958		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2959		UA_MANDATORY));
2960
2961DECLARE_UVERBS_NAMED_METHOD(
2962	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2963	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2964			UVERBS_IDR_ANY_OBJECT,
2965			UVERBS_ACCESS_READ,
2966			UA_MANDATORY),
2967	UVERBS_ATTR_PTR_IN(
2968		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2969		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2970		UA_MANDATORY,
2971		UA_ALLOC_AND_COPY),
2972	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2973		u16, UA_MANDATORY),
2974	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2975		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2976		UVERBS_ACCESS_READ,
2977		UA_MANDATORY),
2978	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2979		UVERBS_ATTR_TYPE(u64),
2980		UA_MANDATORY));
2981
2982DECLARE_UVERBS_NAMED_METHOD(
2983	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
2984	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
2985		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2986		UVERBS_ACCESS_READ,
2987		UA_MANDATORY),
2988	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
2989		MLX5_IB_OBJECT_DEVX_OBJ,
2990		UVERBS_ACCESS_READ,
2991		UA_OPTIONAL),
2992	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2993		UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
2994		UA_MANDATORY,
2995		UA_ALLOC_AND_COPY),
2996	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
2997		UVERBS_ATTR_TYPE(u64),
2998		UA_OPTIONAL),
2999	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
3000		UVERBS_ATTR_TYPE(u32),
3001		UA_OPTIONAL));
3002
3003DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
3004			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
3005			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
3006			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
3007			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
3008
3009DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
3010			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
3011			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
3012			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
3013			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
3014			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
3015			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
3016
3017DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
3018			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
3019			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
3020			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
3021
3022
3023DECLARE_UVERBS_NAMED_METHOD(
3024	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
3025	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
3026			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3027			UVERBS_ACCESS_NEW,
3028			UA_MANDATORY));
3029
3030DECLARE_UVERBS_NAMED_OBJECT(
3031	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3032	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
3033			     devx_async_cmd_event_destroy_uobj,
3034			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
3035			     O_RDONLY),
3036	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
3037
3038DECLARE_UVERBS_NAMED_METHOD(
3039	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
3040	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
3041			MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3042			UVERBS_ACCESS_NEW,
3043			UA_MANDATORY),
3044	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
3045			enum mlx5_ib_uapi_devx_create_event_channel_flags,
3046			UA_MANDATORY));
3047
3048DECLARE_UVERBS_NAMED_OBJECT(
3049	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3050	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
3051			     devx_async_event_destroy_uobj,
3052			     &devx_async_event_fops, "[devx_async_event]",
3053			     O_RDONLY),
3054	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
3055
3056static bool devx_is_supported(struct ib_device *device)
3057{
3058	struct mlx5_ib_dev *dev = to_mdev(device);
3059
3060	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
3061}
3062
3063const struct uapi_definition mlx5_ib_devx_defs[] = {
3064	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3065		MLX5_IB_OBJECT_DEVX,
3066		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3067	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3068		MLX5_IB_OBJECT_DEVX_OBJ,
3069		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3070	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3071		MLX5_IB_OBJECT_DEVX_UMEM,
3072		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3073	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3074		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
3075		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3076	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
3077		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
3078		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
3079	{},
3080};