// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "devx.h"
#include "qp.h"
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct devx_async_cmd_event_file *ev_file;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

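/*
 * Event dispatch layout (summary, derived from the structures below): the
 * per-device event_xa is keyed by event number and holds a devx_event;
 * each devx_event owns a second-level XA keyed by object id that holds a
 * devx_obj_event. Subscriptions hang off the unaffiliated or per-object
 * lists and are reclaimed via RCU.
 */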
/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};

struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct eventfd_ctx *eventfd;
};

struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};

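/*
 * The destroy mailbox (dinbox/dinlen) is prebuilt at registration time so
 * teardown needs no further parsing of the original command.
 */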
struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
};

struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(create_umem_out)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

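/*
 * Allocate a firmware user context (UCTX) for DEVX. Returns the non-zero
 * uid on success or a negative errno; extra capabilities are granted only
 * when the caller holds the matching kernel capability.
 */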
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(create_uctx_out, out, uid);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}

static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return true;
	default:
		return false;
	}
}

static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}

static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_OBJ_TYPE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}

static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}

/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}
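
/*
 * Illustrative example (values for demonstration only): a CQ created via
 * MLX5_CMD_OP_CREATE_CQ with cqn 0x12 is encoded as
 * ((u64)MLX5_CMD_OP_CREATE_CQ << 32) | 0x12. get_dec_obj_id() recovers the
 * low 32 bits and get_dec_obj_type() the creation opcode from bits 47:32;
 * general objects additionally carry their obj_type in bits 63:48.
 */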

static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	case MLX5_CMD_OP_CREATE_UMEM:
		return MLX5_GET(create_umem_out, out, umem_id);
	case MLX5_CMD_OP_CREATE_MKEY:
		return MLX5_GET(create_mkey_out, out, mkey_index);
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_GET(create_cq_out, out, cqn);
	case MLX5_CMD_OP_ALLOC_PD:
		return MLX5_GET(alloc_pd_out, out, pd);
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		return MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_GET(create_rmp_out, out, rmpn);
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_GET(create_sq_out, out, sqn);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_GET(create_rq_out, out, rqn);
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_GET(create_rqt_out, out, rqtn);
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_GET(create_tir_out, out, tirn);
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_GET(create_tis_out, out, tisn);
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		return MLX5_GET(create_flow_table_out, out, table_id);
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		return MLX5_GET(create_flow_group_out, out, group_id);
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		return MLX5_GET(set_fte_in, in, flow_index);
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		return MLX5_GET(alloc_packet_reformat_context_out, out,
				packet_reformat_id);
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		return MLX5_GET(alloc_modify_header_context_out, out,
				modify_header_id);
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		return MLX5_GET(create_scheduling_element_out, out,
				scheduling_element_id);
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		return MLX5_GET(set_l2_table_entry_in, in, table_index);
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_GET(create_qp_out, out, qpn);
	case MLX5_CMD_OP_CREATE_SRQ:
		return MLX5_GET(create_srq_out, out, srqn);
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_GET(create_dct_out, out, dctn);
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_GET(create_xrq_out, out, xrqn);
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return MLX5_GET(attach_to_mcg_in, in, qpn);
	case MLX5_CMD_OP_ALLOC_XRCD:
		return MLX5_GET(alloc_xrcd_out, out, xrcd);
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_GET(create_psv_out, out, psv0_index);
	default:
		/* The entry must match one of the devx_is_obj_create_cmd cases */
		WARN_ON(true);
		return 0;
	}
}

static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(query_modify_header_context_in,
						 in, modify_header_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

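/*
 * Validate that the obj_id extracted from the user command matches the
 * uobject the command was issued against; devx_get_obj_id() returning 0
 * (unrecognized opcode) is rejected outright.
 */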
static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);

		if (qp->type == IB_QPT_RAW_PACKET ||
		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
				&qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp->type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
	{
		u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
		struct devx_obj *devx_uobj = uobj->object;

		if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
		    devx_uobj->flow_counter_bulk_size) {
			u64 end;

			end = devx_uobj->obj_id +
				devx_uobj->flow_counter_bulk_size;
			return devx_uobj->obj_id <= obj_id && end > obj_id;
		}

		return devx_uobj->obj_id == obj_id;
	}

	default:
		return false;
	}
}

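/*
 * Tag create commands that carry a umem so firmware treats the referenced
 * buffers as umem handles rather than raw physical addresses.
 */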
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}

static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

		if (num_psv == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
		return true;
	default:
		return false;
	}
}

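/*
 * Resolve the uid used for this command: whitelisted commands may fall
 * back to the device-wide whitelist uid when the context has none; all
 * other commands require the context's own devx uid.
 */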
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_comp_eqn_get(dev->mdev, user_vector, &dev_eqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err, err2;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_do(dev->mdev, cmd_in,
			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			  cmd_out, cmd_out_len);
	if (err && err != -EREMOTEIO)
		return err;

	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);

	return err2 ?: err;
}

static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = devx_get_created_obj_id(in, out, opcode);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
			 MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(destroy_umem_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(destroy_mkey_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_MKEY);
		MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(dealloc_transport_domain_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
			 *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(dealloc_q_counter_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(destroy_flow_table_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(destroy_flow_group_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(delete_fte_in, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(dealloc_flow_counter_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
			 *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		MLX5_SET(dealloc_packet_reformat_context_in, din,
			 packet_reformat_id, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(dealloc_modify_header_context_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		MLX5_SET(dealloc_modify_header_context_in, din,
			 modify_header_id, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(destroy_scheduling_element_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(delete_l2_table_entry_in, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(destroy_xrc_srq_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(detach_from_mcg_in, din, opcode,
			 MLX5_CMD_OP_DETACH_FROM_MCG);
		MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(dealloc_xrcd_in, din, opcode,
			 MLX5_CMD_OP_DEALLOC_XRCD);
		MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(destroy_psv_in, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
		break;
	default:
		/* The entry must match one of the devx_is_obj_create_cmd cases */
		WARN_ON(true);
		break;
	}
}

static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_mkey *mkey = &obj->mkey;
	void *mkc;
	u8 key;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
	init_waitqueue_head(&mkey->wait);

	return mlx5r_store_odp_mkey(dev, mkey);
}

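/*
 * KLM/KSM (indirect) mkeys reference other mkeys, so they are tracked for
 * ODP teardown instead of being tagged with mkey_umem_valid.
 */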
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
					  memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether the level-2 entry for this obj_sub_list is now empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
	    xa_erase(&obj->ib_dev->odp_mkeys,
		     mlx5_base_mkey(obj->mkey.key)))
		/*
		 * pagefault_single_data_segment() issues commands against the
		 * mkey, so we must wait for it to stop before freeing the
		 * mkey, as another allocation could get the same mkey number.
		 */
		mlx5r_deref_wait_odp_mkey(&obj->mkey);

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ret)
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}

static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}

static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
{
	if (!MLX5_CAP_GEN(dev->mdev, apu) ||
	    !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
		return false;

	return true;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					     MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err, err2 = 0;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
					   cmd_in_len, cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
		   !is_apu_cq(dev, cmd_in)) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_create_cq(dev->mdev, &obj->core_cq,
				     cmd_in, cmd_in_len, cmd_out,
				     cmd_out_len);
	} else {
		err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
				  cmd_out, cmd_out_len);
	}

	if (err == -EREMOTEIO)
		err2 = uverbs_copy_to(attrs,
				      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
				      cmd_out, cmd_out_len);
	if (err)
		goto obj_free;

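	/*
	 * Flow counters may be allocated in bulk; remember the bulk size so
	 * devx_is_valid_obj_id() can accept any counter id in the range
	 * [obj_id, obj_id + bulk).
	 */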
	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
		u32 bulk = MLX5_GET(alloc_flow_counter_in,
				    cmd_in,
				    flow_counter_bulk_log_size);

		if (bulk)
			bulk = 1 << bulk;
		else
			bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
						cmd_in,
						flow_counter_bulk);
		obj->flow_counter_bulk_size = bulk;
	}

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err2 ?: err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err, err2;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_do(mdev->mdev, cmd_in,
			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			  cmd_out, cmd_out_len);
	if (err && err != -EREMOTEIO)
		return err;

	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);

	return err2 ?: err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err, err2;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_do(mdev->mdev, cmd_in,
			  uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			  cmd_out, cmd_out_len);
	if (err && err != -EREMOTEIO)
		return err;

	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);

	return err2 ?: err;
}

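/*
 * Backing state for the DEVX async command FD: completed async queries are
 * queued on event_list until they are read back through the FD.
 */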
struct devx_async_event_queue {
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	atomic_t bytes_in_use;
	u8 is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject uobj;
	struct devx_async_event_queue ev_queue;
	struct mlx5_async_ctx async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
	unsigned long flags;

	/*
	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
	 * routine returns, ensuring that it always remains valid here.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
}

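/*
 * Bound the total output bytes outstanding on one async FD so userspace
 * cannot pin unbounded kernel memory with un-reaped queries.
 */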
1765#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1766
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->ev_file = ev_file;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto free_async;

	return 0;

free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

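/*
 * Undo subscribe_event_xa_alloc(). Level-1 entries are kept for reuse;
 * a level-2 entry is removed once its subscription list is empty.
 */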
static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

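/*
 * Ensure the two-level XA path for an event key exists: level 1 is keyed
 * by event type (and object type), level 2 by object id. Entries created
 * here are reused by later subscriptions.
 */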
static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err) {
			kfree(obj_event);
			return err;
		}
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

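/*
 * Check the requested event numbers against the device's affiliated or
 * unaffiliated user-event capability masks; devices without event_cap
 * fall back to the fixed legacy lists.
 */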
#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}

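/*
 * Register a set of event numbers on an event FD, optionally scoped to a
 * DEVX object. All allocations and XA insertions are performed first so
 * that the final list linkage cannot fail midway.
 */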
#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Protect from concurrent subscriptions to the same XA entries so
	 * that both can succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		uverbs_uobject_get(&ev_file->uobj);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		/* May be needed upon cleanup of the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions are done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);
		uverbs_uobject_put(&event_sub->ev_file->uobj);
		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}

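/*
 * Pin the user memory for a DEVX umem, either from a plain VA range or,
 * when a dmabuf FD attribute is present, from a pinned dmabuf.
 */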
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj, u32 access_flags)
{
	u64 addr;
	size_t size;
	int err;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = ib_check_mr_access(&dev->ib_dev, access_flags);
	if (err)
		return err;

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
		struct ib_umem_dmabuf *umem_dmabuf;
		int dmabuf_fd;

		err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
					MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
		if (err)
			return -EFAULT;

		umem_dmabuf = ib_umem_dmabuf_get_pinned(
			&dev->ib_dev, addr, size, dmabuf_fd, access_flags);
		if (IS_ERR(umem_dmabuf))
			return PTR_ERR(umem_dmabuf);
		obj->umem = &umem_dmabuf->umem;
	} else {
		obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
		if (IS_ERR(obj->umem))
			return PTR_ERR(obj->umem);
	}
	return 0;
}

static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
					       unsigned long pgsz_bitmap)
{
	unsigned long page_size;

	/* Don't bother checking larger page sizes as offset must be zero and
	 * total DEVX umem length must be equal to total umem length.
	 */
	pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
					 PAGE_SHIFT),
				   MLX5_ADAPTER_PAGE_SHIFT);
	if (!pgsz_bitmap)
		return 0;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
	if (!page_size)
		return 0;

	/* If the page_size is less than the CPU page size then we can use the
	 * offset and create a umem which is a subset of the page list.
	 * For larger page sizes we can't be sure the DMA list reflects the
	 * VA so we must ensure that the umem extent is exactly equal to the
	 * page list. Reduce the page size until one of these cases is true.
	 */
	while ((ib_umem_dma_offset(umem, page_size) != 0 ||
		(umem->length % page_size) != 0) &&
		page_size > PAGE_SIZE)
		page_size /= 2;

	return page_size;
}

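/*
 * Build the CREATE_UMEM command box: pick the best page size allowed by
 * the user's pgsz_bitmap and fill the MTT array with the DMA blocks.
 */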
static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
				   struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd,
				   int access)
{
	unsigned long pgsz_bitmap;
	unsigned int page_size;
	__be64 *mtt;
	void *umem;
	int ret;

	/*
	 * If the user does not pass in pgsz_bitmap then the user promises not
	 * to use umem_offset!=0 in any commands that allocate on top of the
	 * umem.
	 *
	 * If the user wants to use a umem_offset then it must pass in
	 * pgsz_bitmap which guides the maximum page size and thus maximum
	 * object alignment inside the umem. See the PRM.
	 *
	 * Users are not allowed to use IOVA here, mkeys are not supported on
	 * umem.
	 */
	ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
			MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
			GENMASK_ULL(63,
				    min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
	if (ret)
		return ret;

	page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
	if (!page_size)
		return -EINVAL;

	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		     (MLX5_ST_SZ_BYTES(mtt) *
		      ib_umem_num_dma_blocks(obj->umem, page_size));
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	if (IS_ERR(cmd->in))
		return PTR_ERR(cmd->in);

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt,
		   ib_umem_num_dma_blocks(obj->umem, page_size));
	MLX5_SET(umem, umem, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset,
		 ib_umem_dma_offset(obj->umem, page_size));

	if (mlx5_umem_needs_ats(dev, obj->umem, access))
		MLX5_SET(umem, umem, ats, 1);

	mlx5_ib_populate_pas(obj->umem, page_size, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
	return 0;
}

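/*
 * Register a umem with the device on behalf of the DEVX uid and return
 * the firmware umem id to user space.
 */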
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int access_flags;
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	err = uverbs_get_flags32(&access_flags, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_RELAXED_ORDERING);
	if (err)
		return err;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
	if (err)
		goto err_umem_release;

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
			     sizeof(obj_id));
	return err;

err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (err)
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}

static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

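/*
 * Queue one event on the FD. In omit_data mode only the subscription is
 * linked, which coalesces duplicates; otherwise the EQE is copied into a
 * freshly allocated entry, and allocation failure is latched as an
 * overflow error reported on the next read().
 */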
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list) ||
		    ev_file->is_destroyed) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	if (!ev_file->is_destroyed)
		list_add_tail(&event_data->list, &ev_file->event_list);
	else
		kfree(event_data);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (item->eventfd)
			eventfd_signal(item->eventfd);
		else
			deliver_event(item, data);
	}
}

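/*
 * EQ notifier: look up subscribers under RCU using the event type (and,
 * for affiliated events, the object type and id) and fan the EQE out to
 * every matching FD or eventfd.
 */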
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}

int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	int uid;

	uid = mlx5_ib_devx_create(dev, false);
	if (uid > 0) {
		dev->devx_whitelist_uid = uid;
		xa_init(&table->event_xa);
		mutex_init(&table->event_xa_lock);
		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
	}

	return 0;
}

void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	if (dev->devx_whitelist_uid) {
		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
		mutex_lock(&dev->devx_event_table.event_xa_lock);
		xa_for_each(&table->event_xa, id, entry) {
			event = entry;
			list_for_each_entry_safe(
				sub, tmp, &event->unaffiliated_list, xa_list)
				devx_cleanup_subscription(dev, sub);
			kfree(entry);
		}
		mutex_unlock(&dev->devx_event_table.event_xa_lock);
		xa_destroy(&table->event_xa);

		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
	}
}

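/*
 * read() on the async command FD: block (unless O_NONBLOCK) until a
 * completed query is available, copy the header plus command output to
 * the user buffer and release the queue byte accounting.
 */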
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_queue->lock);
		if (ev_queue->is_destroyed) {
			spin_unlock_irq(&ev_queue->lock);
			return -EIO;
		}
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner = THIS_MODULE,
	.read = devx_async_cmd_event_read,
	.poll = devx_async_cmd_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek = no_llseek,
};

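/*
 * read() on the async event FD: in omit_data mode only the 8-byte cookie
 * of the oldest pending subscription is returned; otherwise the full
 * event header and EQE are copied out. A latched overflow error is
 * reported once as -EOVERFLOW.
 */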
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *event;
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					     struct devx_event_subscription,
					     event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
					 struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}

static void devx_free_subscription(struct rcu_head *rcu)
{
	struct devx_event_subscription *event_sub =
		container_of(rcu, struct devx_event_subscription, rcu);

	if (event_sub->eventfd)
		eventfd_ctx_put(event_sub->eventfd);
	uverbs_uobject_put(&event_sub->ev_file->uobj);
	kfree(event_sub);
}

static const struct file_operations devx_async_event_fops = {
	.owner = THIS_MODULE,
	.read = devx_async_event_read,
	.poll = devx_async_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek = no_llseek,
};

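/*
 * FD teardown for the async command channel: mark the queue destroyed,
 * wait for in-flight command callbacks via mlx5_cmd_cleanup_async_ctx(),
 * then free any completions that were never read.
 */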
static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
					      enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);
	wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list) {
		list_del(&entry->list);
		kvfree(entry);
	}
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
}

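/*
 * FD teardown for the event channel: flush pending events, then detach
 * and RCU-free every subscription tied to this FD.
 */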
static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
					  enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;

	/* free the pending events allocation */
	if (ev_file->omit_data) {
		struct devx_event_subscription *event_sub, *tmp;

		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
					 event_list)
			list_del_init(&event_sub->event_list);

	} else {
		struct devx_async_event_data *entry, *tmp;

		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}

	spin_unlock_irq(&ev_file->lock);
	wake_up_interruptible(&ev_file->poll_wait);

	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(dev, event_sub);
		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		call_rcu(&event_sub->rcu, devx_free_subscription);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	put_device(&dev->ib_dev.dev);
}

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
			   UA_OPTIONAL),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
			     u64),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
			     u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
			   UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
			   UVERBS_ATTR_TYPE(u64),
			   UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_async_cmd_event_destroy_uobj,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			     enum mlx5_ib_uapi_devx_create_event_channel_flags,
			     UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_async_event_destroy_uobj,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));

static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <rdma/ib_user_verbs.h>
7#include <rdma/ib_verbs.h>
8#include <rdma/uverbs_types.h>
9#include <rdma/uverbs_ioctl.h>
10#include <rdma/mlx5_user_ioctl_cmds.h>
11#include <rdma/mlx5_user_ioctl_verbs.h>
12#include <rdma/ib_umem.h>
13#include <rdma/uverbs_std_types.h>
14#include <linux/mlx5/driver.h>
15#include <linux/mlx5/fs.h>
16#include "mlx5_ib.h"
17#include <linux/xarray.h>
18
19#define UVERBS_MODULE_NAME mlx5_ib
20#include <rdma/uverbs_named_ioctl.h>
21
22static void dispatch_event_fd(struct list_head *fd_list, const void *data);
23
24enum devx_obj_flags {
25 DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
26 DEVX_OBJ_FLAGS_DCT = 1 << 1,
27 DEVX_OBJ_FLAGS_CQ = 1 << 2,
28};
29
30struct devx_async_data {
31 struct mlx5_ib_dev *mdev;
32 struct list_head list;
33 struct ib_uobject *fd_uobj;
34 struct mlx5_async_work cb_work;
35 u16 cmd_out_len;
36 /* must be last field in this structure */
37 struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
38};
39
40struct devx_async_event_data {
41 struct list_head list; /* headed in ev_file->event_list */
42 struct mlx5_ib_uapi_devx_async_event_hdr hdr;
43};
44
45/* first level XA value data structure */
46struct devx_event {
47 struct xarray object_ids; /* second XA level, Key = object id */
48 struct list_head unaffiliated_list;
49};
50
51/* second level XA value data structure */
52struct devx_obj_event {
53 struct rcu_head rcu;
54 struct list_head obj_sub_list;
55};
56
57struct devx_event_subscription {
58 struct list_head file_list; /* headed in ev_file->
59 * subscribed_events_list
60 */
61 struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
62 * devx_obj_event->obj_sub_list
63 */
64 struct list_head obj_list; /* headed in devx_object */
65 struct list_head event_list; /* headed in ev_file->event_list or in
66 * temp list via subscription
67 */
68
69 u8 is_cleaned:1;
70 u32 xa_key_level1;
71 u32 xa_key_level2;
72 struct rcu_head rcu;
73 u64 cookie;
74 struct devx_async_event_file *ev_file;
75 struct file *filp; /* Upon hot unplug we need a direct access to */
76 struct eventfd_ctx *eventfd;
77};
78
79struct devx_async_event_file {
80 struct ib_uobject uobj;
81 /* Head of events that are subscribed to this FD */
82 struct list_head subscribed_events_list;
83 spinlock_t lock;
84 wait_queue_head_t poll_wait;
85 struct list_head event_list;
86 struct mlx5_ib_dev *dev;
87 u8 omit_data:1;
88 u8 is_overflow_err:1;
89 u8 is_destroyed:1;
90};
91
92#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
93struct devx_obj {
94 struct mlx5_ib_dev *ib_dev;
95 u64 obj_id;
96 u32 dinlen; /* destroy inbox length */
97 u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
98 u32 flags;
99 union {
100 struct mlx5_ib_devx_mr devx_mr;
101 struct mlx5_core_dct core_dct;
102 struct mlx5_core_cq core_cq;
103 };
104 struct list_head event_sub; /* holds devx_event_subscription entries */
105};
106
107struct devx_umem {
108 struct mlx5_core_dev *mdev;
109 struct ib_umem *umem;
110 u32 page_offset;
111 int page_shift;
112 int ncont;
113 u32 dinlen;
114 u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
115};
116
117struct devx_umem_reg_cmd {
118 void *in;
119 u32 inlen;
120 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
121};
122
123static struct mlx5_ib_ucontext *
124devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
125{
126 return to_mucontext(ib_uverbs_get_ucontext(attrs));
127}
128
129int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
130{
131 u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
132 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
133 void *uctx;
134 int err;
135 u16 uid;
136 u32 cap = 0;
137
138 /* 0 means not supported */
139 if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
140 return -EINVAL;
141
142 uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
143 if (is_user && capable(CAP_NET_RAW) &&
144 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
145 cap |= MLX5_UCTX_CAP_RAW_TX;
146 if (is_user && capable(CAP_SYS_RAWIO) &&
147 (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
148 MLX5_UCTX_CAP_INTERNAL_DEV_RES))
149 cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
150
151 MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
152 MLX5_SET(uctx, uctx, cap, cap);
153
154 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
155 if (err)
156 return err;
157
158 uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
159 return uid;
160}
161
162void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
163{
164 u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
165 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
166
167 MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
168 MLX5_SET(destroy_uctx_in, in, uid, uid);
169
170 mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
171}
172
173bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
174{
175 struct devx_obj *devx_obj = obj;
176 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
177
178 switch (opcode) {
179 case MLX5_CMD_OP_DESTROY_TIR:
180 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
181 *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
182 obj_id);
183 return true;
184
185 case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
186 *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
187 *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
188 table_id);
189 return true;
190 default:
191 return false;
192 }
193}
194
195bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
196{
197 struct devx_obj *devx_obj = obj;
198 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
199
200 if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
201 *counter_id = MLX5_GET(dealloc_flow_counter_in,
202 devx_obj->dinbox,
203 flow_counter_id);
204 return true;
205 }
206
207 return false;
208}
209
210static bool is_legacy_unaffiliated_event_num(u16 event_num)
211{
212 switch (event_num) {
213 case MLX5_EVENT_TYPE_PORT_CHANGE:
214 return true;
215 default:
216 return false;
217 }
218}
219
220static bool is_legacy_obj_event_num(u16 event_num)
221{
222 switch (event_num) {
223 case MLX5_EVENT_TYPE_PATH_MIG:
224 case MLX5_EVENT_TYPE_COMM_EST:
225 case MLX5_EVENT_TYPE_SQ_DRAINED:
226 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
227 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
228 case MLX5_EVENT_TYPE_CQ_ERROR:
229 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
230 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
231 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
232 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
233 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
234 case MLX5_EVENT_TYPE_DCT_DRAINED:
235 case MLX5_EVENT_TYPE_COMP:
236 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
237 case MLX5_EVENT_TYPE_XRQ_ERROR:
238 return true;
239 default:
240 return false;
241 }
242}
243
244static u16 get_legacy_obj_type(u16 opcode)
245{
246 switch (opcode) {
247 case MLX5_CMD_OP_CREATE_RQ:
248 return MLX5_EVENT_QUEUE_TYPE_RQ;
249 case MLX5_CMD_OP_CREATE_QP:
250 return MLX5_EVENT_QUEUE_TYPE_QP;
251 case MLX5_CMD_OP_CREATE_SQ:
252 return MLX5_EVENT_QUEUE_TYPE_SQ;
253 case MLX5_CMD_OP_CREATE_DCT:
254 return MLX5_EVENT_QUEUE_TYPE_DCT;
255 default:
256 return 0;
257 }
258}
259
260static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
261{
262 u16 opcode;
263
264 opcode = (obj->obj_id >> 32) & 0xffff;
265
266 if (is_legacy_obj_event_num(event_num))
267 return get_legacy_obj_type(opcode);
268
269 switch (opcode) {
270 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
271 return (obj->obj_id >> 48);
272 case MLX5_CMD_OP_CREATE_RQ:
273 return MLX5_OBJ_TYPE_RQ;
274 case MLX5_CMD_OP_CREATE_QP:
275 return MLX5_OBJ_TYPE_QP;
276 case MLX5_CMD_OP_CREATE_SQ:
277 return MLX5_OBJ_TYPE_SQ;
278 case MLX5_CMD_OP_CREATE_DCT:
279 return MLX5_OBJ_TYPE_DCT;
280 case MLX5_CMD_OP_CREATE_TIR:
281 return MLX5_OBJ_TYPE_TIR;
282 case MLX5_CMD_OP_CREATE_TIS:
283 return MLX5_OBJ_TYPE_TIS;
284 case MLX5_CMD_OP_CREATE_PSV:
285 return MLX5_OBJ_TYPE_PSV;
286 case MLX5_OBJ_TYPE_MKEY:
287 return MLX5_OBJ_TYPE_MKEY;
288 case MLX5_CMD_OP_CREATE_RMP:
289 return MLX5_OBJ_TYPE_RMP;
290 case MLX5_CMD_OP_CREATE_XRC_SRQ:
291 return MLX5_OBJ_TYPE_XRC_SRQ;
292 case MLX5_CMD_OP_CREATE_XRQ:
293 return MLX5_OBJ_TYPE_XRQ;
294 case MLX5_CMD_OP_CREATE_RQT:
295 return MLX5_OBJ_TYPE_RQT;
296 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
297 return MLX5_OBJ_TYPE_FLOW_COUNTER;
298 case MLX5_CMD_OP_CREATE_CQ:
299 return MLX5_OBJ_TYPE_CQ;
300 default:
301 return 0;
302 }
303}
304
305static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
306{
307 switch (event_type) {
308 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
309 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
310 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
311 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
312 case MLX5_EVENT_TYPE_PATH_MIG:
313 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
314 case MLX5_EVENT_TYPE_COMM_EST:
315 case MLX5_EVENT_TYPE_SQ_DRAINED:
316 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
317 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
318 return eqe->data.qp_srq.type;
319 case MLX5_EVENT_TYPE_CQ_ERROR:
320 case MLX5_EVENT_TYPE_XRQ_ERROR:
321 return 0;
322 case MLX5_EVENT_TYPE_DCT_DRAINED:
323 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
324 return MLX5_EVENT_QUEUE_TYPE_DCT;
325 default:
326 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
327 }
328}
329
330static u32 get_dec_obj_id(u64 obj_id)
331{
332 return (obj_id & 0xffffffff);
333}
334
335/*
336 * As the obj_id in the firmware is not globally unique the object type
337 * must be considered upon checking for a valid object id.
338 * For that the opcode of the creator command is encoded as part of the obj_id.
339 */
340static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
341{
342 return ((u64)opcode << 32) | obj_id;
343}
344
345static u64 devx_get_obj_id(const void *in)
346{
347 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
348 u64 obj_id;
349
350 switch (opcode) {
351 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
352 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
353 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
354 MLX5_GET(general_obj_in_cmd_hdr, in,
355 obj_type) << 16,
356 MLX5_GET(general_obj_in_cmd_hdr, in,
357 obj_id));
358 break;
359 case MLX5_CMD_OP_QUERY_MKEY:
360 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
361 MLX5_GET(query_mkey_in, in,
362 mkey_index));
363 break;
364 case MLX5_CMD_OP_QUERY_CQ:
365 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
366 MLX5_GET(query_cq_in, in, cqn));
367 break;
368 case MLX5_CMD_OP_MODIFY_CQ:
369 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
370 MLX5_GET(modify_cq_in, in, cqn));
371 break;
372 case MLX5_CMD_OP_QUERY_SQ:
373 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
374 MLX5_GET(query_sq_in, in, sqn));
375 break;
376 case MLX5_CMD_OP_MODIFY_SQ:
377 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
378 MLX5_GET(modify_sq_in, in, sqn));
379 break;
380 case MLX5_CMD_OP_QUERY_RQ:
381 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
382 MLX5_GET(query_rq_in, in, rqn));
383 break;
384 case MLX5_CMD_OP_MODIFY_RQ:
385 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
386 MLX5_GET(modify_rq_in, in, rqn));
387 break;
388 case MLX5_CMD_OP_QUERY_RMP:
389 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
390 MLX5_GET(query_rmp_in, in, rmpn));
391 break;
392 case MLX5_CMD_OP_MODIFY_RMP:
393 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
394 MLX5_GET(modify_rmp_in, in, rmpn));
395 break;
396 case MLX5_CMD_OP_QUERY_RQT:
397 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
398 MLX5_GET(query_rqt_in, in, rqtn));
399 break;
400 case MLX5_CMD_OP_MODIFY_RQT:
401 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
402 MLX5_GET(modify_rqt_in, in, rqtn));
403 break;
404 case MLX5_CMD_OP_QUERY_TIR:
405 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
406 MLX5_GET(query_tir_in, in, tirn));
407 break;
408 case MLX5_CMD_OP_MODIFY_TIR:
409 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
410 MLX5_GET(modify_tir_in, in, tirn));
411 break;
412 case MLX5_CMD_OP_QUERY_TIS:
413 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
414 MLX5_GET(query_tis_in, in, tisn));
415 break;
416 case MLX5_CMD_OP_MODIFY_TIS:
417 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
418 MLX5_GET(modify_tis_in, in, tisn));
419 break;
420 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
421 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
422 MLX5_GET(query_flow_table_in, in,
423 table_id));
424 break;
425 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
426 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
427 MLX5_GET(modify_flow_table_in, in,
428 table_id));
429 break;
430 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
431 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
432 MLX5_GET(query_flow_group_in, in,
433 group_id));
434 break;
435 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
436 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
437 MLX5_GET(query_fte_in, in,
438 flow_index));
439 break;
440 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
441 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
442 MLX5_GET(set_fte_in, in, flow_index));
443 break;
444 case MLX5_CMD_OP_QUERY_Q_COUNTER:
445 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
446 MLX5_GET(query_q_counter_in, in,
447 counter_set_id));
448 break;
449 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
450 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
451 MLX5_GET(query_flow_counter_in, in,
452 flow_counter_id));
453 break;
454 case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

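/*
 * Validate that the object id carried in the command mailbox matches the
 * uobject the command is issued against, so a user cannot execute a command
 * on an object it does not own.
 */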
static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

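/*
 * Mark the umem and doorbell record handles in a create/modify mailbox as
 * valid, so firmware accepts the umem IDs that userspace placed in the
 * command.
 */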
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}

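/*
 * Return true if the command creates an object that must later be destroyed,
 * reporting the opcode so the caller can build the matching destroy command.
 */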
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

		if (num_psv == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

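/*
 * Resolve the uid to issue the command with: whitelisted (read-only) commands
 * may fall back to the device whitelist uid when the context has no devx uid
 * of its own; all other commands require a devx uid on the context.
 */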
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all vhca_tunnel cmds as general; tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, in which case other users
 * may ring a doorbell on those objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only whitelisted general HCA commands are allowed for this method */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

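/*
 * Derive the destroy command mailbox (din) from the create command and its
 * output, so the object can be torn down on uobject cleanup without any
 * further input from userspace.
 */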
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn,
			 MLX5_GET(create_psv_out, out, psv0_index));
		break;
	default:
		/* The entry must match one of the opcodes accepted by
		 * devx_is_obj_create_cmd()
		 */
		WARN_ON(true);
		break;
	}
}

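/*
 * Populate the core mkey from the create mailboxes and insert it into the
 * device mkey table so the ODP pagefault handler can resolve it.
 */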
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	return xa_err(xa_store(&dev->mdev->priv.mkey_table,
			       mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
			memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

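/*
 * Drop a subscription from the event tables; called with event_xa_lock held.
 * The level-2 XA entry is freed once its subscription list becomes empty.
 */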
static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		/*
		 * The pagefault_single_data_segment() does commands against
		 * the mmkey, we must wait for that to stop before freeing the
		 * mkey, as another allocation could get the same mkey #.
		 */
		xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
			 mlx5_base_mkey(obj->devx_mr.mmkey.key));
		synchronize_srcu(&dev->mr_srcu);
	}

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}

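/*
 * Completion handler for a DEVX-created CQ: look up the subscribers of the
 * CQ's completion events under RCU and dispatch the EQE to their FDs.
 */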
static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
			     cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

struct devx_async_event_queue {
	spinlock_t		lock;
	wait_queue_head_t	poll_wait;
	struct list_head	event_list;
	atomic_t		bytes_in_use;
	u8			is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject		uobj;
	struct devx_async_event_queue	ev_queue;
	struct mlx5_async_ctx		async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct ib_uobject *fd_uobj = async_data->fd_uobj;
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_event_queue *ev_queue;
	unsigned long flags;

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);
	ev_queue = &ev_file->ev_queue;

	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	fput(fd_uobj->object);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->fd_uobj = fd_uobj;

	get_file(fd_uobj->object);
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto cb_err;

	return 0;

cb_err:
	fput(fd_uobj->object);
free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

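/*
 * Ensure the XA entries for this event key exist, allocating the level-1
 * (event type) entry and, if needed, the level-2 (object id) entry.
 */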
static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level 1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err)
			return err;
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

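/*
 * Check the requested event numbers against the firmware-reported
 * user_affiliated/user_unaffiliated event masks, falling back to the
 * legacy hard-coded lists when the device lacks event_cap.
 */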
#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}

#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
			       MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Serialize concurrent subscriptions to the same XA entries so that
	 * both can succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		event_sub->filp = fd_uobj->object;
		/* May be needed upon cleanup of the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions are done, we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}

static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}

static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		    (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}

static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
			     sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out,
		      sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
			    sizeof(out));
	if (ib_is_destroy_retryable(err, why, uobject))
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}

static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list)) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	list_add_tail(&event_data->list, &ev_file->event_list);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

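/*
 * Deliver an event to every subscription on the list; called under RCU.
 * A subscription with an eventfd is just signalled; the others get the raw
 * event data queued on their FD.
 */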
static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (!get_file_rcu(item->filp))
			continue;

		if (item->eventfd) {
			eventfd_signal(item->eventfd, 1);
			fput(item->filp);
			continue;
		}

		deliver_event(item, data);
		fput(item->filp);
	}
}

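/*
 * Notifier invoked for every firmware event; route it to the unaffiliated
 * subscriber list or, for affiliated events, to the subscribers of the
 * specific object id extracted from the EQE.
 */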
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicitly filter out kernel events that may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}
2413
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;

	xa_init(&table->event_xa);
	mutex_init(&table->event_xa_lock);
	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
}

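/*
 * Tear-down order matters: unregister from the EQ first so no new events
 * are dispatched, then drop any remaining unaffiliated subscriptions and
 * first-level entries under the table mutex before destroying the XArray.
 */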
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
	mutex_lock(&dev->devx_event_table.event_xa_lock);
	xa_for_each(&table->event_xa, id, entry) {
		event = entry;
		list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
					 xa_list)
			devx_cleanup_subscription(dev, sub);
		kfree(entry);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);
	xa_destroy(&table->event_xa);
}

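/*
 * read() handler for the async command FD. Blocks (unless O_NONBLOCK is
 * set) until a command completion is queued, then copies the wr_id header
 * plus the command output to userspace. The user buffer must fit the whole
 * event, otherwise -ENOSPC is returned and the event stays queued.
 */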
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		if (list_empty(&ev_queue->event_list) &&
		    ev_queue->is_destroyed)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
		  sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct devx_async_cmd_event_file *comp_ev_file = container_of(
		uobj, struct devx_async_cmd_event_file, uobj);
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list)
		kvfree(entry);
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);

	uverbs_close_fd(filp);
	return 0;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = devx_async_cmd_event_close,
	.llseek	 = no_llseek,
};

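/*
 * read() handler for the async event FD. In omit_data mode the queue
 * entries are the subscriptions themselves and only the 8-byte cookie is
 * returned; otherwise each entry carries a full EQE copy behind the
 * header. A pending overflow is reported once as -EOVERFLOW, and a buffer
 * too small for one event as -EINVAL.
 */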
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *uninitialized_var(event);
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	if (ev_file->is_destroyed) {
		spin_unlock_irq(&ev_file->lock);
		return -EIO;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					     struct devx_event_subscription,
					     event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
					 struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			  sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}

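/*
 * release() handler for the async event FD. Subscriptions tied to this FD
 * are unlinked under the table mutex and freed via RCU since the notifier
 * may still be walking them, any events still queued are dropped, and the
 * device reference held for this file is released.
 */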
static int devx_async_event_close(struct inode *inode, struct file *filp)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct devx_async_event_data *entry, *tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(dev, event_sub);
		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);

		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		kfree_rcu(event_sub, rcu);
	}

	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	/* free the pending events allocation */
	if (!ev_file->omit_data) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(entry, tmp,
					 &ev_file->event_list, list)
			kfree(entry); /* read can't come any more */
		spin_unlock_irq(&ev_file->lock);
	}

	uverbs_close_fd(filp);
	put_device(&dev->ib_dev.dev);
	return 0;
}

static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll    = devx_async_event_poll,
	.release = devx_async_event_close,
	.llseek	 = no_llseek,
};

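/*
 * Hot-unplug (uobject disassociation) handlers: mark the queue/file as
 * destroyed so blocked readers and pollers drain out with -EIO/EPOLLRDHUP
 * rather than touching state that is going away.
 */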
static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
						enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);

	if (why == RDMA_REMOVE_DRIVER_REMOVE)
		wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
	return 0;
}

static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
					    enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;
	spin_unlock_irq(&ev_file->lock);

	wake_up_interruptible(&ev_file->poll_wait);
	return 0;
}

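/*
 * uverbs ioctl uAPI: method and attribute declarations for the DEVX
 * objects exposed to userspace - general commands, object
 * create/destroy/modify/query, umem registration, UAR/EQN queries, event
 * subscription and the two async FD types. These trees are only exposed
 * when devx_is_supported() holds (see mlx5_ib_devx_defs below).
 */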
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
			     u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
			   UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
			   UVERBS_ATTR_TYPE(u64),
			   UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_hot_unplug_async_cmd_event_file,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			     enum mlx5_ib_uapi_devx_create_event_channel_flags,
			     UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_hot_unplug_async_event_file,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));

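/*
 * DEVX is exposed only when firmware reports a non-zero log_max_uctx
 * capability, i.e. the device supports user contexts.
 */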
static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};
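
/*
 * Userspace usage sketch (illustrative only, not part of this driver):
 * one way an application might consume the async event FD through
 * rdma-core's mlx5dv DEVX API. The mlx5dv_devx_* calls below are
 * rdma-core functions; the exact signatures and the ibv_ctx/devx_obj/
 * my_cookie placeholders are assumptions of this sketch - consult
 * infiniband/mlx5dv.h rather than this comment.
 *
 *	struct mlx5dv_devx_event_channel *ch;
 *	uint16_t event_num[1] = { 0 };	// event type(s) to subscribe to
 *	struct {
 *		struct mlx5dv_devx_async_event_hdr hdr;
 *		uint8_t eqe[64];	// room for one raw EQE
 *	} ev;
 *
 *	ch = mlx5dv_devx_create_event_channel(ibv_ctx, 0);
 *	mlx5dv_devx_subscribe_devx_event(ch, devx_obj, sizeof(event_num),
 *					 event_num, my_cookie);
 *
 *	// Blocking read, serviced by devx_async_event_read() above:
 *	// returns the cookie plus the raw EQE, or just the cookie if the
 *	// channel was created with the OMIT_EV_DATA flag.
 *	read(ch->fd, &ev, sizeof(ev));
 */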