Loading...
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
4 * Copyright (c) 2020, Intel Corporation. All rights reserved.
5 */
6
7#include <linux/debugfs.h>
8#include <linux/highmem.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/errno.h>
12#include <linux/pci.h>
13#include <linux/dma-mapping.h>
14#include <linux/slab.h>
15#include <linux/bitmap.h>
16#include <linux/sched.h>
17#include <linux/sched/mm.h>
18#include <linux/sched/task.h>
19#include <linux/delay.h>
20#include <rdma/ib_user_verbs.h>
21#include <rdma/ib_addr.h>
22#include <rdma/ib_cache.h>
23#include <linux/mlx5/port.h>
24#include <linux/mlx5/vport.h>
25#include <linux/mlx5/fs.h>
26#include <linux/mlx5/eswitch.h>
27#include <linux/mlx5/driver.h>
28#include <linux/list.h>
29#include <rdma/ib_smi.h>
30#include <rdma/ib_umem_odp.h>
31#include <rdma/lag.h>
32#include <linux/in.h>
33#include <linux/etherdevice.h>
34#include "mlx5_ib.h"
35#include "ib_rep.h"
36#include "cmd.h"
37#include "devx.h"
38#include "dm.h"
39#include "fs.h"
40#include "srq.h"
41#include "qp.h"
42#include "wr.h"
43#include "restrack.h"
44#include "counters.h"
45#include "umr.h"
46#include <rdma/uverbs_std_types.h>
47#include <rdma/uverbs_ioctl.h>
48#include <rdma/mlx5_user_ioctl_verbs.h>
49#include <rdma/mlx5_user_ioctl_cmds.h>
50#include "macsec.h"
51
52#define UVERBS_MODULE_NAME mlx5_ib
53#include <rdma/uverbs_named_ioctl.h>
54
55MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
56MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
57MODULE_LICENSE("Dual BSD/GPL");
58
59struct mlx5_ib_event_work {
60 struct work_struct work;
61 union {
62 struct mlx5_ib_dev *dev;
63 struct mlx5_ib_multiport_info *mpi;
64 };
65 bool is_slave;
66 unsigned int event;
67 void *param;
68};
69
70enum {
71 MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
72};
73
74static struct workqueue_struct *mlx5_ib_event_wq;
75static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
76static LIST_HEAD(mlx5_ib_dev_list);
77/*
78 * This mutex should be held when accessing either of the above lists
79 */
80static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
81
82struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
83{
84 struct mlx5_ib_dev *dev;
85
86 mutex_lock(&mlx5_ib_multiport_mutex);
87 dev = mpi->ibdev;
88 mutex_unlock(&mlx5_ib_multiport_mutex);
89 return dev;
90}
91
92static enum rdma_link_layer
93mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
94{
95 switch (port_type_cap) {
96 case MLX5_CAP_PORT_TYPE_IB:
97 return IB_LINK_LAYER_INFINIBAND;
98 case MLX5_CAP_PORT_TYPE_ETH:
99 return IB_LINK_LAYER_ETHERNET;
100 default:
101 return IB_LINK_LAYER_UNSPECIFIED;
102 }
103}
104
105static enum rdma_link_layer
106mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
107{
108 struct mlx5_ib_dev *dev = to_mdev(device);
109 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
110
111 return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
112}
113
114static int get_port_state(struct ib_device *ibdev,
115 u32 port_num,
116 enum ib_port_state *state)
117{
118 struct ib_port_attr attr;
119 int ret;
120
121 memset(&attr, 0, sizeof(attr));
122 ret = ibdev->ops.query_port(ibdev, port_num, &attr);
123 if (!ret)
124 *state = attr.state;
125 return ret;
126}
127
128static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
129 struct net_device *ndev,
130 struct net_device *upper,
131 u32 *port_num)
132{
133 struct net_device *rep_ndev;
134 struct mlx5_ib_port *port;
135 int i;
136
137 for (i = 0; i < dev->num_ports; i++) {
138 port = &dev->port[i];
139 if (!port->rep)
140 continue;
141
142 if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
143 *port_num = i + 1;
144 return &port->roce;
145 }
146
147 if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
148 continue;
149
150 read_lock(&port->roce.netdev_lock);
151 rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
152 port->rep->vport);
153 if (rep_ndev == ndev) {
154 read_unlock(&port->roce.netdev_lock);
155 *port_num = i + 1;
156 return &port->roce;
157 }
158 read_unlock(&port->roce.netdev_lock);
159 }
160
161 return NULL;
162}
163
164static int mlx5_netdev_event(struct notifier_block *this,
165 unsigned long event, void *ptr)
166{
167 struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
168 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
169 u32 port_num = roce->native_port_num;
170 struct mlx5_core_dev *mdev;
171 struct mlx5_ib_dev *ibdev;
172
173 ibdev = roce->dev;
174 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
175 if (!mdev)
176 return NOTIFY_DONE;
177
178 switch (event) {
179 case NETDEV_REGISTER:
180 /* Should already be registered during the load */
181 if (ibdev->is_rep)
182 break;
183 write_lock(&roce->netdev_lock);
184 if (ndev->dev.parent == mdev->device)
185 roce->netdev = ndev;
186 write_unlock(&roce->netdev_lock);
187 break;
188
189 case NETDEV_UNREGISTER:
190 /* In case of reps, ib device goes away before the netdevs */
191 write_lock(&roce->netdev_lock);
192 if (roce->netdev == ndev)
193 roce->netdev = NULL;
194 write_unlock(&roce->netdev_lock);
195 break;
196
197 case NETDEV_CHANGE:
198 case NETDEV_UP:
199 case NETDEV_DOWN: {
200 struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
201 struct net_device *upper = NULL;
202
203 if (lag_ndev) {
204 upper = netdev_master_upper_dev_get(lag_ndev);
205 dev_put(lag_ndev);
206 }
207
208 if (ibdev->is_rep)
209 roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
210 if (!roce)
211 return NOTIFY_DONE;
212 if ((upper == ndev ||
213 ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
214 ibdev->ib_active) {
215 struct ib_event ibev = { };
216 enum ib_port_state port_state;
217
218 if (get_port_state(&ibdev->ib_dev, port_num,
219 &port_state))
220 goto done;
221
222 if (roce->last_port_state == port_state)
223 goto done;
224
225 roce->last_port_state = port_state;
226 ibev.device = &ibdev->ib_dev;
227 if (port_state == IB_PORT_DOWN)
228 ibev.event = IB_EVENT_PORT_ERR;
229 else if (port_state == IB_PORT_ACTIVE)
230 ibev.event = IB_EVENT_PORT_ACTIVE;
231 else
232 goto done;
233
234 ibev.element.port_num = port_num;
235 ib_dispatch_event(&ibev);
236 }
237 break;
238 }
239
240 default:
241 break;
242 }
243done:
244 mlx5_ib_put_native_port_mdev(ibdev, port_num);
245 return NOTIFY_DONE;
246}
247
248static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
249 u32 port_num)
250{
251 struct mlx5_ib_dev *ibdev = to_mdev(device);
252 struct net_device *ndev;
253 struct mlx5_core_dev *mdev;
254
255 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
256 if (!mdev)
257 return NULL;
258
259 ndev = mlx5_lag_get_roce_netdev(mdev);
260 if (ndev)
261 goto out;
262
263 /* Ensure ndev does not disappear before we invoke dev_hold()
264 */
265 read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
266 ndev = ibdev->port[port_num - 1].roce.netdev;
267 if (ndev)
268 dev_hold(ndev);
269 read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
270
271out:
272 mlx5_ib_put_native_port_mdev(ibdev, port_num);
273 return ndev;
274}
275
276struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
277 u32 ib_port_num,
278 u32 *native_port_num)
279{
280 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
281 ib_port_num);
282 struct mlx5_core_dev *mdev = NULL;
283 struct mlx5_ib_multiport_info *mpi;
284 struct mlx5_ib_port *port;
285
286 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
287 ll != IB_LINK_LAYER_ETHERNET) {
288 if (native_port_num)
289 *native_port_num = ib_port_num;
290 return ibdev->mdev;
291 }
292
293 if (native_port_num)
294 *native_port_num = 1;
295
296 port = &ibdev->port[ib_port_num - 1];
297 spin_lock(&port->mp.mpi_lock);
298 mpi = ibdev->port[ib_port_num - 1].mp.mpi;
299 if (mpi && !mpi->unaffiliate) {
300 mdev = mpi->mdev;
301 /* If it's the master no need to refcount, it'll exist
302 * as long as the ib_dev exists.
303 */
304 if (!mpi->is_master)
305 mpi->mdev_refcnt++;
306 }
307 spin_unlock(&port->mp.mpi_lock);
308
309 return mdev;
310}
311
312void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
313{
314 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
315 port_num);
316 struct mlx5_ib_multiport_info *mpi;
317 struct mlx5_ib_port *port;
318
319 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
320 return;
321
322 port = &ibdev->port[port_num - 1];
323
324 spin_lock(&port->mp.mpi_lock);
325 mpi = ibdev->port[port_num - 1].mp.mpi;
326 if (mpi->is_master)
327 goto out;
328
329 mpi->mdev_refcnt--;
330 if (mpi->unaffiliate)
331 complete(&mpi->unref_comp);
332out:
333 spin_unlock(&port->mp.mpi_lock);
334}
335
336static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
337 u16 *active_speed, u8 *active_width)
338{
339 switch (eth_proto_oper) {
340 case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
341 case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
342 case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
343 case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
344 *active_width = IB_WIDTH_1X;
345 *active_speed = IB_SPEED_SDR;
346 break;
347 case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
348 case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
349 case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
350 case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
351 case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
352 case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
353 case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
354 *active_width = IB_WIDTH_1X;
355 *active_speed = IB_SPEED_QDR;
356 break;
357 case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
358 case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
359 case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
360 *active_width = IB_WIDTH_1X;
361 *active_speed = IB_SPEED_EDR;
362 break;
363 case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
364 case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
365 case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
366 case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
367 *active_width = IB_WIDTH_4X;
368 *active_speed = IB_SPEED_QDR;
369 break;
370 case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
371 case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
372 case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
373 *active_width = IB_WIDTH_1X;
374 *active_speed = IB_SPEED_HDR;
375 break;
376 case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
377 *active_width = IB_WIDTH_4X;
378 *active_speed = IB_SPEED_FDR;
379 break;
380 case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
381 case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
382 case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
383 case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
384 *active_width = IB_WIDTH_4X;
385 *active_speed = IB_SPEED_EDR;
386 break;
387 default:
388 return -EINVAL;
389 }
390
391 return 0;
392}
393
394static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
395 u8 *active_width)
396{
397 switch (eth_proto_oper) {
398 case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
399 case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
400 *active_width = IB_WIDTH_1X;
401 *active_speed = IB_SPEED_SDR;
402 break;
403 case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
404 *active_width = IB_WIDTH_1X;
405 *active_speed = IB_SPEED_DDR;
406 break;
407 case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
408 *active_width = IB_WIDTH_1X;
409 *active_speed = IB_SPEED_QDR;
410 break;
411 case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
412 *active_width = IB_WIDTH_4X;
413 *active_speed = IB_SPEED_QDR;
414 break;
415 case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
416 *active_width = IB_WIDTH_1X;
417 *active_speed = IB_SPEED_EDR;
418 break;
419 case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
420 *active_width = IB_WIDTH_2X;
421 *active_speed = IB_SPEED_EDR;
422 break;
423 case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
424 *active_width = IB_WIDTH_1X;
425 *active_speed = IB_SPEED_HDR;
426 break;
427 case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
428 *active_width = IB_WIDTH_4X;
429 *active_speed = IB_SPEED_EDR;
430 break;
431 case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
432 *active_width = IB_WIDTH_2X;
433 *active_speed = IB_SPEED_HDR;
434 break;
435 case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
436 *active_width = IB_WIDTH_1X;
437 *active_speed = IB_SPEED_NDR;
438 break;
439 case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
440 *active_width = IB_WIDTH_4X;
441 *active_speed = IB_SPEED_HDR;
442 break;
443 case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
444 *active_width = IB_WIDTH_2X;
445 *active_speed = IB_SPEED_NDR;
446 break;
447 case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
448 *active_width = IB_WIDTH_8X;
449 *active_speed = IB_SPEED_HDR;
450 break;
451 case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
452 *active_width = IB_WIDTH_4X;
453 *active_speed = IB_SPEED_NDR;
454 break;
455 case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
456 *active_width = IB_WIDTH_8X;
457 *active_speed = IB_SPEED_NDR;
458 break;
459 default:
460 return -EINVAL;
461 }
462
463 return 0;
464}
465
466static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
467 u8 *active_width, bool ext)
468{
469 return ext ?
470 translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
471 active_width) :
472 translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
473 active_width);
474}
475
476static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
477 struct ib_port_attr *props)
478{
479 struct mlx5_ib_dev *dev = to_mdev(device);
480 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
481 struct mlx5_core_dev *mdev;
482 struct net_device *ndev, *upper;
483 enum ib_mtu ndev_ib_mtu;
484 bool put_mdev = true;
485 u32 eth_prot_oper;
486 u32 mdev_port_num;
487 bool ext;
488 int err;
489
490 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
491 if (!mdev) {
492 /* This means the port isn't affiliated yet. Get the
493 * info for the master port instead.
494 */
495 put_mdev = false;
496 mdev = dev->mdev;
497 mdev_port_num = 1;
498 port_num = 1;
499 }
500
501 /* Possible bad flows are checked before filling out props so in case
502 * of an error it will still be zeroed out.
503 * Use native port in case of reps
504 */
505 if (dev->is_rep)
506 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
507 1);
508 else
509 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
510 mdev_port_num);
511 if (err)
512 goto out;
513 ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
514 eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
515
516 props->active_width = IB_WIDTH_4X;
517 props->active_speed = IB_SPEED_QDR;
518
519 translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
520 &props->active_width, ext);
521
522 if (!dev->is_rep && dev->mdev->roce.roce_en) {
523 u16 qkey_viol_cntr;
524
525 props->port_cap_flags |= IB_PORT_CM_SUP;
526 props->ip_gids = true;
527 props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
528 roce_address_table_size);
529 mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
530 props->qkey_viol_cntr = qkey_viol_cntr;
531 }
532 props->max_mtu = IB_MTU_4096;
533 props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
534 props->pkey_tbl_len = 1;
535 props->state = IB_PORT_DOWN;
536 props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
537
538 /* If this is a stub query for an unaffiliated port stop here */
539 if (!put_mdev)
540 goto out;
541
542 ndev = mlx5_ib_get_netdev(device, port_num);
543 if (!ndev)
544 goto out;
545
546 if (dev->lag_active) {
547 rcu_read_lock();
548 upper = netdev_master_upper_dev_get_rcu(ndev);
549 if (upper) {
550 dev_put(ndev);
551 ndev = upper;
552 dev_hold(ndev);
553 }
554 rcu_read_unlock();
555 }
556
557 if (netif_running(ndev) && netif_carrier_ok(ndev)) {
558 props->state = IB_PORT_ACTIVE;
559 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
560 }
561
562 ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
563
564 dev_put(ndev);
565
566 props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
567out:
568 if (put_mdev)
569 mlx5_ib_put_native_port_mdev(dev, port_num);
570 return err;
571}
572
573int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
574 unsigned int index, const union ib_gid *gid,
575 const struct ib_gid_attr *attr)
576{
577 enum ib_gid_type gid_type;
578 u16 vlan_id = 0xffff;
579 u8 roce_version = 0;
580 u8 roce_l3_type = 0;
581 u8 mac[ETH_ALEN];
582 int ret;
583
584 gid_type = attr->gid_type;
585 if (gid) {
586 ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
587 if (ret)
588 return ret;
589 }
590
591 switch (gid_type) {
592 case IB_GID_TYPE_ROCE:
593 roce_version = MLX5_ROCE_VERSION_1;
594 break;
595 case IB_GID_TYPE_ROCE_UDP_ENCAP:
596 roce_version = MLX5_ROCE_VERSION_2;
597 if (gid && ipv6_addr_v4mapped((void *)gid))
598 roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
599 else
600 roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
601 break;
602
603 default:
604 mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
605 }
606
607 return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
608 roce_l3_type, gid->raw, mac,
609 vlan_id < VLAN_CFI_MASK, vlan_id,
610 port_num);
611}
612
613static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
614 __always_unused void **context)
615{
616 int ret;
617
618 ret = mlx5r_add_gid_macsec_operations(attr);
619 if (ret)
620 return ret;
621
622 return set_roce_addr(to_mdev(attr->device), attr->port_num,
623 attr->index, &attr->gid, attr);
624}
625
626static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
627 __always_unused void **context)
628{
629 int ret;
630
631 ret = set_roce_addr(to_mdev(attr->device), attr->port_num,
632 attr->index, NULL, attr);
633 if (ret)
634 return ret;
635
636 mlx5r_del_gid_macsec_operations(attr);
637 return 0;
638}
639
640__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
641 const struct ib_gid_attr *attr)
642{
643 if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
644 return 0;
645
646 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
647}
648
649static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
650{
651 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
652 return !MLX5_CAP_GEN(dev->mdev, ib_virt);
653 return 0;
654}
655
656enum {
657 MLX5_VPORT_ACCESS_METHOD_MAD,
658 MLX5_VPORT_ACCESS_METHOD_HCA,
659 MLX5_VPORT_ACCESS_METHOD_NIC,
660};
661
662static int mlx5_get_vport_access_method(struct ib_device *ibdev)
663{
664 if (mlx5_use_mad_ifc(to_mdev(ibdev)))
665 return MLX5_VPORT_ACCESS_METHOD_MAD;
666
667 if (mlx5_ib_port_link_layer(ibdev, 1) ==
668 IB_LINK_LAYER_ETHERNET)
669 return MLX5_VPORT_ACCESS_METHOD_NIC;
670
671 return MLX5_VPORT_ACCESS_METHOD_HCA;
672}
673
674static void get_atomic_caps(struct mlx5_ib_dev *dev,
675 u8 atomic_size_qp,
676 struct ib_device_attr *props)
677{
678 u8 tmp;
679 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
680 u8 atomic_req_8B_endianness_mode =
681 MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
682
683 /* Check if HW supports 8 bytes standard atomic operations and capable
684 * of host endianness respond
685 */
686 tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
687 if (((atomic_operations & tmp) == tmp) &&
688 (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
689 (atomic_req_8B_endianness_mode)) {
690 props->atomic_cap = IB_ATOMIC_HCA;
691 } else {
692 props->atomic_cap = IB_ATOMIC_NONE;
693 }
694}
695
696static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
697 struct ib_device_attr *props)
698{
699 u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
700
701 get_atomic_caps(dev, atomic_size_qp, props);
702}
703
704static int mlx5_query_system_image_guid(struct ib_device *ibdev,
705 __be64 *sys_image_guid)
706{
707 struct mlx5_ib_dev *dev = to_mdev(ibdev);
708 struct mlx5_core_dev *mdev = dev->mdev;
709 u64 tmp;
710 int err;
711
712 switch (mlx5_get_vport_access_method(ibdev)) {
713 case MLX5_VPORT_ACCESS_METHOD_MAD:
714 return mlx5_query_mad_ifc_system_image_guid(ibdev,
715 sys_image_guid);
716
717 case MLX5_VPORT_ACCESS_METHOD_HCA:
718 err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
719 break;
720
721 case MLX5_VPORT_ACCESS_METHOD_NIC:
722 err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
723 break;
724
725 default:
726 return -EINVAL;
727 }
728
729 if (!err)
730 *sys_image_guid = cpu_to_be64(tmp);
731
732 return err;
733
734}
735
736static int mlx5_query_max_pkeys(struct ib_device *ibdev,
737 u16 *max_pkeys)
738{
739 struct mlx5_ib_dev *dev = to_mdev(ibdev);
740 struct mlx5_core_dev *mdev = dev->mdev;
741
742 switch (mlx5_get_vport_access_method(ibdev)) {
743 case MLX5_VPORT_ACCESS_METHOD_MAD:
744 return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
745
746 case MLX5_VPORT_ACCESS_METHOD_HCA:
747 case MLX5_VPORT_ACCESS_METHOD_NIC:
748 *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
749 pkey_table_size));
750 return 0;
751
752 default:
753 return -EINVAL;
754 }
755}
756
757static int mlx5_query_vendor_id(struct ib_device *ibdev,
758 u32 *vendor_id)
759{
760 struct mlx5_ib_dev *dev = to_mdev(ibdev);
761
762 switch (mlx5_get_vport_access_method(ibdev)) {
763 case MLX5_VPORT_ACCESS_METHOD_MAD:
764 return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
765
766 case MLX5_VPORT_ACCESS_METHOD_HCA:
767 case MLX5_VPORT_ACCESS_METHOD_NIC:
768 return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
769
770 default:
771 return -EINVAL;
772 }
773}
774
775static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
776 __be64 *node_guid)
777{
778 u64 tmp;
779 int err;
780
781 switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
782 case MLX5_VPORT_ACCESS_METHOD_MAD:
783 return mlx5_query_mad_ifc_node_guid(dev, node_guid);
784
785 case MLX5_VPORT_ACCESS_METHOD_HCA:
786 err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
787 break;
788
789 case MLX5_VPORT_ACCESS_METHOD_NIC:
790 err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
791 break;
792
793 default:
794 return -EINVAL;
795 }
796
797 if (!err)
798 *node_guid = cpu_to_be64(tmp);
799
800 return err;
801}
802
803struct mlx5_reg_node_desc {
804 u8 desc[IB_DEVICE_NODE_DESC_MAX];
805};
806
807static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
808{
809 struct mlx5_reg_node_desc in;
810
811 if (mlx5_use_mad_ifc(dev))
812 return mlx5_query_mad_ifc_node_desc(dev, node_desc);
813
814 memset(&in, 0, sizeof(in));
815
816 return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
817 sizeof(struct mlx5_reg_node_desc),
818 MLX5_REG_NODE_DESC, 0, 0);
819}
820
821static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
822 struct mlx5_ib_query_device_resp *resp)
823{
824 struct mlx5_eswitch *esw = mdev->priv.eswitch;
825 u16 vport = mlx5_eswitch_manager_vport(mdev);
826
827 resp->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(esw,
828 vport);
829 resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
830}
831
832static int mlx5_ib_query_device(struct ib_device *ibdev,
833 struct ib_device_attr *props,
834 struct ib_udata *uhw)
835{
836 size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
837 struct mlx5_ib_dev *dev = to_mdev(ibdev);
838 struct mlx5_core_dev *mdev = dev->mdev;
839 int err = -ENOMEM;
840 int max_sq_desc;
841 int max_rq_sg;
842 int max_sq_sg;
843 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
844 bool raw_support = !mlx5_core_mp_enabled(mdev);
845 struct mlx5_ib_query_device_resp resp = {};
846 size_t resp_len;
847 u64 max_tso;
848
849 resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
850 if (uhw_outlen && uhw_outlen < resp_len)
851 return -EINVAL;
852
853 resp.response_length = resp_len;
854
855 if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
856 return -EINVAL;
857
858 memset(props, 0, sizeof(*props));
859 err = mlx5_query_system_image_guid(ibdev,
860 &props->sys_image_guid);
861 if (err)
862 return err;
863
864 props->max_pkeys = dev->pkey_table_len;
865
866 err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
867 if (err)
868 return err;
869
870 props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
871 (fw_rev_min(dev->mdev) << 16) |
872 fw_rev_sub(dev->mdev);
873 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
874 IB_DEVICE_PORT_ACTIVE_EVENT |
875 IB_DEVICE_SYS_IMAGE_GUID |
876 IB_DEVICE_RC_RNR_NAK_GEN;
877
878 if (MLX5_CAP_GEN(mdev, pkv))
879 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
880 if (MLX5_CAP_GEN(mdev, qkv))
881 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
882 if (MLX5_CAP_GEN(mdev, apm))
883 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
884 if (MLX5_CAP_GEN(mdev, xrc))
885 props->device_cap_flags |= IB_DEVICE_XRC;
886 if (MLX5_CAP_GEN(mdev, imaicl)) {
887 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
888 IB_DEVICE_MEM_WINDOW_TYPE_2B;
889 props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
890 /* We support 'Gappy' memory registration too */
891 props->kernel_cap_flags |= IBK_SG_GAPS_REG;
892 }
893 /* IB_WR_REG_MR always requires changing the entity size with UMR */
894 if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
895 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
896 if (MLX5_CAP_GEN(mdev, sho)) {
897 props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
898 /* At this stage no support for signature handover */
899 props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
900 IB_PROT_T10DIF_TYPE_2 |
901 IB_PROT_T10DIF_TYPE_3;
902 props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
903 IB_GUARD_T10DIF_CSUM;
904 }
905 if (MLX5_CAP_GEN(mdev, block_lb_mc))
906 props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
907
908 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
909 if (MLX5_CAP_ETH(mdev, csum_cap)) {
910 /* Legacy bit to support old userspace libraries */
911 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
912 props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
913 }
914
915 if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
916 props->raw_packet_caps |=
917 IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
918
919 if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
920 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
921 if (max_tso) {
922 resp.tso_caps.max_tso = 1 << max_tso;
923 resp.tso_caps.supported_qpts |=
924 1 << IB_QPT_RAW_PACKET;
925 resp.response_length += sizeof(resp.tso_caps);
926 }
927 }
928
929 if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
930 resp.rss_caps.rx_hash_function =
931 MLX5_RX_HASH_FUNC_TOEPLITZ;
932 resp.rss_caps.rx_hash_fields_mask =
933 MLX5_RX_HASH_SRC_IPV4 |
934 MLX5_RX_HASH_DST_IPV4 |
935 MLX5_RX_HASH_SRC_IPV6 |
936 MLX5_RX_HASH_DST_IPV6 |
937 MLX5_RX_HASH_SRC_PORT_TCP |
938 MLX5_RX_HASH_DST_PORT_TCP |
939 MLX5_RX_HASH_SRC_PORT_UDP |
940 MLX5_RX_HASH_DST_PORT_UDP |
941 MLX5_RX_HASH_INNER;
942 resp.response_length += sizeof(resp.rss_caps);
943 }
944 } else {
945 if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
946 resp.response_length += sizeof(resp.tso_caps);
947 if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
948 resp.response_length += sizeof(resp.rss_caps);
949 }
950
951 if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
952 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
953 props->kernel_cap_flags |= IBK_UD_TSO;
954 }
955
956 if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
957 MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
958 raw_support)
959 props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
960
961 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
962 MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
963 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
964
965 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
966 MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
967 raw_support) {
968 /* Legacy bit to support old userspace libraries */
969 props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
970 props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
971 }
972
973 if (MLX5_CAP_DEV_MEM(mdev, memic)) {
974 props->max_dm_size =
975 MLX5_CAP_DEV_MEM(mdev, max_memic_size);
976 }
977
978 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
979 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
980
981 if (MLX5_CAP_GEN(mdev, end_pad))
982 props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
983
984 props->vendor_part_id = mdev->pdev->device;
985 props->hw_ver = mdev->pdev->revision;
986
987 props->max_mr_size = ~0ull;
988 props->page_size_cap = ~(min_page_size - 1);
989 props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
990 props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
991 max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
992 sizeof(struct mlx5_wqe_data_seg);
993 max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
994 max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
995 sizeof(struct mlx5_wqe_raddr_seg)) /
996 sizeof(struct mlx5_wqe_data_seg);
997 props->max_send_sge = max_sq_sg;
998 props->max_recv_sge = max_rq_sg;
999 props->max_sge_rd = MLX5_MAX_SGE_RD;
1000 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
1001 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
1002 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
1003 props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
1004 props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
1005 props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
1006 props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
1007 props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
1008 props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
1009 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
1010 props->max_srq_sge = max_rq_sg - 1;
1011 props->max_fast_reg_page_list_len =
1012 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
1013 props->max_pi_fast_reg_page_list_len =
1014 props->max_fast_reg_page_list_len / 2;
1015 props->max_sgl_rd =
1016 MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
1017 get_atomic_caps_qp(dev, props);
1018 props->masked_atomic_cap = IB_ATOMIC_NONE;
1019 props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
1020 props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
1021 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1022 props->max_mcast_grp;
1023 props->max_ah = INT_MAX;
1024 props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
1025 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1026
1027 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1028 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1029 props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
1030 props->odp_caps = dev->odp_caps;
1031 if (!uhw) {
1032 /* ODP for kernel QPs is not implemented for receive
1033 * WQEs and SRQ WQEs
1034 */
1035 props->odp_caps.per_transport_caps.rc_odp_caps &=
1036 ~(IB_ODP_SUPPORT_READ |
1037 IB_ODP_SUPPORT_SRQ_RECV);
1038 props->odp_caps.per_transport_caps.uc_odp_caps &=
1039 ~(IB_ODP_SUPPORT_READ |
1040 IB_ODP_SUPPORT_SRQ_RECV);
1041 props->odp_caps.per_transport_caps.ud_odp_caps &=
1042 ~(IB_ODP_SUPPORT_READ |
1043 IB_ODP_SUPPORT_SRQ_RECV);
1044 props->odp_caps.per_transport_caps.xrc_odp_caps &=
1045 ~(IB_ODP_SUPPORT_READ |
1046 IB_ODP_SUPPORT_SRQ_RECV);
1047 }
1048 }
1049
1050 if (mlx5_core_is_vf(mdev))
1051 props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
1052
1053 if (mlx5_ib_port_link_layer(ibdev, 1) ==
1054 IB_LINK_LAYER_ETHERNET && raw_support) {
1055 props->rss_caps.max_rwq_indirection_tables =
1056 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1057 props->rss_caps.max_rwq_indirection_table_size =
1058 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1059 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
1060 props->max_wq_type_rq =
1061 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1062 }
1063
1064 if (MLX5_CAP_GEN(mdev, tag_matching)) {
1065 props->tm_caps.max_num_tags =
1066 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1067 props->tm_caps.max_ops =
1068 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1069 props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
1070 }
1071
1072 if (MLX5_CAP_GEN(mdev, tag_matching) &&
1073 MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
1074 props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
1075 props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
1076 }
1077
1078 if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1079 props->cq_caps.max_cq_moderation_count =
1080 MLX5_MAX_CQ_COUNT;
1081 props->cq_caps.max_cq_moderation_period =
1082 MLX5_MAX_CQ_PERIOD;
1083 }
1084
1085 if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
1086 resp.response_length += sizeof(resp.cqe_comp_caps);
1087
1088 if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1089 resp.cqe_comp_caps.max_num =
1090 MLX5_CAP_GEN(dev->mdev,
1091 cqe_compression_max_num);
1092
1093 resp.cqe_comp_caps.supported_format =
1094 MLX5_IB_CQE_RES_FORMAT_HASH |
1095 MLX5_IB_CQE_RES_FORMAT_CSUM;
1096
1097 if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1098 resp.cqe_comp_caps.supported_format |=
1099 MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
1100 }
1101 }
1102
1103 if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
1104 raw_support) {
1105 if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1106 MLX5_CAP_GEN(mdev, qos)) {
1107 resp.packet_pacing_caps.qp_rate_limit_max =
1108 MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1109 resp.packet_pacing_caps.qp_rate_limit_min =
1110 MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1111 resp.packet_pacing_caps.supported_qpts |=
1112 1 << IB_QPT_RAW_PACKET;
1113 if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1114 MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1115 resp.packet_pacing_caps.cap_flags |=
1116 MLX5_IB_PP_SUPPORT_BURST;
1117 }
1118 resp.response_length += sizeof(resp.packet_pacing_caps);
1119 }
1120
1121 if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
1122 uhw_outlen) {
1123 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1124 resp.mlx5_ib_support_multi_pkt_send_wqes =
1125 MLX5_IB_ALLOW_MPW;
1126
1127 if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1128 resp.mlx5_ib_support_multi_pkt_send_wqes |=
1129 MLX5_IB_SUPPORT_EMPW;
1130
1131 resp.response_length +=
1132 sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
1133 }
1134
1135 if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
1136 resp.response_length += sizeof(resp.flags);
1137
1138 if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1139 resp.flags |=
1140 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
1141
1142 if (MLX5_CAP_GEN(mdev, cqe_128_always))
1143 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
1144 if (MLX5_CAP_GEN(mdev, qp_packet_based))
1145 resp.flags |=
1146 MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1147
1148 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1149 }
1150
1151 if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
1152 resp.response_length += sizeof(resp.sw_parsing_caps);
1153 if (MLX5_CAP_ETH(mdev, swp)) {
1154 resp.sw_parsing_caps.sw_parsing_offloads |=
1155 MLX5_IB_SW_PARSING;
1156
1157 if (MLX5_CAP_ETH(mdev, swp_csum))
1158 resp.sw_parsing_caps.sw_parsing_offloads |=
1159 MLX5_IB_SW_PARSING_CSUM;
1160
1161 if (MLX5_CAP_ETH(mdev, swp_lso))
1162 resp.sw_parsing_caps.sw_parsing_offloads |=
1163 MLX5_IB_SW_PARSING_LSO;
1164
1165 if (resp.sw_parsing_caps.sw_parsing_offloads)
1166 resp.sw_parsing_caps.supported_qpts =
1167 BIT(IB_QPT_RAW_PACKET);
1168 }
1169 }
1170
1171 if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
1172 raw_support) {
1173 resp.response_length += sizeof(resp.striding_rq_caps);
1174 if (MLX5_CAP_GEN(mdev, striding_rq)) {
1175 resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
1176 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1177 resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
1178 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
1179 if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
1180 resp.striding_rq_caps
1181 .min_single_wqe_log_num_of_strides =
1182 MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1183 else
1184 resp.striding_rq_caps
1185 .min_single_wqe_log_num_of_strides =
1186 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1187 resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
1188 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
1189 resp.striding_rq_caps.supported_qpts =
1190 BIT(IB_QPT_RAW_PACKET);
1191 }
1192 }
1193
1194 if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
1195 resp.response_length += sizeof(resp.tunnel_offloads_caps);
1196 if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1197 resp.tunnel_offloads_caps |=
1198 MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
1199 if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1200 resp.tunnel_offloads_caps |=
1201 MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
1202 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1203 resp.tunnel_offloads_caps |=
1204 MLX5_IB_TUNNELED_OFFLOADS_GRE;
1205 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
1206 resp.tunnel_offloads_caps |=
1207 MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1208 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
1209 resp.tunnel_offloads_caps |=
1210 MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1211 }
1212
1213 if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
1214 resp.response_length += sizeof(resp.dci_streams_caps);
1215
1216 resp.dci_streams_caps.max_log_num_concurent =
1217 MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
1218
1219 resp.dci_streams_caps.max_log_num_errored =
1220 MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
1221 }
1222
1223 if (offsetofend(typeof(resp), reserved) <= uhw_outlen)
1224 resp.response_length += sizeof(resp.reserved);
1225
1226 if (offsetofend(typeof(resp), reg_c0) <= uhw_outlen) {
1227 struct mlx5_eswitch *esw = mdev->priv.eswitch;
1228
1229 resp.response_length += sizeof(resp.reg_c0);
1230
1231 if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS &&
1232 mlx5_eswitch_vport_match_metadata_enabled(esw))
1233 fill_esw_mgr_reg_c0(mdev, &resp);
1234 }
1235
1236 if (uhw_outlen) {
1237 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
1238
1239 if (err)
1240 return err;
1241 }
1242
1243 return 0;
1244}
1245
1246static void translate_active_width(struct ib_device *ibdev, u16 active_width,
1247 u8 *ib_width)
1248{
1249 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1250
1251 if (active_width & MLX5_PTYS_WIDTH_1X)
1252 *ib_width = IB_WIDTH_1X;
1253 else if (active_width & MLX5_PTYS_WIDTH_2X)
1254 *ib_width = IB_WIDTH_2X;
1255 else if (active_width & MLX5_PTYS_WIDTH_4X)
1256 *ib_width = IB_WIDTH_4X;
1257 else if (active_width & MLX5_PTYS_WIDTH_8X)
1258 *ib_width = IB_WIDTH_8X;
1259 else if (active_width & MLX5_PTYS_WIDTH_12X)
1260 *ib_width = IB_WIDTH_12X;
1261 else {
1262 mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1263 active_width);
1264 *ib_width = IB_WIDTH_4X;
1265 }
1266
1267 return;
1268}
1269
1270static int mlx5_mtu_to_ib_mtu(int mtu)
1271{
1272 switch (mtu) {
1273 case 256: return 1;
1274 case 512: return 2;
1275 case 1024: return 3;
1276 case 2048: return 4;
1277 case 4096: return 5;
1278 default:
1279 pr_warn("invalid mtu\n");
1280 return -1;
1281 }
1282}
1283
1284enum ib_max_vl_num {
1285 __IB_MAX_VL_0 = 1,
1286 __IB_MAX_VL_0_1 = 2,
1287 __IB_MAX_VL_0_3 = 3,
1288 __IB_MAX_VL_0_7 = 4,
1289 __IB_MAX_VL_0_14 = 5,
1290};
1291
1292enum mlx5_vl_hw_cap {
1293 MLX5_VL_HW_0 = 1,
1294 MLX5_VL_HW_0_1 = 2,
1295 MLX5_VL_HW_0_2 = 3,
1296 MLX5_VL_HW_0_3 = 4,
1297 MLX5_VL_HW_0_4 = 5,
1298 MLX5_VL_HW_0_5 = 6,
1299 MLX5_VL_HW_0_6 = 7,
1300 MLX5_VL_HW_0_7 = 8,
1301 MLX5_VL_HW_0_14 = 15
1302};
1303
1304static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1305 u8 *max_vl_num)
1306{
1307 switch (vl_hw_cap) {
1308 case MLX5_VL_HW_0:
1309 *max_vl_num = __IB_MAX_VL_0;
1310 break;
1311 case MLX5_VL_HW_0_1:
1312 *max_vl_num = __IB_MAX_VL_0_1;
1313 break;
1314 case MLX5_VL_HW_0_3:
1315 *max_vl_num = __IB_MAX_VL_0_3;
1316 break;
1317 case MLX5_VL_HW_0_7:
1318 *max_vl_num = __IB_MAX_VL_0_7;
1319 break;
1320 case MLX5_VL_HW_0_14:
1321 *max_vl_num = __IB_MAX_VL_0_14;
1322 break;
1323
1324 default:
1325 return -EINVAL;
1326 }
1327
1328 return 0;
1329}
1330
1331static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
1332 struct ib_port_attr *props)
1333{
1334 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1335 struct mlx5_core_dev *mdev = dev->mdev;
1336 struct mlx5_hca_vport_context *rep;
1337 u16 max_mtu;
1338 u16 oper_mtu;
1339 int err;
1340 u16 ib_link_width_oper;
1341 u8 vl_hw_cap;
1342
1343 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1344 if (!rep) {
1345 err = -ENOMEM;
1346 goto out;
1347 }
1348
1349 /* props being zeroed by the caller, avoid zeroing it here */
1350
1351 err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1352 if (err)
1353 goto out;
1354
1355 props->lid = rep->lid;
1356 props->lmc = rep->lmc;
1357 props->sm_lid = rep->sm_lid;
1358 props->sm_sl = rep->sm_sl;
1359 props->state = rep->vport_state;
1360 props->phys_state = rep->port_physical_state;
1361 props->port_cap_flags = rep->cap_mask1;
1362 props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1363 props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1364 props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1365 props->bad_pkey_cntr = rep->pkey_violation_counter;
1366 props->qkey_viol_cntr = rep->qkey_violation_counter;
1367 props->subnet_timeout = rep->subnet_timeout;
1368 props->init_type_reply = rep->init_type_reply;
1369
1370 if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
1371 props->port_cap_flags2 = rep->cap_mask2;
1372
1373 err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
1374 &props->active_speed, port);
1375 if (err)
1376 goto out;
1377
1378 translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1379
1380 mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1381
1382 props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1383
1384 mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1385
1386 props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1387
1388 err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1389 if (err)
1390 goto out;
1391
1392 err = translate_max_vl_num(ibdev, vl_hw_cap,
1393 &props->max_vl_num);
1394out:
1395 kfree(rep);
1396 return err;
1397}
1398
1399int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
1400 struct ib_port_attr *props)
1401{
1402 unsigned int count;
1403 int ret;
1404
1405 switch (mlx5_get_vport_access_method(ibdev)) {
1406 case MLX5_VPORT_ACCESS_METHOD_MAD:
1407 ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1408 break;
1409
1410 case MLX5_VPORT_ACCESS_METHOD_HCA:
1411 ret = mlx5_query_hca_port(ibdev, port, props);
1412 break;
1413
1414 case MLX5_VPORT_ACCESS_METHOD_NIC:
1415 ret = mlx5_query_port_roce(ibdev, port, props);
1416 break;
1417
1418 default:
1419 ret = -EINVAL;
1420 }
1421
1422 if (!ret && props) {
1423 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1424 struct mlx5_core_dev *mdev;
1425 bool put_mdev = true;
1426
1427 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1428 if (!mdev) {
1429 /* If the port isn't affiliated yet query the master.
1430 * The master and slave will have the same values.
1431 */
1432 mdev = dev->mdev;
1433 port = 1;
1434 put_mdev = false;
1435 }
1436 count = mlx5_core_reserved_gids_count(mdev);
1437 if (put_mdev)
1438 mlx5_ib_put_native_port_mdev(dev, port);
1439 props->gid_tbl_len -= count;
1440 }
1441 return ret;
1442}
1443
1444static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
1445 struct ib_port_attr *props)
1446{
1447 return mlx5_query_port_roce(ibdev, port, props);
1448}
1449
1450static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1451 u16 *pkey)
1452{
1453 /* Default special Pkey for representor device port as per the
1454 * IB specification 1.3 section 10.9.1.2.
1455 */
1456 *pkey = 0xffff;
1457 return 0;
1458}
1459
1460static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
1461 union ib_gid *gid)
1462{
1463 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1464 struct mlx5_core_dev *mdev = dev->mdev;
1465
1466 switch (mlx5_get_vport_access_method(ibdev)) {
1467 case MLX5_VPORT_ACCESS_METHOD_MAD:
1468 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1469
1470 case MLX5_VPORT_ACCESS_METHOD_HCA:
1471 return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1472
1473 default:
1474 return -EINVAL;
1475 }
1476
1477}
1478
1479static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
1480 u16 index, u16 *pkey)
1481{
1482 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1483 struct mlx5_core_dev *mdev;
1484 bool put_mdev = true;
1485 u32 mdev_port_num;
1486 int err;
1487
1488 mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1489 if (!mdev) {
1490 /* The port isn't affiliated yet, get the PKey from the master
1491 * port. For RoCE the PKey tables will be the same.
1492 */
1493 put_mdev = false;
1494 mdev = dev->mdev;
1495 mdev_port_num = 1;
1496 }
1497
1498 err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1499 index, pkey);
1500 if (put_mdev)
1501 mlx5_ib_put_native_port_mdev(dev, port);
1502
1503 return err;
1504}
1505
1506static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1507 u16 *pkey)
1508{
1509 switch (mlx5_get_vport_access_method(ibdev)) {
1510 case MLX5_VPORT_ACCESS_METHOD_MAD:
1511 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1512
1513 case MLX5_VPORT_ACCESS_METHOD_HCA:
1514 case MLX5_VPORT_ACCESS_METHOD_NIC:
1515 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1516 default:
1517 return -EINVAL;
1518 }
1519}
1520
1521static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1522 struct ib_device_modify *props)
1523{
1524 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1525 struct mlx5_reg_node_desc in;
1526 struct mlx5_reg_node_desc out;
1527 int err;
1528
1529 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1530 return -EOPNOTSUPP;
1531
1532 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1533 return 0;
1534
1535 /*
1536 * If possible, pass node desc to FW, so it can generate
1537 * a 144 trap. If cmd fails, just ignore.
1538 */
1539 memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1540 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1541 sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1542 if (err)
1543 return err;
1544
1545 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1546
1547 return err;
1548}
1549
1550static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
1551 u32 value)
1552{
1553 struct mlx5_hca_vport_context ctx = {};
1554 struct mlx5_core_dev *mdev;
1555 u32 mdev_port_num;
1556 int err;
1557
1558 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1559 if (!mdev)
1560 return -ENODEV;
1561
1562 err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1563 if (err)
1564 goto out;
1565
1566 if (~ctx.cap_mask1_perm & mask) {
1567 mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1568 mask, ctx.cap_mask1_perm);
1569 err = -EINVAL;
1570 goto out;
1571 }
1572
1573 ctx.cap_mask1 = value;
1574 ctx.cap_mask1_perm = mask;
1575 err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1576 0, &ctx);
1577
1578out:
1579 mlx5_ib_put_native_port_mdev(dev, port_num);
1580
1581 return err;
1582}
1583
1584static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1585 struct ib_port_modify *props)
1586{
1587 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1588 struct ib_port_attr attr;
1589 u32 tmp;
1590 int err;
1591 u32 change_mask;
1592 u32 value;
1593 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1594 IB_LINK_LAYER_INFINIBAND);
1595
1596 /* CM layer calls ib_modify_port() regardless of the link layer. For
1597 * Ethernet ports, qkey violation and Port capabilities are meaningless.
1598 */
1599 if (!is_ib)
1600 return 0;
1601
1602 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1603 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1604 value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1605 return set_port_caps_atomic(dev, port, change_mask, value);
1606 }
1607
1608 mutex_lock(&dev->cap_mask_mutex);
1609
1610 err = ib_query_port(ibdev, port, &attr);
1611 if (err)
1612 goto out;
1613
1614 tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1615 ~props->clr_port_cap_mask;
1616
1617 err = mlx5_set_port_caps(dev->mdev, port, tmp);
1618
1619out:
1620 mutex_unlock(&dev->cap_mask_mutex);
1621 return err;
1622}
1623
1624static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1625{
1626 mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1627 caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1628}
1629
1630static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1631{
1632 /* Large page with non 4k uar support might limit the dynamic size */
1633 if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
1634 return MLX5_MIN_DYN_BFREGS;
1635
1636 return MLX5_MAX_DYN_BFREGS;
1637}
1638
1639static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1640 struct mlx5_ib_alloc_ucontext_req_v2 *req,
1641 struct mlx5_bfreg_info *bfregi)
1642{
1643 int uars_per_sys_page;
1644 int bfregs_per_sys_page;
1645 int ref_bfregs = req->total_num_bfregs;
1646
1647 if (req->total_num_bfregs == 0)
1648 return -EINVAL;
1649
1650 BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1651 BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1652
1653 if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1654 return -ENOMEM;
1655
1656 uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1657 bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1658 /* This holds the required static allocation asked by the user */
1659 req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1660 if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1661 return -EINVAL;
1662
1663 bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1664 bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1665 bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1666 bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1667
1668 mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1669 MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1670 lib_uar_4k ? "yes" : "no", ref_bfregs,
1671 req->total_num_bfregs, bfregi->total_num_bfregs,
1672 bfregi->num_sys_pages);
1673
1674 return 0;
1675}
1676
1677static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1678{
1679 struct mlx5_bfreg_info *bfregi;
1680 int err;
1681 int i;
1682
1683 bfregi = &context->bfregi;
1684 for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1685 err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
1686 context->devx_uid);
1687 if (err)
1688 goto error;
1689
1690 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1691 }
1692
1693 for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1694 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1695
1696 return 0;
1697
1698error:
1699 for (--i; i >= 0; i--)
1700 if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1701 context->devx_uid))
1702 mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1703
1704 return err;
1705}
1706
1707static void deallocate_uars(struct mlx5_ib_dev *dev,
1708 struct mlx5_ib_ucontext *context)
1709{
1710 struct mlx5_bfreg_info *bfregi;
1711 int i;
1712
1713 bfregi = &context->bfregi;
1714 for (i = 0; i < bfregi->num_sys_pages; i++)
1715 if (i < bfregi->num_static_sys_pages ||
1716 bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1717 mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1718 context->devx_uid);
1719}
1720
1721int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1722{
1723 int err = 0;
1724
1725 mutex_lock(&dev->lb.mutex);
1726 if (td)
1727 dev->lb.user_td++;
1728 if (qp)
1729 dev->lb.qps++;
1730
1731 if (dev->lb.user_td == 2 ||
1732 dev->lb.qps == 1) {
1733 if (!dev->lb.enabled) {
1734 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1735 dev->lb.enabled = true;
1736 }
1737 }
1738
1739 mutex_unlock(&dev->lb.mutex);
1740
1741 return err;
1742}
1743
1744void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1745{
1746 mutex_lock(&dev->lb.mutex);
1747 if (td)
1748 dev->lb.user_td--;
1749 if (qp)
1750 dev->lb.qps--;
1751
1752 if (dev->lb.user_td == 1 &&
1753 dev->lb.qps == 0) {
1754 if (dev->lb.enabled) {
1755 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1756 dev->lb.enabled = false;
1757 }
1758 }
1759
1760 mutex_unlock(&dev->lb.mutex);
1761}
1762
1763static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1764 u16 uid)
1765{
1766 int err;
1767
1768 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1769 return 0;
1770
1771 err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1772 if (err)
1773 return err;
1774
1775 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1776 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1777 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1778 return err;
1779
1780 return mlx5_ib_enable_lb(dev, true, false);
1781}
1782
1783static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1784 u16 uid)
1785{
1786 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1787 return;
1788
1789 mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1790
1791 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1792 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1793 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1794 return;
1795
1796 mlx5_ib_disable_lb(dev, true, false);
1797}
1798
1799static int set_ucontext_resp(struct ib_ucontext *uctx,
1800 struct mlx5_ib_alloc_ucontext_resp *resp)
1801{
1802 struct ib_device *ibdev = uctx->device;
1803 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1804 struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1805 struct mlx5_bfreg_info *bfregi = &context->bfregi;
1806
1807 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1808 resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey;
1809 resp->comp_mask |=
1810 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1811 }
1812
1813 resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1814 if (dev->wc_support)
1815 resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
1816 log_bf_reg_size);
1817 resp->cache_line_size = cache_line_size();
1818 resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1819 resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1820 resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1821 resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1822 resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1823 resp->cqe_version = context->cqe_version;
1824 resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1825 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1826 resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1827 MLX5_CAP_GEN(dev->mdev,
1828 num_of_uars_per_page) : 1;
1829 resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
1830 bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
1831 resp->num_ports = dev->num_ports;
1832 resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1833 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1834
1835 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1836 mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
1837 resp->eth_min_inline++;
1838 }
1839
1840 if (dev->mdev->clock_info)
1841 resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1842
1843 /*
1844 * We don't want to expose information from the PCI bar that is located
1845 * after 4096 bytes, so if the arch only supports larger pages, let's
1846 * pretend we don't support reading the HCA's core clock. This is also
1847 * forced by mmap function.
1848 */
1849 if (PAGE_SIZE <= 4096) {
1850 resp->comp_mask |=
1851 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1852 resp->hca_core_clock_offset =
1853 offsetof(struct mlx5_init_seg,
1854 internal_timer_h) % PAGE_SIZE;
1855 }
1856
1857 if (MLX5_CAP_GEN(dev->mdev, ece_support))
1858 resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
1859
1860 if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
1861 rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
1862 rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
1863 resp->comp_mask |=
1864 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
1865
1866 resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
1867
1868 if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
1869 resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
1870
1871 resp->comp_mask |=
1872 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
1873
1874 return 0;
1875}
1876
1877static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1878 struct ib_udata *udata)
1879{
1880 struct ib_device *ibdev = uctx->device;
1881 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1882 struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1883 struct mlx5_ib_alloc_ucontext_resp resp = {};
1884 struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1885 struct mlx5_bfreg_info *bfregi;
1886 int ver;
1887 int err;
1888 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1889 max_cqe_version);
1890 bool lib_uar_4k;
1891 bool lib_uar_dyn;
1892
1893 if (!dev->ib_active)
1894 return -EAGAIN;
1895
1896 if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1897 ver = 0;
1898 else if (udata->inlen >= min_req_v2)
1899 ver = 2;
1900 else
1901 return -EINVAL;
1902
1903 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1904 if (err)
1905 return err;
1906
1907 if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
1908 return -EOPNOTSUPP;
1909
1910 if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1911 return -EOPNOTSUPP;
1912
1913 req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1914 MLX5_NON_FP_BFREGS_PER_UAR);
1915 if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1916 return -EINVAL;
1917
1918 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1919 err = mlx5_ib_devx_create(dev, true);
1920 if (err < 0)
1921 goto out_ctx;
1922 context->devx_uid = err;
1923 }
1924
1925 lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1926 lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
1927 bfregi = &context->bfregi;
1928
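	/* When userspace requests dynamic UAR management, skip the static
	 * bfreg/UAR allocation below; UARs are then allocated on demand
	 * (e.g. via the UAR object alloc method defined later in this file).
	 */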
1929 if (lib_uar_dyn) {
1930 bfregi->lib_uar_dyn = lib_uar_dyn;
1931 goto uar_done;
1932 }
1933
1934 /* updates req->total_num_bfregs */
1935 err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1936 if (err)
1937 goto out_devx;
1938
1939 mutex_init(&bfregi->lock);
1940 bfregi->lib_uar_4k = lib_uar_4k;
1941 bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
1942 GFP_KERNEL);
1943 if (!bfregi->count) {
1944 err = -ENOMEM;
1945 goto out_devx;
1946 }
1947
1948 bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1949 sizeof(*bfregi->sys_pages),
1950 GFP_KERNEL);
1951 if (!bfregi->sys_pages) {
1952 err = -ENOMEM;
1953 goto out_count;
1954 }
1955
1956 err = allocate_uars(dev, context);
1957 if (err)
1958 goto out_sys_pages;
1959
1960uar_done:
1961 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1962 context->devx_uid);
1963 if (err)
1964 goto out_uars;
1965
1966 INIT_LIST_HEAD(&context->db_page_list);
1967 mutex_init(&context->db_page_mutex);
1968
1969 context->cqe_version = min_t(__u8,
1970 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1971 req.max_cqe_version);
1972
1973 err = set_ucontext_resp(uctx, &resp);
1974 if (err)
1975 goto out_mdev;
1976
1977 resp.response_length = min(udata->outlen, sizeof(resp));
1978 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1979 if (err)
1980 goto out_mdev;
1981
1982 bfregi->ver = ver;
1983 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1984 context->lib_caps = req.lib_caps;
1985 print_lib_caps(dev, context->lib_caps);
1986
1987 if (mlx5_ib_lag_should_assign_affinity(dev)) {
1988 u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
1989
1990 atomic_set(&context->tx_port_affinity,
1991 atomic_add_return(
1992 1, &dev->port[port].roce.tx_port_affinity));
1993 }
1994
1995 return 0;
1996
1997out_mdev:
1998 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1999
2000out_uars:
2001 deallocate_uars(dev, context);
2002
2003out_sys_pages:
2004 kfree(bfregi->sys_pages);
2005
2006out_count:
2007 kfree(bfregi->count);
2008
2009out_devx:
2010 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
2011 mlx5_ib_devx_destroy(dev, context->devx_uid);
2012
2013out_ctx:
2014 return err;
2015}
2016
2017static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext,
2018 struct uverbs_attr_bundle *attrs)
2019{
2020 struct mlx5_ib_alloc_ucontext_resp uctx_resp = {};
2021 int ret;
2022
2023 ret = set_ucontext_resp(ibcontext, &uctx_resp);
2024 if (ret)
2025 return ret;
2026
2027 uctx_resp.response_length =
2028 min_t(size_t,
2029 uverbs_attr_get_len(attrs,
2030 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX),
2031 sizeof(uctx_resp));
2032
2033 ret = uverbs_copy_to_struct_or_zero(attrs,
2034 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
2035 &uctx_resp,
2036 sizeof(uctx_resp));
2037 return ret;
2038}
2039
2040static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
2041{
2042 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2043 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2044 struct mlx5_bfreg_info *bfregi;
2045
2046 bfregi = &context->bfregi;
2047 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2048
2049 deallocate_uars(dev, context);
2050 kfree(bfregi->sys_pages);
2051 kfree(bfregi->count);
2052
2053 if (context->devx_uid)
2054 mlx5_ib_devx_destroy(dev, context->devx_uid);
2055}
2056
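/* With 4K UARs the device packs several UARs into one system page, so the
 * two helpers below translate a firmware UAR index into the system page
 * (as a PFN or a physical address) that contains it.
 */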
2057static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2058 int uar_idx)
2059{
2060 int fw_uars_per_page;
2061
2062 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2063
2064 return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2065}
2066
2067static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
2068 int uar_idx)
2069{
2070 unsigned int fw_uars_per_page;
2071
2072 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2073 MLX5_UARS_IN_PAGE : 1;
2074
2075 return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
2076}
2077
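/* Legacy mmap offsets encode a command in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT and an argument (e.g. a UAR index) in the low
 * bits; the helpers below unpack those fields from vma->vm_pgoff.
 */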
2078static int get_command(unsigned long offset)
2079{
2080 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2081}
2082
2083static int get_arg(unsigned long offset)
2084{
2085 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2086}
2087
2088static int get_index(unsigned long offset)
2089{
2090 return get_arg(offset);
2091}
2092
2093 /* The index resides in an extra byte to enable values larger than 255 */
2094static int get_extended_index(unsigned long offset)
2095{
2096 return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2097}
2098
2099
2100static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2101{
2102}
2103
2104static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2105{
2106 switch (cmd) {
2107 case MLX5_IB_MMAP_WC_PAGE:
2108 return "WC";
2109 case MLX5_IB_MMAP_REGULAR_PAGE:
2110 return "best effort WC";
2111 case MLX5_IB_MMAP_NC_PAGE:
2112 return "NC";
2113 case MLX5_IB_MMAP_DEVICE_MEM:
2114 return "Device Memory";
2115 default:
2116 return "Unknown";
2117 }
2118}
2119
2120static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2121 struct vm_area_struct *vma,
2122 struct mlx5_ib_ucontext *context)
2123{
2124 if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2125 !(vma->vm_flags & VM_SHARED))
2126 return -EINVAL;
2127
2128 if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2129 return -EOPNOTSUPP;
2130
2131 if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2132 return -EPERM;
2133 vm_flags_clear(vma, VM_MAYWRITE);
2134
2135 if (!dev->mdev->clock_info)
2136 return -EOPNOTSUPP;
2137
2138 return vm_insert_page(vma, vma->vm_start,
2139 virt_to_page(dev->mdev->clock_info));
2140}
2141
2142static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
2143{
2144 struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
2145 struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
2146 struct mlx5_var_table *var_table = &dev->var_table;
2147 struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);
2148
2149 switch (mentry->mmap_flag) {
2150 case MLX5_IB_MMAP_TYPE_MEMIC:
2151 case MLX5_IB_MMAP_TYPE_MEMIC_OP:
2152 mlx5_ib_dm_mmap_free(dev, mentry);
2153 break;
2154 case MLX5_IB_MMAP_TYPE_VAR:
2155 mutex_lock(&var_table->bitmap_lock);
2156 clear_bit(mentry->page_idx, var_table->bitmap);
2157 mutex_unlock(&var_table->bitmap_lock);
2158 kfree(mentry);
2159 break;
2160 case MLX5_IB_MMAP_TYPE_UAR_WC:
2161 case MLX5_IB_MMAP_TYPE_UAR_NC:
2162 mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
2163 context->devx_uid);
2164 kfree(mentry);
2165 break;
2166 default:
2167 WARN_ON(true);
2168 }
2169}
2170
2171static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2172 struct vm_area_struct *vma,
2173 struct mlx5_ib_ucontext *context)
2174{
2175 struct mlx5_bfreg_info *bfregi = &context->bfregi;
2176 int err;
2177 unsigned long idx;
2178 phys_addr_t pfn;
2179 pgprot_t prot;
2180 u32 bfreg_dyn_idx = 0;
2181 u32 uar_index;
2182 int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2183 int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2184 bfregi->num_static_sys_pages;
2185
2186 if (bfregi->lib_uar_dyn)
2187 return -EINVAL;
2188
2189 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2190 return -EINVAL;
2191
2192 if (dyn_uar)
2193 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2194 else
2195 idx = get_index(vma->vm_pgoff);
2196
2197 if (idx >= max_valid_idx) {
2198 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2199 idx, max_valid_idx);
2200 return -EINVAL;
2201 }
2202
2203 switch (cmd) {
2204 case MLX5_IB_MMAP_WC_PAGE:
2205 case MLX5_IB_MMAP_ALLOC_WC:
2206 case MLX5_IB_MMAP_REGULAR_PAGE:
2207 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
2208 prot = pgprot_writecombine(vma->vm_page_prot);
2209 break;
2210 case MLX5_IB_MMAP_NC_PAGE:
2211 prot = pgprot_noncached(vma->vm_page_prot);
2212 break;
2213 default:
2214 return -EINVAL;
2215 }
2216
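	/* For a dynamically allocated WC UAR, reserve the first bfreg of the
	 * requested system page (its count slot guards the whole page) before
	 * asking firmware for a new UAR.
	 */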
2217 if (dyn_uar) {
2218 int uars_per_page;
2219
2220 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2221 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2222 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2223 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2224 bfreg_dyn_idx, bfregi->total_num_bfregs);
2225 return -EINVAL;
2226 }
2227
2228 mutex_lock(&bfregi->lock);
2229 /* Fail if the UAR is already allocated; the first bfreg index of
2230 * each page holds its count.
2231 */
2232 if (bfregi->count[bfreg_dyn_idx]) {
2233 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2234 mutex_unlock(&bfregi->lock);
2235 return -EINVAL;
2236 }
2237
2238 bfregi->count[bfreg_dyn_idx]++;
2239 mutex_unlock(&bfregi->lock);
2240
2241 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
2242 context->devx_uid);
2243 if (err) {
2244 mlx5_ib_warn(dev, "UAR alloc failed\n");
2245 goto free_bfreg;
2246 }
2247 } else {
2248 uar_index = bfregi->sys_pages[idx];
2249 }
2250
2251 pfn = uar_index2pfn(dev, uar_index);
2252 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2253
2254 err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2255 prot, NULL);
2256 if (err) {
2257 mlx5_ib_err(dev,
2258 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2259 err, mmap_cmd2str(cmd));
2260 goto err;
2261 }
2262
2263 if (dyn_uar)
2264 bfregi->sys_pages[idx] = uar_index;
2265 return 0;
2266
2267err:
2268 if (!dyn_uar)
2269 return err;
2270
2271 mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
2272
2273free_bfreg:
2274 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2275
2276 return err;
2277}
2278
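/* Rebuild the rdma_user_mmap_entry key (command in the high 16 bits,
 * extended index in the low 16 bits) from the legacy vm_pgoff encoding.
 */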
2279static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
2280{
2281 unsigned long idx;
2282 u8 command;
2283
2284 command = get_command(vma->vm_pgoff);
2285 idx = get_extended_index(vma->vm_pgoff);
2286
2287 return (command << 16 | idx);
2288}
2289
2290static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
2291 struct vm_area_struct *vma,
2292 struct ib_ucontext *ucontext)
2293{
2294 struct mlx5_user_mmap_entry *mentry;
2295 struct rdma_user_mmap_entry *entry;
2296 unsigned long pgoff;
2297 pgprot_t prot;
2298 phys_addr_t pfn;
2299 int ret;
2300
2301 pgoff = mlx5_vma_to_pgoff(vma);
2302 entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
2303 if (!entry)
2304 return -EINVAL;
2305
2306 mentry = to_mmmap(entry);
2307 pfn = (mentry->address >> PAGE_SHIFT);
2308 if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
2309 mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
2310 prot = pgprot_noncached(vma->vm_page_prot);
2311 else
2312 prot = pgprot_writecombine(vma->vm_page_prot);
2313 ret = rdma_user_mmap_io(ucontext, vma, pfn,
2314 entry->npages * PAGE_SIZE,
2315 prot,
2316 entry);
2317 rdma_user_mmap_entry_put(&mentry->rdma_entry);
2318 return ret;
2319}
2320
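/* Inverse of mlx5_vma_to_pgoff(): convert an entry's pgoff key back into
 * the byte offset userspace passes to mmap(), re-splitting the index into
 * its low byte and the extra high byte.
 */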
2321static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
2322{
2323 u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
2324 u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
2325
2326 return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
2327 (index & 0xFF)) << PAGE_SHIFT;
2328}
2329
2330static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2331{
2332 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2333 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2334 unsigned long command;
2335 phys_addr_t pfn;
2336
2337 command = get_command(vma->vm_pgoff);
2338 switch (command) {
2339 case MLX5_IB_MMAP_WC_PAGE:
2340 case MLX5_IB_MMAP_ALLOC_WC:
2341 if (!dev->wc_support)
2342 return -EPERM;
2343 fallthrough;
2344 case MLX5_IB_MMAP_NC_PAGE:
2345 case MLX5_IB_MMAP_REGULAR_PAGE:
2346 return uar_mmap(dev, command, vma, context);
2347
2348 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2349 return -ENOSYS;
2350
2351 case MLX5_IB_MMAP_CORE_CLOCK:
2352 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2353 return -EINVAL;
2354
2355 if (vma->vm_flags & VM_WRITE)
2356 return -EPERM;
2357 vm_flags_clear(vma, VM_MAYWRITE);
2358
2359 /* Don't expose information to user space that it shouldn't have */
2360 if (PAGE_SIZE > 4096)
2361 return -EOPNOTSUPP;
2362
2363 pfn = (dev->mdev->iseg_base +
2364 offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2365 PAGE_SHIFT;
2366 return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2367 PAGE_SIZE,
2368 pgprot_noncached(vma->vm_page_prot),
2369 NULL);
2370 case MLX5_IB_MMAP_CLOCK_INFO:
2371 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2372
2373 default:
2374 return mlx5_ib_mmap_offset(dev, vma, ibcontext);
2375 }
2376
2377 return 0;
2378}
2379
2380static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2381{
2382 struct mlx5_ib_pd *pd = to_mpd(ibpd);
2383 struct ib_device *ibdev = ibpd->device;
2384 struct mlx5_ib_alloc_pd_resp resp;
2385 int err;
2386 u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2387 u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
2388 u16 uid = 0;
2389 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2390 udata, struct mlx5_ib_ucontext, ibucontext);
2391
2392 uid = context ? context->devx_uid : 0;
2393 MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2394 MLX5_SET(alloc_pd_in, in, uid, uid);
2395 err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2396 if (err)
2397 return err;
2398
2399 pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2400 pd->uid = uid;
2401 if (udata) {
2402 resp.pdn = pd->pdn;
2403 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2404 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2405 return -EFAULT;
2406 }
2407 }
2408
2409 return 0;
2410}
2411
2412static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2413{
2414 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2415 struct mlx5_ib_pd *mpd = to_mpd(pd);
2416
2417 return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2418}
2419
2420static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2421{
2422 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2423 struct mlx5_ib_qp *mqp = to_mqp(ibqp);
2424 int err;
2425 u16 uid;
2426
2427 uid = ibqp->pd ?
2428 to_mpd(ibqp->pd)->uid : 0;
2429
2430 if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
2431 mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
2432 return -EOPNOTSUPP;
2433 }
2434
2435 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2436 if (err)
2437 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2438 ibqp->qp_num, gid->raw);
2439
2440 return err;
2441}
2442
2443static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2444{
2445 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2446 int err;
2447 u16 uid;
2448
2449 uid = ibqp->pd ?
2450 to_mpd(ibqp->pd)->uid : 0;
2451 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2452 if (err)
2453 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2454 ibqp->qp_num, gid->raw);
2455
2456 return err;
2457}
2458
2459static int init_node_data(struct mlx5_ib_dev *dev)
2460{
2461 int err;
2462
2463 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2464 if (err)
2465 return err;
2466
2467 dev->mdev->rev_id = dev->mdev->pdev->revision;
2468
2469 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2470}
2471
2472static ssize_t fw_pages_show(struct device *device,
2473 struct device_attribute *attr, char *buf)
2474{
2475 struct mlx5_ib_dev *dev =
2476 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2477
2478 return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
2479}
2480static DEVICE_ATTR_RO(fw_pages);
2481
2482static ssize_t reg_pages_show(struct device *device,
2483 struct device_attribute *attr, char *buf)
2484{
2485 struct mlx5_ib_dev *dev =
2486 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2487
2488 return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2489}
2490static DEVICE_ATTR_RO(reg_pages);
2491
2492static ssize_t hca_type_show(struct device *device,
2493 struct device_attribute *attr, char *buf)
2494{
2495 struct mlx5_ib_dev *dev =
2496 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2497
2498 return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
2499}
2500static DEVICE_ATTR_RO(hca_type);
2501
2502static ssize_t hw_rev_show(struct device *device,
2503 struct device_attribute *attr, char *buf)
2504{
2505 struct mlx5_ib_dev *dev =
2506 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2507
2508 return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
2509}
2510static DEVICE_ATTR_RO(hw_rev);
2511
2512static ssize_t board_id_show(struct device *device,
2513 struct device_attribute *attr, char *buf)
2514{
2515 struct mlx5_ib_dev *dev =
2516 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2517
2518 return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
2519 dev->mdev->board_id);
2520}
2521static DEVICE_ATTR_RO(board_id);
2522
2523static struct attribute *mlx5_class_attributes[] = {
2524 &dev_attr_hw_rev.attr,
2525 &dev_attr_hca_type.attr,
2526 &dev_attr_board_id.attr,
2527 &dev_attr_fw_pages.attr,
2528 &dev_attr_reg_pages.attr,
2529 NULL,
2530};
2531
2532static const struct attribute_group mlx5_attr_group = {
2533 .attrs = mlx5_class_attributes,
2534};
2535
2536static void pkey_change_handler(struct work_struct *work)
2537{
2538 struct mlx5_ib_port_resources *ports =
2539 container_of(work, struct mlx5_ib_port_resources,
2540 pkey_change_work);
2541
2542 if (!ports->gsi)
2543 /*
2544 * We got this event before the device was fully configured
2545 * and before the MAD registration code was called or finished.
2546 */
2547 return;
2548
2549 mlx5_ib_gsi_pkey_change(ports->gsi);
2550}
2551
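/* On a fatal device error, walk every QP on this device, collect the
 * send/receive CQs of any QP with posted work, and invoke their
 * completion handlers so consumers can observe the flushed-in-error
 * completions.
 */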
2552static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2553{
2554 struct mlx5_ib_qp *mqp;
2555 struct mlx5_ib_cq *send_mcq, *recv_mcq;
2556 struct mlx5_core_cq *mcq;
2557 struct list_head cq_armed_list;
2558 unsigned long flags_qp;
2559 unsigned long flags_cq;
2560 unsigned long flags;
2561
2562 INIT_LIST_HEAD(&cq_armed_list);
2563
2564 /* Go over the QP list residing on this ibdev, syncing with QP create/destroy. */
2565 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2566 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2567 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2568 if (mqp->sq.tail != mqp->sq.head) {
2569 send_mcq = to_mcq(mqp->ibqp.send_cq);
2570 spin_lock_irqsave(&send_mcq->lock, flags_cq);
2571 if (send_mcq->mcq.comp &&
2572 mqp->ibqp.send_cq->comp_handler) {
2573 if (!send_mcq->mcq.reset_notify_added) {
2574 send_mcq->mcq.reset_notify_added = 1;
2575 list_add_tail(&send_mcq->mcq.reset_notify,
2576 &cq_armed_list);
2577 }
2578 }
2579 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2580 }
2581 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2582 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2583 /* no handling is needed for SRQ */
2584 if (!mqp->ibqp.srq) {
2585 if (mqp->rq.tail != mqp->rq.head) {
2586 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2587 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2588 if (recv_mcq->mcq.comp &&
2589 mqp->ibqp.recv_cq->comp_handler) {
2590 if (!recv_mcq->mcq.reset_notify_added) {
2591 recv_mcq->mcq.reset_notify_added = 1;
2592 list_add_tail(&recv_mcq->mcq.reset_notify,
2593 &cq_armed_list);
2594 }
2595 }
2596 spin_unlock_irqrestore(&recv_mcq->lock,
2597 flags_cq);
2598 }
2599 }
2600 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2601 }
2602 /* At this point all in-flight post sends have been flushed by the
2603 * lock/unlock cycles above. Now arm all involved CQs.
2604 */
2605 list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
2606 mcq->comp(mcq, NULL);
2607 }
2608 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2609}
2610
2611static void delay_drop_handler(struct work_struct *work)
2612{
2613 int err;
2614 struct mlx5_ib_delay_drop *delay_drop =
2615 container_of(work, struct mlx5_ib_delay_drop,
2616 delay_drop_work);
2617
2618 atomic_inc(&delay_drop->events_cnt);
2619
2620 mutex_lock(&delay_drop->lock);
2621 err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
2622 if (err) {
2623 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
2624 delay_drop->timeout);
2625 delay_drop->activate = false;
2626 }
2627 mutex_unlock(&delay_drop->lock);
2628}
2629
2630static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2631 struct ib_event *ibev)
2632{
2633 u32 port = (eqe->data.port.port >> 4) & 0xf;
2634
2635 switch (eqe->sub_type) {
2636 case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
2637 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2638 IB_LINK_LAYER_ETHERNET)
2639 schedule_work(&ibdev->delay_drop.delay_drop_work);
2640 break;
2641 default: /* do nothing */
2642 return;
2643 }
2644}
2645
2646static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2647 struct ib_event *ibev)
2648{
2649 u32 port = (eqe->data.port.port >> 4) & 0xf;
2650
2651 ibev->element.port_num = port;
2652
2653 switch (eqe->sub_type) {
2654 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
2655 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
2656 case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
2657 /* In RoCE, port up/down events are handled in
2658 * mlx5_netdev_event().
2659 */
2660 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2661 IB_LINK_LAYER_ETHERNET)
2662 return -EINVAL;
2663
2664 ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
2665 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2666 break;
2667
2668 case MLX5_PORT_CHANGE_SUBTYPE_LID:
2669 ibev->event = IB_EVENT_LID_CHANGE;
2670 break;
2671
2672 case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
2673 ibev->event = IB_EVENT_PKEY_CHANGE;
2674 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
2675 break;
2676
2677 case MLX5_PORT_CHANGE_SUBTYPE_GUID:
2678 ibev->event = IB_EVENT_GID_CHANGE;
2679 break;
2680
2681 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
2682 ibev->event = IB_EVENT_CLIENT_REREGISTER;
2683 break;
2684 default:
2685 return -EINVAL;
2686 }
2687
2688 return 0;
2689}
2690
2691static void mlx5_ib_handle_event(struct work_struct *_work)
2692{
2693 struct mlx5_ib_event_work *work =
2694 container_of(_work, struct mlx5_ib_event_work, work);
2695 struct mlx5_ib_dev *ibdev;
2696 struct ib_event ibev;
2697 bool fatal = false;
2698
2699 if (work->is_slave) {
2700 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
2701 if (!ibdev)
2702 goto out;
2703 } else {
2704 ibdev = work->dev;
2705 }
2706
2707 switch (work->event) {
2708 case MLX5_DEV_EVENT_SYS_ERROR:
2709 ibev.event = IB_EVENT_DEVICE_FATAL;
2710 mlx5_ib_handle_internal_error(ibdev);
2711 ibev.element.port_num = (u8)(unsigned long)work->param;
2712 fatal = true;
2713 break;
2714 case MLX5_EVENT_TYPE_PORT_CHANGE:
2715 if (handle_port_change(ibdev, work->param, &ibev))
2716 goto out;
2717 break;
2718 case MLX5_EVENT_TYPE_GENERAL_EVENT:
2719 handle_general_event(ibdev, work->param, &ibev);
2720 fallthrough;
2721 default:
2722 goto out;
2723 }
2724
2725 ibev.device = &ibdev->ib_dev;
2726
2727 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
2728 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
2729 goto out;
2730 }
2731
2732 if (ibdev->ib_active)
2733 ib_dispatch_event(&ibev);
2734
2735 if (fatal)
2736 ibdev->ib_active = false;
2737out:
2738 kfree(work);
2739}
2740
2741static int mlx5_ib_event(struct notifier_block *nb,
2742 unsigned long event, void *param)
2743{
2744 struct mlx5_ib_event_work *work;
2745
2746 work = kmalloc(sizeof(*work), GFP_ATOMIC);
2747 if (!work)
2748 return NOTIFY_DONE;
2749
2750 INIT_WORK(&work->work, mlx5_ib_handle_event);
2751 work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
2752 work->is_slave = false;
2753 work->param = param;
2754 work->event = event;
2755
2756 queue_work(mlx5_ib_event_wq, &work->work);
2757
2758 return NOTIFY_OK;
2759}
2760
2761static int mlx5_ib_event_slave_port(struct notifier_block *nb,
2762 unsigned long event, void *param)
2763{
2764 struct mlx5_ib_event_work *work;
2765
2766 work = kmalloc(sizeof(*work), GFP_ATOMIC);
2767 if (!work)
2768 return NOTIFY_DONE;
2769
2770 INIT_WORK(&work->work, mlx5_ib_handle_event);
2771 work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
2772 work->is_slave = true;
2773 work->param = param;
2774 work->event = event;
2775 queue_work(mlx5_ib_event_wq, &work->work);
2776
2777 return NOTIFY_OK;
2778}
2779
2780static int set_has_smi_cap(struct mlx5_ib_dev *dev)
2781{
2782 struct mlx5_hca_vport_context vport_ctx;
2783 int err;
2784 int port;
2785
2786 if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
2787 return 0;
2788
2789 for (port = 1; port <= dev->num_ports; port++) {
2790 if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
2791 dev->port_caps[port - 1].has_smi = true;
2792 continue;
2793 }
2794 err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
2795 &vport_ctx);
2796 if (err) {
2797 mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
2798 port, err);
2799 return err;
2800 }
2801 dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
2802 }
2803
2804 return 0;
2805}
2806
2807static void get_ext_port_caps(struct mlx5_ib_dev *dev)
2808{
2809 unsigned int port;
2810
2811 rdma_for_each_port (&dev->ib_dev, port)
2812 mlx5_query_ext_port_caps(dev, port);
2813}
2814
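/* Map the firmware's UMR fence capability to the fence mode the driver
 * will use on UMR work requests.
 */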
2815static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
2816{
2817 switch (umr_fence_cap) {
2818 case MLX5_CAP_UMR_FENCE_NONE:
2819 return MLX5_FENCE_MODE_NONE;
2820 case MLX5_CAP_UMR_FENCE_SMALL:
2821 return MLX5_FENCE_MODE_INITIATOR_SMALL;
2822 default:
2823 return MLX5_FENCE_MODE_STRONG_ORDERING;
2824 }
2825}
2826
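/* Allocate the driver-internal resources shared across the device: a PD,
 * a CQ, two XRC domains and two SRQs (one XRC, one basic), plus the
 * per-port P_Key change work items.
 */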
2827static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
2828{
2829 struct mlx5_ib_resources *devr = &dev->devr;
2830 struct ib_srq_init_attr attr;
2831 struct ib_device *ibdev;
2832 struct ib_cq_init_attr cq_attr = {.cqe = 1};
2833 int port;
2834 int ret = 0;
2835
2836 ibdev = &dev->ib_dev;
2837
2838 if (!MLX5_CAP_GEN(dev->mdev, xrc))
2839 return -EOPNOTSUPP;
2840
2841 devr->p0 = ib_alloc_pd(ibdev, 0);
2842 if (IS_ERR(devr->p0))
2843 return PTR_ERR(devr->p0);
2844
2845 devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
2846 if (IS_ERR(devr->c0)) {
2847 ret = PTR_ERR(devr->c0);
2848 goto error1;
2849 }
2850
2851 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
2852 if (ret)
2853 goto error2;
2854
2855 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
2856 if (ret)
2857 goto error3;
2858
2859 memset(&attr, 0, sizeof(attr));
2860 attr.attr.max_sge = 1;
2861 attr.attr.max_wr = 1;
2862 attr.srq_type = IB_SRQT_XRC;
2863 attr.ext.cq = devr->c0;
2864
2865 devr->s0 = ib_create_srq(devr->p0, &attr);
2866 if (IS_ERR(devr->s0)) {
2867 ret = PTR_ERR(devr->s0);
2868 goto err_create;
2869 }
2870
2871 memset(&attr, 0, sizeof(attr));
2872 attr.attr.max_sge = 1;
2873 attr.attr.max_wr = 1;
2874 attr.srq_type = IB_SRQT_BASIC;
2875
2876 devr->s1 = ib_create_srq(devr->p0, &attr);
2877 if (IS_ERR(devr->s1)) {
2878 ret = PTR_ERR(devr->s1);
2879 goto error6;
2880 }
2881
2882 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
2883 INIT_WORK(&devr->ports[port].pkey_change_work,
2884 pkey_change_handler);
2885
2886 return 0;
2887
2888error6:
2889 ib_destroy_srq(devr->s0);
2890err_create:
2891 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2892error3:
2893 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2894error2:
2895 ib_destroy_cq(devr->c0);
2896error1:
2897 ib_dealloc_pd(devr->p0);
2898 return ret;
2899}
2900
2901static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
2902{
2903 struct mlx5_ib_resources *devr = &dev->devr;
2904 int port;
2905
2906 /*
2907 * Make sure no P_Key change work items are still executing.
2908 *
2909 * At this stage mlx5_ib_event has been unregistered, which
2910 * ensures that no new work items are queued.
2911 */
2912 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
2913 cancel_work_sync(&devr->ports[port].pkey_change_work);
2914
2915 ib_destroy_srq(devr->s1);
2916 ib_destroy_srq(devr->s0);
2917 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2918 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2919 ib_destroy_cq(devr->c0);
2920 ib_dealloc_pd(devr->p0);
2921}
2922
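/* Derive the rdma core port capability flags from the link layer and the
 * device's RoCE L3/version capabilities; devices with multiport
 * affiliation enabled do not advertise raw packet support.
 */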
2923static u32 get_core_cap_flags(struct ib_device *ibdev,
2924 struct mlx5_hca_vport_context *rep)
2925{
2926 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2927 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
2928 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
2929 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
2930 bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
2931 u32 ret = 0;
2932
2933 if (rep->grh_required)
2934 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
2935
2936 if (ll == IB_LINK_LAYER_INFINIBAND)
2937 return ret | RDMA_CORE_PORT_IBA_IB;
2938
2939 if (raw_support)
2940 ret |= RDMA_CORE_PORT_RAW_PACKET;
2941
2942 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
2943 return ret;
2944
2945 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
2946 return ret;
2947
2948 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
2949 ret |= RDMA_CORE_PORT_IBA_ROCE;
2950
2951 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
2952 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2953
2954 return ret;
2955}
2956
2957static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
2958 struct ib_port_immutable *immutable)
2959{
2960 struct ib_port_attr attr;
2961 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2962 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
2963 struct mlx5_hca_vport_context rep = {0};
2964 int err;
2965
2966 err = ib_query_port(ibdev, port_num, &attr);
2967 if (err)
2968 return err;
2969
2970 if (ll == IB_LINK_LAYER_INFINIBAND) {
2971 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
2972 &rep);
2973 if (err)
2974 return err;
2975 }
2976
2977 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2978 immutable->gid_tbl_len = attr.gid_tbl_len;
2979 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
2980 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2981
2982 return 0;
2983}
2984
2985static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
2986 struct ib_port_immutable *immutable)
2987{
2988 struct ib_port_attr attr;
2989 int err;
2990
2991 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
2992
2993 err = ib_query_port(ibdev, port_num, &attr);
2994 if (err)
2995 return err;
2996
2997 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2998 immutable->gid_tbl_len = attr.gid_tbl_len;
2999 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
3000
3001 return 0;
3002}
3003
3004static void get_dev_fw_str(struct ib_device *ibdev, char *str)
3005{
3006 struct mlx5_ib_dev *dev =
3007 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
3008 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
3009 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
3010 fw_rev_sub(dev->mdev));
3011}
3012
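/* When the device is in an active LAG, create the vport LAG and a LAG
 * demux flow table in the LAG flow namespace, and record the number of
 * LAG ports.
 */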
3013static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
3014{
3015 struct mlx5_core_dev *mdev = dev->mdev;
3016 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
3017 MLX5_FLOW_NAMESPACE_LAG);
3018 struct mlx5_flow_table *ft;
3019 int err;
3020
3021 if (!ns || !mlx5_lag_is_active(mdev))
3022 return 0;
3023
3024 err = mlx5_cmd_create_vport_lag(mdev);
3025 if (err)
3026 return err;
3027
3028 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
3029 if (IS_ERR(ft)) {
3030 err = PTR_ERR(ft);
3031 goto err_destroy_vport_lag;
3032 }
3033
3034 dev->flow_db->lag_demux_ft = ft;
3035 dev->lag_ports = mlx5_lag_get_num_ports(mdev);
3036 dev->lag_active = true;
3037 return 0;
3038
3039err_destroy_vport_lag:
3040 mlx5_cmd_destroy_vport_lag(mdev);
3041 return err;
3042}
3043
3044static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
3045{
3046 struct mlx5_core_dev *mdev = dev->mdev;
3047
3048 if (dev->lag_active) {
3049 dev->lag_active = false;
3050
3051 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
3052 dev->flow_db->lag_demux_ft = NULL;
3053
3054 mlx5_cmd_destroy_vport_lag(mdev);
3055 }
3056}
3057
3058static void mlx5_netdev_notifier_register(struct mlx5_roce *roce,
3059 struct net_device *netdev)
3060{
3061 int err;
3062
3063 if (roce->tracking_netdev)
3064 return;
3065 roce->tracking_netdev = netdev;
3066 roce->nb.notifier_call = mlx5_netdev_event;
3067 err = register_netdevice_notifier_dev_net(netdev, &roce->nb, &roce->nn);
3068 WARN_ON(err);
3069}
3070
3071static void mlx5_netdev_notifier_unregister(struct mlx5_roce *roce)
3072{
3073 if (!roce->tracking_netdev)
3074 return;
3075 unregister_netdevice_notifier_dev_net(roce->tracking_netdev, &roce->nb,
3076 &roce->nn);
3077 roce->tracking_netdev = NULL;
3078}
3079
3080static int mlx5e_mdev_notifier_event(struct notifier_block *nb,
3081 unsigned long event, void *data)
3082{
3083 struct mlx5_roce *roce = container_of(nb, struct mlx5_roce, mdev_nb);
3084 struct net_device *netdev = data;
3085
3086 switch (event) {
3087 case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
3088 if (netdev)
3089 mlx5_netdev_notifier_register(roce, netdev);
3090 else
3091 mlx5_netdev_notifier_unregister(roce);
3092 break;
3093 default:
3094 return NOTIFY_DONE;
3095 }
3096
3097 return NOTIFY_OK;
3098}
3099
3100static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
3101{
3102 struct mlx5_roce *roce = &dev->port[port_num].roce;
3103
3104 roce->mdev_nb.notifier_call = mlx5e_mdev_notifier_event;
3105 mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
3106 mlx5_core_uplink_netdev_event_replay(dev->mdev);
3107}
3108
3109static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
3110{
3111 struct mlx5_roce *roce = &dev->port[port_num].roce;
3112
3113 mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
3114 mlx5_netdev_notifier_unregister(roce);
3115}
3116
3117static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
3118{
3119 int err;
3120
3121 if (!dev->is_rep && dev->profile != &raw_eth_profile) {
3122 err = mlx5_nic_vport_enable_roce(dev->mdev);
3123 if (err)
3124 return err;
3125 }
3126
3127 err = mlx5_eth_lag_init(dev);
3128 if (err)
3129 goto err_disable_roce;
3130
3131 return 0;
3132
3133err_disable_roce:
3134 if (!dev->is_rep && dev->profile != &raw_eth_profile)
3135 mlx5_nic_vport_disable_roce(dev->mdev);
3136
3137 return err;
3138}
3139
3140static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
3141{
3142 mlx5_eth_lag_cleanup(dev);
3143 if (!dev->is_rep && dev->profile != &raw_eth_profile)
3144 mlx5_nic_vport_disable_roce(dev->mdev);
3145}
3146
3147static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
3148 enum rdma_netdev_t type,
3149 struct rdma_netdev_alloc_params *params)
3150{
3151 if (type != RDMA_NETDEV_IPOIB)
3152 return -EOPNOTSUPP;
3153
3154 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
3155}
3156
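/* debugfs hooks for the delay-drop timeout: reads report the current
 * value in usec; writes round the requested value up to 100 usec
 * granularity and cap it at MLX5_MAX_DELAY_DROP_TIMEOUT_MS milliseconds.
 */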
3157static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
3158 size_t count, loff_t *pos)
3159{
3160 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3161 char lbuf[20];
3162 int len;
3163
3164 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
3165 return simple_read_from_buffer(buf, count, pos, lbuf, len);
3166}
3167
3168static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
3169 size_t count, loff_t *pos)
3170{
3171 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3172 u32 timeout;
3173 u32 var;
3174
3175 if (kstrtouint_from_user(buf, count, 0, &var))
3176 return -EFAULT;
3177
3178 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
3179 1000);
3180 if (timeout != var)
3181 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
3182 timeout);
3183
3184 delay_drop->timeout = timeout;
3185
3186 return count;
3187}
3188
3189static const struct file_operations fops_delay_drop_timeout = {
3190 .owner = THIS_MODULE,
3191 .open = simple_open,
3192 .write = delay_drop_timeout_write,
3193 .read = delay_drop_timeout_read,
3194};
3195
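/* Detach a slave port from the multiport IB device: notify both cores
 * that the affiliation is removed, stop event/netdev tracking, wait for
 * outstanding references on the slave mdev to drop, and finally
 * unaffiliate the vport in firmware.
 */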
3196static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
3197 struct mlx5_ib_multiport_info *mpi)
3198{
3199 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3200 struct mlx5_ib_port *port = &ibdev->port[port_num];
3201 int comps;
3202 int err;
3203 int i;
3204
3205 lockdep_assert_held(&mlx5_ib_multiport_mutex);
3206
3207 mlx5_core_mp_event_replay(ibdev->mdev,
3208 MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
3209 NULL);
3210 mlx5_core_mp_event_replay(mpi->mdev,
3211 MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
3212 NULL);
3213
3214 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
3215
3216 spin_lock(&port->mp.mpi_lock);
3217 if (!mpi->ibdev) {
3218 spin_unlock(&port->mp.mpi_lock);
3219 return;
3220 }
3221
3222 mpi->ibdev = NULL;
3223
3224 spin_unlock(&port->mp.mpi_lock);
3225 if (mpi->mdev_events.notifier_call)
3226 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
3227 mpi->mdev_events.notifier_call = NULL;
3228 mlx5_mdev_netdev_untrack(ibdev, port_num);
3229 spin_lock(&port->mp.mpi_lock);
3230
3231 comps = mpi->mdev_refcnt;
3232 if (comps) {
3233 mpi->unaffiliate = true;
3234 init_completion(&mpi->unref_comp);
3235 spin_unlock(&port->mp.mpi_lock);
3236
3237 for (i = 0; i < comps; i++)
3238 wait_for_completion(&mpi->unref_comp);
3239
3240 spin_lock(&port->mp.mpi_lock);
3241 mpi->unaffiliate = false;
3242 }
3243
3244 port->mp.mpi = NULL;
3245
3246 spin_unlock(&port->mp.mpi_lock);
3247
3248 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
3249
3250 mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
3251 /* Only log an error; the pointers still need to be cleaned up and
3252 * the port added back to the list.
3253 */
3254 if (err)
3255 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
3256 port_num + 1);
3257
3258 ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
3259}
3260
3261static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
3262 struct mlx5_ib_multiport_info *mpi)
3263{
3264 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3265 u64 key;
3266 int err;
3267
3268 lockdep_assert_held(&mlx5_ib_multiport_mutex);
3269
3270 spin_lock(&ibdev->port[port_num].mp.mpi_lock);
3271 if (ibdev->port[port_num].mp.mpi) {
3272 mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
3273 port_num + 1);
3274 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3275 return false;
3276 }
3277
3278 ibdev->port[port_num].mp.mpi = mpi;
3279 mpi->ibdev = ibdev;
3280 mpi->mdev_events.notifier_call = NULL;
3281 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3282
3283 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3284 if (err)
3285 goto unbind;
3286
3287 mlx5_mdev_netdev_track(ibdev, port_num);
3288
3289 mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
3290 mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
3291
3292 mlx5_ib_init_cong_debugfs(ibdev, port_num);
3293
3294 key = mpi->mdev->priv.adev_idx;
3295 mlx5_core_mp_event_replay(mpi->mdev,
3296 MLX5_DRIVER_EVENT_AFFILIATION_DONE,
3297 &key);
3298 mlx5_core_mp_event_replay(ibdev->mdev,
3299 MLX5_DRIVER_EVENT_AFFILIATION_DONE,
3300 &key);
3301
3302 return true;
3303
3304unbind:
3305 mlx5_ib_unbind_slave_port(ibdev, mpi);
3306 return false;
3307}
3308
3309static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
3310{
3311 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3312 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
3313 port_num + 1);
3314 struct mlx5_ib_multiport_info *mpi;
3315 int err;
3316 u32 i;
3317
3318 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3319 return 0;
3320
3321 err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
3322 &dev->sys_image_guid);
3323 if (err)
3324 return err;
3325
3326 err = mlx5_nic_vport_enable_roce(dev->mdev);
3327 if (err)
3328 return err;
3329
3330 mutex_lock(&mlx5_ib_multiport_mutex);
3331 for (i = 0; i < dev->num_ports; i++) {
3332 bool bound = false;
3333
3334 /* build a stub multiport info struct for the native port. */
3335 if (i == port_num) {
3336 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
3337 if (!mpi) {
3338 mutex_unlock(&mlx5_ib_multiport_mutex);
3339 mlx5_nic_vport_disable_roce(dev->mdev);
3340 return -ENOMEM;
3341 }
3342
3343 mpi->is_master = true;
3344 mpi->mdev = dev->mdev;
3345 mpi->sys_image_guid = dev->sys_image_guid;
3346 dev->port[i].mp.mpi = mpi;
3347 mpi->ibdev = dev;
3348 mpi = NULL;
3349 continue;
3350 }
3351
3352 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
3353 list) {
3354 if (dev->sys_image_guid == mpi->sys_image_guid &&
3355 (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
3356 bound = mlx5_ib_bind_slave_port(dev, mpi);
3357 }
3358
3359 if (bound) {
3360 dev_dbg(mpi->mdev->device,
3361 "removing port from unaffiliated list.\n");
3362 mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
3363 list_del(&mpi->list);
3364 break;
3365 }
3366 }
3367 if (!bound)
3368 mlx5_ib_dbg(dev, "no free port found for port %d\n",
3369 i + 1);
3370 }
3371
3372 list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
3373 mutex_unlock(&mlx5_ib_multiport_mutex);
3374 return err;
3375}
3376
3377static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
3378{
3379 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3380 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
3381 port_num + 1);
3382 u32 i;
3383
3384 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3385 return;
3386
3387 mutex_lock(&mlx5_ib_multiport_mutex);
3388 for (i = 0; i < dev->num_ports; i++) {
3389 if (dev->port[i].mp.mpi) {
3390 /* Destroy the native port stub */
3391 if (i == port_num) {
3392 kfree(dev->port[i].mp.mpi);
3393 dev->port[i].mp.mpi = NULL;
3394 } else {
3395 mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
3396 i + 1);
3397 list_add_tail(&dev->port[i].mp.mpi->list,
3398 &mlx5_ib_unaffiliated_port_list);
3399 mlx5_ib_unbind_slave_port(dev,
3400 dev->port[i].mp.mpi);
3401 }
3402 }
3403 }
3404
3405 mlx5_ib_dbg(dev, "removing from devlist\n");
3406 list_del(&dev->ib_dev_list);
3407 mutex_unlock(&mlx5_ib_multiport_mutex);
3408
3409 mlx5_nic_vport_disable_roce(dev->mdev);
3410}
3411
3412static int mmap_obj_cleanup(struct ib_uobject *uobject,
3413 enum rdma_remove_reason why,
3414 struct uverbs_attr_bundle *attrs)
3415{
3416 struct mlx5_user_mmap_entry *obj = uobject->object;
3417
3418 rdma_user_mmap_entry_remove(&obj->rdma_entry);
3419 return 0;
3420}
3421
3422static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
3423 struct mlx5_user_mmap_entry *entry,
3424 size_t length)
3425{
3426 return rdma_user_mmap_entry_insert_range(
3427 &c->ibucontext, &entry->rdma_entry, length,
3428 (MLX5_IB_MMAP_OFFSET_START << 16),
3429 ((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
3430}
3431
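/* Allocate one VAR (a doorbell stride within the VDPA emulation doorbell
 * BAR): grab a free slot from the bitmap and expose it to userspace as an
 * mmap entry.
 */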
3432static struct mlx5_user_mmap_entry *
3433alloc_var_entry(struct mlx5_ib_ucontext *c)
3434{
3435 struct mlx5_user_mmap_entry *entry;
3436 struct mlx5_var_table *var_table;
3437 u32 page_idx;
3438 int err;
3439
3440 var_table = &to_mdev(c->ibucontext.device)->var_table;
3441 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3442 if (!entry)
3443 return ERR_PTR(-ENOMEM);
3444
3445 mutex_lock(&var_table->bitmap_lock);
3446 page_idx = find_first_zero_bit(var_table->bitmap,
3447 var_table->num_var_hw_entries);
3448 if (page_idx >= var_table->num_var_hw_entries) {
3449 err = -ENOSPC;
3450 mutex_unlock(&var_table->bitmap_lock);
3451 goto end;
3452 }
3453
3454 set_bit(page_idx, var_table->bitmap);
3455 mutex_unlock(&var_table->bitmap_lock);
3456
3457 entry->address = var_table->hw_start_addr +
3458 (page_idx * var_table->stride_size);
3459 entry->page_idx = page_idx;
3460 entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
3461
3462 err = mlx5_rdma_user_mmap_entry_insert(c, entry,
3463 var_table->stride_size);
3464 if (err)
3465 goto err_insert;
3466
3467 return entry;
3468
3469err_insert:
3470 mutex_lock(&var_table->bitmap_lock);
3471 clear_bit(page_idx, var_table->bitmap);
3472 mutex_unlock(&var_table->bitmap_lock);
3473end:
3474 kfree(entry);
3475 return ERR_PTR(err);
3476}
3477
3478static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
3479 struct uverbs_attr_bundle *attrs)
3480{
3481 struct ib_uobject *uobj = uverbs_attr_get_uobject(
3482 attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
3483 struct mlx5_ib_ucontext *c;
3484 struct mlx5_user_mmap_entry *entry;
3485 u64 mmap_offset;
3486 u32 length;
3487 int err;
3488
3489 c = to_mucontext(ib_uverbs_get_ucontext(attrs));
3490 if (IS_ERR(c))
3491 return PTR_ERR(c);
3492
3493 entry = alloc_var_entry(c);
3494 if (IS_ERR(entry))
3495 return PTR_ERR(entry);
3496
3497 mmap_offset = mlx5_entry_to_mmap_offset(entry);
3498 length = entry->rdma_entry.npages * PAGE_SIZE;
3499 uobj->object = entry;
3500 uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
3501
3502 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
3503 &mmap_offset, sizeof(mmap_offset));
3504 if (err)
3505 return err;
3506
3507 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
3508 &entry->page_idx, sizeof(entry->page_idx));
3509 if (err)
3510 return err;
3511
3512 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
3513 &length, sizeof(length));
3514 return err;
3515}
3516
3517DECLARE_UVERBS_NAMED_METHOD(
3518 MLX5_IB_METHOD_VAR_OBJ_ALLOC,
3519 UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
3520 MLX5_IB_OBJECT_VAR,
3521 UVERBS_ACCESS_NEW,
3522 UA_MANDATORY),
3523 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
3524 UVERBS_ATTR_TYPE(u32),
3525 UA_MANDATORY),
3526 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
3527 UVERBS_ATTR_TYPE(u32),
3528 UA_MANDATORY),
3529 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
3530 UVERBS_ATTR_TYPE(u64),
3531 UA_MANDATORY));
3532
3533DECLARE_UVERBS_NAMED_METHOD_DESTROY(
3534 MLX5_IB_METHOD_VAR_OBJ_DESTROY,
3535 UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
3536 MLX5_IB_OBJECT_VAR,
3537 UVERBS_ACCESS_DESTROY,
3538 UA_MANDATORY));
3539
3540DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
3541 UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
3542 &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
3543 &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
3544
3545static bool var_is_supported(struct ib_device *device)
3546{
3547 struct mlx5_ib_dev *dev = to_mdev(device);
3548
3549 return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3550 MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
3551}
3552
3553static struct mlx5_user_mmap_entry *
3554alloc_uar_entry(struct mlx5_ib_ucontext *c,
3555 enum mlx5_ib_uapi_uar_alloc_type alloc_type)
3556{
3557 struct mlx5_user_mmap_entry *entry;
3558 struct mlx5_ib_dev *dev;
3559 u32 uar_index;
3560 int err;
3561
3562 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3563 if (!entry)
3564 return ERR_PTR(-ENOMEM);
3565
3566 dev = to_mdev(c->ibucontext.device);
3567 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
3568 if (err)
3569 goto end;
3570
3571 entry->page_idx = uar_index;
3572 entry->address = uar_index2paddress(dev, uar_index);
3573 if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
3574 entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
3575 else
3576 entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
3577
3578 err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
3579 if (err)
3580 goto err_insert;
3581
3582 return entry;
3583
3584err_insert:
3585 mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
3586end:
3587 kfree(entry);
3588 return ERR_PTR(err);
3589}
3590
3591static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
3592 struct uverbs_attr_bundle *attrs)
3593{
3594 struct ib_uobject *uobj = uverbs_attr_get_uobject(
3595 attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
3596 enum mlx5_ib_uapi_uar_alloc_type alloc_type;
3597 struct mlx5_ib_ucontext *c;
3598 struct mlx5_user_mmap_entry *entry;
3599 u64 mmap_offset;
3600 u32 length;
3601 int err;
3602
3603 c = to_mucontext(ib_uverbs_get_ucontext(attrs));
3604 if (IS_ERR(c))
3605 return PTR_ERR(c);
3606
3607 err = uverbs_get_const(&alloc_type, attrs,
3608 MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
3609 if (err)
3610 return err;
3611
3612 if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
3613 alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
3614 return -EOPNOTSUPP;
3615
3616 if (!to_mdev(c->ibucontext.device)->wc_support &&
3617 alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
3618 return -EOPNOTSUPP;
3619
3620 entry = alloc_uar_entry(c, alloc_type);
3621 if (IS_ERR(entry))
3622 return PTR_ERR(entry);
3623
3624 mmap_offset = mlx5_entry_to_mmap_offset(entry);
3625 length = entry->rdma_entry.npages * PAGE_SIZE;
3626 uobj->object = entry;
3627 uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
3628
3629 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
3630 &mmap_offset, sizeof(mmap_offset));
3631 if (err)
3632 return err;
3633
3634 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
3635 &entry->page_idx, sizeof(entry->page_idx));
3636 if (err)
3637 return err;
3638
3639 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
3640 &length, sizeof(length));
3641 return err;
3642}
3643
3644DECLARE_UVERBS_NAMED_METHOD(
3645 MLX5_IB_METHOD_UAR_OBJ_ALLOC,
3646 UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
3647 MLX5_IB_OBJECT_UAR,
3648 UVERBS_ACCESS_NEW,
3649 UA_MANDATORY),
3650 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
3651 enum mlx5_ib_uapi_uar_alloc_type,
3652 UA_MANDATORY),
3653 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
3654 UVERBS_ATTR_TYPE(u32),
3655 UA_MANDATORY),
3656 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
3657 UVERBS_ATTR_TYPE(u32),
3658 UA_MANDATORY),
3659 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
3660 UVERBS_ATTR_TYPE(u64),
3661 UA_MANDATORY));
3662
3663DECLARE_UVERBS_NAMED_METHOD_DESTROY(
3664 MLX5_IB_METHOD_UAR_OBJ_DESTROY,
3665 UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
3666 MLX5_IB_OBJECT_UAR,
3667 UVERBS_ACCESS_DESTROY,
3668 UA_MANDATORY));
3669
3670DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
3671 UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
3672 &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
3673 &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
3674
3675ADD_UVERBS_ATTRIBUTES_SIMPLE(
3676 mlx5_ib_query_context,
3677 UVERBS_OBJECT_DEVICE,
3678 UVERBS_METHOD_QUERY_CONTEXT,
3679 UVERBS_ATTR_PTR_OUT(
3680 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
3681 UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp,
3682 dump_fill_mkey),
3683 UA_MANDATORY));
3684
3685static const struct uapi_definition mlx5_ib_defs[] = {
3686 UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
3687 UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
3688 UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
3689 UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
3690 UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
3691
3692 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
3693 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
3694 UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
3695 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
3696 {}
3697};
3698
3699static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
3700{
3701 mlx5_ib_cleanup_multiport_master(dev);
3702 WARN_ON(!xa_empty(&dev->odp_mkeys));
3703 mutex_destroy(&dev->cap_mask_mutex);
3704 WARN_ON(!xa_empty(&dev->sig_mrs));
3705 WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
3706 mlx5r_macsec_dealloc_gids(dev);
3707}
3708
3709static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
3710{
3711 struct mlx5_core_dev *mdev = dev->mdev;
3712 int err, i;
3713
3714 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
3715 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
3716 dev->ib_dev.phys_port_cnt = dev->num_ports;
3717 dev->ib_dev.dev.parent = mdev->device;
3718 dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
3719
3720 for (i = 0; i < dev->num_ports; i++) {
3721 spin_lock_init(&dev->port[i].mp.mpi_lock);
3722 rwlock_init(&dev->port[i].roce.netdev_lock);
3723 dev->port[i].roce.dev = dev;
3724 dev->port[i].roce.native_port_num = i + 1;
3725 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
3726 }
3727
3728 err = mlx5r_cmd_query_special_mkeys(dev);
3729 if (err)
3730 return err;
3731
3732 err = mlx5r_macsec_init_gids_and_devlist(dev);
3733 if (err)
3734 return err;
3735
3736 err = mlx5_ib_init_multiport_master(dev);
3737 if (err)
3738 goto err;
3739
3740 err = set_has_smi_cap(dev);
3741 if (err)
3742 goto err_mp;
3743
3744 err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
3745 if (err)
3746 goto err_mp;
3747
3748 if (mlx5_use_mad_ifc(dev))
3749 get_ext_port_caps(dev);
3750
3751 dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);
3752
3753 mutex_init(&dev->cap_mask_mutex);
3754 INIT_LIST_HEAD(&dev->qp_list);
3755 spin_lock_init(&dev->reset_flow_resource_lock);
3756 xa_init(&dev->odp_mkeys);
3757 xa_init(&dev->sig_mrs);
3758 atomic_set(&dev->mkey_var, 0);
3759
3760 spin_lock_init(&dev->dm.lock);
3761 dev->dm.dev = mdev;
3762 return 0;
3763err:
3764 mlx5r_macsec_dealloc_gids(dev);
3765err_mp:
3766 mlx5_ib_cleanup_multiport_master(dev);
3767 return err;
3768}
3769
3770static int mlx5_ib_enable_driver(struct ib_device *dev)
3771{
3772 struct mlx5_ib_dev *mdev = to_mdev(dev);
3773 int ret;
3774
3775 ret = mlx5_ib_test_wc(mdev);
3776 mlx5_ib_dbg(mdev, "Write-Combining %s",
3777 mdev->wc_support ? "supported" : "not supported");
3778
3779 return ret;
3780}
3781
3782static const struct ib_device_ops mlx5_ib_dev_ops = {
3783 .owner = THIS_MODULE,
3784 .driver_id = RDMA_DRIVER_MLX5,
3785 .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,
3786
3787 .add_gid = mlx5_ib_add_gid,
3788 .alloc_mr = mlx5_ib_alloc_mr,
3789 .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
3790 .alloc_pd = mlx5_ib_alloc_pd,
3791 .alloc_ucontext = mlx5_ib_alloc_ucontext,
3792 .attach_mcast = mlx5_ib_mcg_attach,
3793 .check_mr_status = mlx5_ib_check_mr_status,
3794 .create_ah = mlx5_ib_create_ah,
3795 .create_cq = mlx5_ib_create_cq,
3796 .create_qp = mlx5_ib_create_qp,
3797 .create_srq = mlx5_ib_create_srq,
3798 .create_user_ah = mlx5_ib_create_ah,
3799 .dealloc_pd = mlx5_ib_dealloc_pd,
3800 .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
3801 .del_gid = mlx5_ib_del_gid,
3802 .dereg_mr = mlx5_ib_dereg_mr,
3803 .destroy_ah = mlx5_ib_destroy_ah,
3804 .destroy_cq = mlx5_ib_destroy_cq,
3805 .destroy_qp = mlx5_ib_destroy_qp,
3806 .destroy_srq = mlx5_ib_destroy_srq,
3807 .detach_mcast = mlx5_ib_mcg_detach,
3808 .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
3809 .drain_rq = mlx5_ib_drain_rq,
3810 .drain_sq = mlx5_ib_drain_sq,
3811 .device_group = &mlx5_attr_group,
3812 .enable_driver = mlx5_ib_enable_driver,
3813 .get_dev_fw_str = get_dev_fw_str,
3814 .get_dma_mr = mlx5_ib_get_dma_mr,
3815 .get_link_layer = mlx5_ib_port_link_layer,
3816 .map_mr_sg = mlx5_ib_map_mr_sg,
3817 .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
3818 .mmap = mlx5_ib_mmap,
3819 .mmap_free = mlx5_ib_mmap_free,
3820 .modify_cq = mlx5_ib_modify_cq,
3821 .modify_device = mlx5_ib_modify_device,
3822 .modify_port = mlx5_ib_modify_port,
3823 .modify_qp = mlx5_ib_modify_qp,
3824 .modify_srq = mlx5_ib_modify_srq,
3825 .poll_cq = mlx5_ib_poll_cq,
3826 .post_recv = mlx5_ib_post_recv_nodrain,
3827 .post_send = mlx5_ib_post_send_nodrain,
3828 .post_srq_recv = mlx5_ib_post_srq_recv,
3829 .process_mad = mlx5_ib_process_mad,
3830 .query_ah = mlx5_ib_query_ah,
3831 .query_device = mlx5_ib_query_device,
3832 .query_gid = mlx5_ib_query_gid,
3833 .query_pkey = mlx5_ib_query_pkey,
3834 .query_qp = mlx5_ib_query_qp,
3835 .query_srq = mlx5_ib_query_srq,
3836 .query_ucontext = mlx5_ib_query_ucontext,
3837 .reg_user_mr = mlx5_ib_reg_user_mr,
3838 .reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
3839 .req_notify_cq = mlx5_ib_arm_cq,
3840 .rereg_user_mr = mlx5_ib_rereg_user_mr,
3841 .resize_cq = mlx5_ib_resize_cq,
3842
3843 INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
3844 INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
3845 INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
3846 INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
3847 INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
3848 INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
3849 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
3850};
3851
3852static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
3853 .rdma_netdev_get_params = mlx5_ib_rn_get_params,
3854};
3855
3856static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
3857 .get_vf_config = mlx5_ib_get_vf_config,
3858 .get_vf_guid = mlx5_ib_get_vf_guid,
3859 .get_vf_stats = mlx5_ib_get_vf_stats,
3860 .set_vf_guid = mlx5_ib_set_vf_guid,
3861 .set_vf_link_state = mlx5_ib_set_vf_link_state,
3862};
3863
3864static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
3865 .alloc_mw = mlx5_ib_alloc_mw,
3866 .dealloc_mw = mlx5_ib_dealloc_mw,
3867
3868 INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
3869};
3870
3871static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
3872 .alloc_xrcd = mlx5_ib_alloc_xrcd,
3873 .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
3874
3875 INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
3876};
3877
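/*
 * Carve the VDPA-emulation doorbell BAR advertised by the HCA into
 * fixed-size strides ("VAR" entries): the BAR spans 2^log_doorbell_bar_size
 * pages of 4KB and each entry is 2^log_doorbell_stride bytes.  A bitmap
 * tracks which entries have been handed out.
 */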
3878static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
3879{
3880 struct mlx5_core_dev *mdev = dev->mdev;
3881 struct mlx5_var_table *var_table = &dev->var_table;
3882 u8 log_doorbell_bar_size;
3883 u8 log_doorbell_stride;
3884 u64 bar_size;
3885
3886 log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3887 log_doorbell_bar_size);
3888 log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3889 log_doorbell_stride);
3890 var_table->hw_start_addr = dev->mdev->bar_addr +
3891 MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
3892 doorbell_bar_offset);
3893 bar_size = (1ULL << log_doorbell_bar_size) * 4096;
3894 var_table->stride_size = 1ULL << log_doorbell_stride;
3895 var_table->num_var_hw_entries = div_u64(bar_size,
3896 var_table->stride_size);
3897 mutex_init(&var_table->bitmap_lock);
3898 var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
3899 GFP_KERNEL);
3900 return (var_table->bitmap) ? 0 : -ENOMEM;
3901}
3902
3903static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
3904{
3905 bitmap_free(dev->var_table.bitmap);
3906}
3907
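/*
 * Register the verbs ops and the uverbs ioctl definitions, enabling the
 * optional op sets only when the matching HCA capability is present
 * (enhanced IPoIB offloads, SR-IOV config on the PF, memory windows, XRC,
 * device memory).
 */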
3908static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
3909{
3910 struct mlx5_core_dev *mdev = dev->mdev;
3911 int err;
3912
3913 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
3914 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
3915 ib_set_device_ops(&dev->ib_dev,
3916 &mlx5_ib_dev_ipoib_enhanced_ops);
3917
3918 if (mlx5_core_is_pf(mdev))
3919 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
3920
3921 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
3922
3923 if (MLX5_CAP_GEN(mdev, imaicl))
3924 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
3925
3926 if (MLX5_CAP_GEN(mdev, xrc))
3927 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
3928
3929 if (MLX5_CAP_DEV_MEM(mdev, memic) ||
3930 MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3931 MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
3932 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
3933
3934 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
3935
3936 if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
3937 dev->ib_dev.driver_def = mlx5_ib_defs;
3938
3939 err = init_node_data(dev);
3940 if (err)
3941 return err;
3942
3943 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3944 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
3945 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
3946 mutex_init(&dev->lb.mutex);
3947
3948 if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3949 MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
3950 err = mlx5_ib_init_var_table(dev);
3951 if (err)
3952 return err;
3953 }
3954
3955 dev->ib_dev.use_cq_dim = true;
3956
3957 return 0;
3958}
3959
3960static const struct ib_device_ops mlx5_ib_dev_port_ops = {
3961 .get_port_immutable = mlx5_port_immutable,
3962 .query_port = mlx5_ib_query_port,
3963};
3964
3965static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
3966{
3967 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
3968 return 0;
3969}
3970
3971static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
3972 .get_port_immutable = mlx5_port_rep_immutable,
3973 .query_port = mlx5_ib_rep_query_port,
3974 .query_pkey = mlx5_ib_rep_query_pkey,
3975};
3976
3977static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
3978{
3979 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
3980 return 0;
3981}
3982
3983static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
3984 .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
3985 .create_wq = mlx5_ib_create_wq,
3986 .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
3987 .destroy_wq = mlx5_ib_destroy_wq,
3988 .get_netdev = mlx5_ib_get_netdev,
3989 .modify_wq = mlx5_ib_modify_wq,
3990
3991 INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
3992 ib_rwq_ind_tbl),
3993};
3994
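/*
 * For an Ethernet link layer, add the RoCE-specific ops, start tracking
 * the native port's netdev and enable RoCE via mlx5_enable_eth().
 */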
3995static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
3996{
3997 struct mlx5_core_dev *mdev = dev->mdev;
3998 enum rdma_link_layer ll;
3999 int port_type_cap;
4000 u32 port_num = 0;
4001 int err;
4002
4003 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4004 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4005
4006 if (ll == IB_LINK_LAYER_ETHERNET) {
4007 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
4008
4009 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4010
4011 /* Register only for native ports */
4012 mlx5_mdev_netdev_track(dev, port_num);
4013
4014 err = mlx5_enable_eth(dev);
4015 if (err)
4016 goto cleanup;
4017 }
4018
4019 return 0;
4020cleanup:
4021 mlx5_mdev_netdev_untrack(dev, port_num);
4022 return err;
4023}
4024
4025static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
4026{
4027 struct mlx5_core_dev *mdev = dev->mdev;
4028 enum rdma_link_layer ll;
4029 int port_type_cap;
4030 u32 port_num;
4031
4032 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4033 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4034
4035 if (ll == IB_LINK_LAYER_ETHERNET) {
4036 mlx5_disable_eth(dev);
4037
4038 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4039 mlx5_mdev_netdev_untrack(dev, port_num);
4040 }
4041}
4042
4043static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
4044{
4045 mlx5_ib_init_cong_debugfs(dev,
4046 mlx5_core_native_port_num(dev->mdev) - 1);
4047 return 0;
4048}
4049
4050static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
4051{
4052 mlx5_ib_cleanup_cong_debugfs(dev,
4053 mlx5_core_native_port_num(dev->mdev) - 1);
4054}
4055
4056static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
4057{
4058 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
4059 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
4060}
4061
4062static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
4063{
4064 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4065}
4066
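/*
 * Allocate the two kernel blue-flame/doorbell registers used by the driver:
 * a regular one (bfreg) and a fast-path one (fp_bfreg).
 */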
4067static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
4068{
4069 int err;
4070
4071 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
4072 if (err)
4073 return err;
4074
4075 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4076 if (err)
4077 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4078
4079 return err;
4080}
4081
4082static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
4083{
4084 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4085 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4086}
4087
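/* Register with the RDMA core; bonded (LAG) devices get a distinct name. */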
4088static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4089{
4090 const char *name;
4091
4092 if (!mlx5_lag_is_active(dev->mdev))
4093 name = "mlx5_%d";
4094 else
4095 name = "mlx5_bond_%d";
4096 return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
4097}
4098
4099static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
4100{
4101 mlx5_mkey_cache_cleanup(dev);
4102 mlx5r_umr_resource_cleanup(dev);
4103}
4104
4105static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
4106{
4107 ib_unregister_device(&dev->ib_dev);
4108}
4109
4110static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
4111{
4112 int ret;
4113
4114 ret = mlx5r_umr_resource_init(dev);
4115 if (ret)
4116 return ret;
4117
4118 ret = mlx5_mkey_cache_init(dev);
4119 if (ret)
4120 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4121 return ret;
4122}
4123
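/*
 * Set up the delay-drop context (default timeout in usecs) and, when
 * debugfs is available, expose its counters and timeout knob under
 * <mlx5 debugfs root>/delay_drop.
 */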
4124static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
4125{
4126 struct dentry *root;
4127
4128 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4129 return 0;
4130
4131 mutex_init(&dev->delay_drop.lock);
4132 dev->delay_drop.dev = dev;
4133 dev->delay_drop.activate = false;
4134 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
4135 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
4136 atomic_set(&dev->delay_drop.rqs_cnt, 0);
4137 atomic_set(&dev->delay_drop.events_cnt, 0);
4138
4139 if (!mlx5_debugfs_root)
4140 return 0;
4141
4142 root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
4143 dev->delay_drop.dir_debugfs = root;
4144
4145 debugfs_create_atomic_t("num_timeout_events", 0400, root,
4146 &dev->delay_drop.events_cnt);
4147 debugfs_create_atomic_t("num_rqs", 0400, root,
4148 &dev->delay_drop.rqs_cnt);
4149 debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
4150 &fops_delay_drop_timeout);
4151 return 0;
4152}
4153
4154static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
4155{
4156 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4157 return;
4158
4159 cancel_work_sync(&dev->delay_drop.delay_drop_work);
4160 if (!dev->delay_drop.dir_debugfs)
4161 return;
4162
4163 debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
4164 dev->delay_drop.dir_debugfs = NULL;
4165}
4166
4167static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
4168{
4169 dev->mdev_events.notifier_call = mlx5_ib_event;
4170 mlx5_notifier_register(dev->mdev, &dev->mdev_events);
4171
4172 mlx5r_macsec_event_register(dev);
4173
4174 return 0;
4175}
4176
4177static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
4178{
4179 mlx5r_macsec_event_unregister(dev);
4180 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
4181}
4182
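/*
 * Profile machinery: __mlx5_ib_add() runs the init callback of every stage
 * in the profile in order; __mlx5_ib_remove() (and the error path of
 * __mlx5_ib_add()) runs the cleanup callbacks of the already-initialized
 * stages in reverse order.
 */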
4183void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
4184 const struct mlx5_ib_profile *profile,
4185 int stage)
4186{
4187 dev->ib_active = false;
4188
4189	/* Number of stages to clean up */
4190 while (stage) {
4191 stage--;
4192 if (profile->stage[stage].cleanup)
4193 profile->stage[stage].cleanup(dev);
4194 }
4195
4196 kfree(dev->port);
4197 ib_dealloc_device(&dev->ib_dev);
4198}
4199
4200int __mlx5_ib_add(struct mlx5_ib_dev *dev,
4201 const struct mlx5_ib_profile *profile)
4202{
4203 int err;
4204 int i;
4205
4206 dev->profile = profile;
4207
4208 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
4209 if (profile->stage[i].init) {
4210 err = profile->stage[i].init(dev);
4211 if (err)
4212 goto err_out;
4213 }
4214 }
4215
4216 dev->ib_active = true;
4217 return 0;
4218
4219err_out:
4220 /* Clean up stages which were initialized */
4221 while (i) {
4222 i--;
4223 if (profile->stage[i].cleanup)
4224 profile->stage[i].cleanup(dev);
4225 }
4226 return -ENOMEM;
4227}
4228
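/*
 * Default stage table for PFs/VFs.  Each STAGE_CREATE() entry pairs an init
 * callback with an optional cleanup callback.  raw_eth_profile below is the
 * same table minus the ODP stage, used when RoCE is disabled on an Ethernet
 * device.
 */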
4229static const struct mlx5_ib_profile pf_profile = {
4230 STAGE_CREATE(MLX5_IB_STAGE_INIT,
4231 mlx5_ib_stage_init_init,
4232 mlx5_ib_stage_init_cleanup),
4233 STAGE_CREATE(MLX5_IB_STAGE_FS,
4234 mlx5_ib_fs_init,
4235 mlx5_ib_fs_cleanup),
4236 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
4237 mlx5_ib_stage_caps_init,
4238 mlx5_ib_stage_caps_cleanup),
4239 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
4240 mlx5_ib_stage_non_default_cb,
4241 NULL),
4242 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
4243 mlx5_ib_roce_init,
4244 mlx5_ib_roce_cleanup),
4245 STAGE_CREATE(MLX5_IB_STAGE_QP,
4246 mlx5_init_qp_table,
4247 mlx5_cleanup_qp_table),
4248 STAGE_CREATE(MLX5_IB_STAGE_SRQ,
4249 mlx5_init_srq_table,
4250 mlx5_cleanup_srq_table),
4251 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
4252 mlx5_ib_dev_res_init,
4253 mlx5_ib_dev_res_cleanup),
4254 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
4255 mlx5_ib_stage_dev_notifier_init,
4256 mlx5_ib_stage_dev_notifier_cleanup),
4257 STAGE_CREATE(MLX5_IB_STAGE_ODP,
4258 mlx5_ib_odp_init_one,
4259 mlx5_ib_odp_cleanup_one),
4260 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
4261 mlx5_ib_counters_init,
4262 mlx5_ib_counters_cleanup),
4263 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
4264 mlx5_ib_stage_cong_debugfs_init,
4265 mlx5_ib_stage_cong_debugfs_cleanup),
4266 STAGE_CREATE(MLX5_IB_STAGE_UAR,
4267 mlx5_ib_stage_uar_init,
4268 mlx5_ib_stage_uar_cleanup),
4269 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
4270 mlx5_ib_stage_bfrag_init,
4271 mlx5_ib_stage_bfrag_cleanup),
4272 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
4273 NULL,
4274 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
4275 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
4276 mlx5_ib_devx_init,
4277 mlx5_ib_devx_cleanup),
4278 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
4279 mlx5_ib_stage_ib_reg_init,
4280 mlx5_ib_stage_ib_reg_cleanup),
4281 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
4282 mlx5_ib_stage_post_ib_reg_umr_init,
4283 NULL),
4284 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
4285 mlx5_ib_stage_delay_drop_init,
4286 mlx5_ib_stage_delay_drop_cleanup),
4287 STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
4288 mlx5_ib_restrack_init,
4289 NULL),
4290};
4291
4292const struct mlx5_ib_profile raw_eth_profile = {
4293 STAGE_CREATE(MLX5_IB_STAGE_INIT,
4294 mlx5_ib_stage_init_init,
4295 mlx5_ib_stage_init_cleanup),
4296 STAGE_CREATE(MLX5_IB_STAGE_FS,
4297 mlx5_ib_fs_init,
4298 mlx5_ib_fs_cleanup),
4299 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
4300 mlx5_ib_stage_caps_init,
4301 mlx5_ib_stage_caps_cleanup),
4302 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
4303 mlx5_ib_stage_raw_eth_non_default_cb,
4304 NULL),
4305 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
4306 mlx5_ib_roce_init,
4307 mlx5_ib_roce_cleanup),
4308 STAGE_CREATE(MLX5_IB_STAGE_QP,
4309 mlx5_init_qp_table,
4310 mlx5_cleanup_qp_table),
4311 STAGE_CREATE(MLX5_IB_STAGE_SRQ,
4312 mlx5_init_srq_table,
4313 mlx5_cleanup_srq_table),
4314 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
4315 mlx5_ib_dev_res_init,
4316 mlx5_ib_dev_res_cleanup),
4317 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
4318 mlx5_ib_stage_dev_notifier_init,
4319 mlx5_ib_stage_dev_notifier_cleanup),
4320 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
4321 mlx5_ib_counters_init,
4322 mlx5_ib_counters_cleanup),
4323 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
4324 mlx5_ib_stage_cong_debugfs_init,
4325 mlx5_ib_stage_cong_debugfs_cleanup),
4326 STAGE_CREATE(MLX5_IB_STAGE_UAR,
4327 mlx5_ib_stage_uar_init,
4328 mlx5_ib_stage_uar_cleanup),
4329 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
4330 mlx5_ib_stage_bfrag_init,
4331 mlx5_ib_stage_bfrag_cleanup),
4332 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
4333 NULL,
4334 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
4335 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
4336 mlx5_ib_devx_init,
4337 mlx5_ib_devx_cleanup),
4338 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
4339 mlx5_ib_stage_ib_reg_init,
4340 mlx5_ib_stage_ib_reg_cleanup),
4341 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
4342 mlx5_ib_stage_post_ib_reg_umr_init,
4343 NULL),
4344 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
4345 mlx5_ib_stage_delay_drop_init,
4346 mlx5_ib_stage_delay_drop_cleanup),
4347 STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
4348 mlx5_ib_restrack_init,
4349 NULL),
4350};
4351
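/*
 * Probe for the ".multiport" auxiliary device created for the secondary
 * port of a multiport HCA: try to affiliate it with an existing mlx5_ib
 * device that has the same system image GUID, otherwise park it on the
 * unaffiliated list until such a device shows up.
 */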
4352static int mlx5r_mp_probe(struct auxiliary_device *adev,
4353 const struct auxiliary_device_id *id)
4354{
4355 struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
4356 struct mlx5_core_dev *mdev = idev->mdev;
4357 struct mlx5_ib_multiport_info *mpi;
4358 struct mlx5_ib_dev *dev;
4359 bool bound = false;
4360 int err;
4361
4362 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
4363 if (!mpi)
4364 return -ENOMEM;
4365
4366 mpi->mdev = mdev;
4367 err = mlx5_query_nic_vport_system_image_guid(mdev,
4368 &mpi->sys_image_guid);
4369 if (err) {
4370 kfree(mpi);
4371 return err;
4372 }
4373
4374 mutex_lock(&mlx5_ib_multiport_mutex);
4375 list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
4376 if (dev->sys_image_guid == mpi->sys_image_guid)
4377 bound = mlx5_ib_bind_slave_port(dev, mpi);
4378
4379 if (bound) {
4380 rdma_roce_rescan_device(&dev->ib_dev);
4381 mpi->ibdev->ib_active = true;
4382 break;
4383 }
4384 }
4385
4386 if (!bound) {
4387 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
4388 dev_dbg(mdev->device,
4389 "no suitable IB device found to bind to, added to unaffiliated list.\n");
4390 }
4391 mutex_unlock(&mlx5_ib_multiport_mutex);
4392
4393 auxiliary_set_drvdata(adev, mpi);
4394 return 0;
4395}
4396
4397static void mlx5r_mp_remove(struct auxiliary_device *adev)
4398{
4399 struct mlx5_ib_multiport_info *mpi;
4400
4401 mpi = auxiliary_get_drvdata(adev);
4402 mutex_lock(&mlx5_ib_multiport_mutex);
4403 if (mpi->ibdev)
4404 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
4405 else
4406 list_del(&mpi->list);
4407 mutex_unlock(&mlx5_ib_multiport_mutex);
4408 kfree(mpi);
4409}
4410
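/*
 * Probe for the ".rdma" auxiliary device: allocate the IB device, pick the
 * raw Ethernet profile when RoCE is disabled on an Ethernet port (otherwise
 * the default profile) and run all of its stages.
 */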
4411static int mlx5r_probe(struct auxiliary_device *adev,
4412 const struct auxiliary_device_id *id)
4413{
4414 struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
4415 struct mlx5_core_dev *mdev = idev->mdev;
4416 const struct mlx5_ib_profile *profile;
4417 int port_type_cap, num_ports, ret;
4418 enum rdma_link_layer ll;
4419 struct mlx5_ib_dev *dev;
4420
4421 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4422 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4423
4424 num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
4425 MLX5_CAP_GEN(mdev, num_vhca_ports));
4426 dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
4427 if (!dev)
4428 return -ENOMEM;
4429 dev->port = kcalloc(num_ports, sizeof(*dev->port),
4430 GFP_KERNEL);
4431 if (!dev->port) {
4432 ib_dealloc_device(&dev->ib_dev);
4433 return -ENOMEM;
4434 }
4435
4436 dev->mdev = mdev;
4437 dev->num_ports = num_ports;
4438
4439 if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
4440 profile = &raw_eth_profile;
4441 else
4442 profile = &pf_profile;
4443
4444 ret = __mlx5_ib_add(dev, profile);
4445 if (ret) {
4446 kfree(dev->port);
4447 ib_dealloc_device(&dev->ib_dev);
4448 return ret;
4449 }
4450
4451 auxiliary_set_drvdata(adev, dev);
4452 return 0;
4453}
4454
4455static void mlx5r_remove(struct auxiliary_device *adev)
4456{
4457 struct mlx5_ib_dev *dev;
4458
4459 dev = auxiliary_get_drvdata(adev);
4460 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
4461}
4462
4463static const struct auxiliary_device_id mlx5r_mp_id_table[] = {
4464 { .name = MLX5_ADEV_NAME ".multiport", },
4465 {},
4466};
4467
4468static const struct auxiliary_device_id mlx5r_id_table[] = {
4469 { .name = MLX5_ADEV_NAME ".rdma", },
4470 {},
4471};
4472
4473MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table);
4474MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table);
4475
4476static struct auxiliary_driver mlx5r_mp_driver = {
4477 .name = "multiport",
4478 .probe = mlx5r_mp_probe,
4479 .remove = mlx5r_mp_remove,
4480 .id_table = mlx5r_mp_id_table,
4481};
4482
4483static struct auxiliary_driver mlx5r_driver = {
4484 .name = "rdma",
4485 .probe = mlx5r_probe,
4486 .remove = mlx5r_remove,
4487 .id_table = mlx5r_id_table,
4488};
4489
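/*
 * Module init: set up the shared event workqueue and QP event handling,
 * then register the ".multiport" and ".rdma" auxiliary drivers that bind
 * to the corresponding auxiliary devices exposed by mlx5_core.
 */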
4490static int __init mlx5_ib_init(void)
4491{
4492 int ret;
4493
4494 xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
4495 if (!xlt_emergency_page)
4496 return -ENOMEM;
4497
4498 mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
4499 if (!mlx5_ib_event_wq) {
4500 free_page((unsigned long)xlt_emergency_page);
4501 return -ENOMEM;
4502 }
4503
4504 ret = mlx5_ib_qp_event_init();
4505 if (ret)
4506 goto qp_event_err;
4507
4508 mlx5_ib_odp_init();
4509 ret = mlx5r_rep_init();
4510 if (ret)
4511 goto rep_err;
4512 ret = auxiliary_driver_register(&mlx5r_mp_driver);
4513 if (ret)
4514 goto mp_err;
4515 ret = auxiliary_driver_register(&mlx5r_driver);
4516 if (ret)
4517 goto drv_err;
4518 return 0;
4519
4520drv_err:
4521 auxiliary_driver_unregister(&mlx5r_mp_driver);
4522mp_err:
4523 mlx5r_rep_cleanup();
4524rep_err:
4525 mlx5_ib_qp_event_cleanup();
4526qp_event_err:
4527 destroy_workqueue(mlx5_ib_event_wq);
4528 free_page((unsigned long)xlt_emergency_page);
4529 return ret;
4530}
4531
4532static void __exit mlx5_ib_cleanup(void)
4533{
4534 auxiliary_driver_unregister(&mlx5r_driver);
4535 auxiliary_driver_unregister(&mlx5r_mp_driver);
4536 mlx5r_rep_cleanup();
4537
4538 mlx5_ib_qp_event_cleanup();
4539 destroy_workqueue(mlx5_ib_event_wq);
4540 free_page((unsigned long)xlt_emergency_page);
4541}
4542
4543module_init(mlx5_ib_init);
4544module_exit(mlx5_ib_cleanup);
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
4 * Copyright (c) 2020, Intel Corporation. All rights reserved.
5 */
6
7#include <linux/debugfs.h>
8#include <linux/highmem.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/errno.h>
12#include <linux/pci.h>
13#include <linux/dma-mapping.h>
14#include <linux/slab.h>
15#include <linux/bitmap.h>
16#include <linux/sched.h>
17#include <linux/sched/mm.h>
18#include <linux/sched/task.h>
19#include <linux/delay.h>
20#include <rdma/ib_user_verbs.h>
21#include <rdma/ib_addr.h>
22#include <rdma/ib_cache.h>
23#include <linux/mlx5/port.h>
24#include <linux/mlx5/vport.h>
25#include <linux/mlx5/fs.h>
26#include <linux/mlx5/eswitch.h>
27#include <linux/list.h>
28#include <rdma/ib_smi.h>
29#include <rdma/ib_umem_odp.h>
30#include <rdma/lag.h>
31#include <linux/in.h>
32#include <linux/etherdevice.h>
33#include "mlx5_ib.h"
34#include "ib_rep.h"
35#include "cmd.h"
36#include "devx.h"
37#include "dm.h"
38#include "fs.h"
39#include "srq.h"
40#include "qp.h"
41#include "wr.h"
42#include "restrack.h"
43#include "counters.h"
44#include "umr.h"
45#include <rdma/uverbs_std_types.h>
46#include <rdma/uverbs_ioctl.h>
47#include <rdma/mlx5_user_ioctl_verbs.h>
48#include <rdma/mlx5_user_ioctl_cmds.h>
49
50#define UVERBS_MODULE_NAME mlx5_ib
51#include <rdma/uverbs_named_ioctl.h>
52
53MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
54MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
55MODULE_LICENSE("Dual BSD/GPL");
56
57struct mlx5_ib_event_work {
58 struct work_struct work;
59 union {
60 struct mlx5_ib_dev *dev;
61 struct mlx5_ib_multiport_info *mpi;
62 };
63 bool is_slave;
64 unsigned int event;
65 void *param;
66};
67
68enum {
69 MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
70};
71
72static struct workqueue_struct *mlx5_ib_event_wq;
73static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
74static LIST_HEAD(mlx5_ib_dev_list);
75/*
76 * This mutex should be held when accessing either of the above lists
77 */
78static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
79
80struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
81{
82 struct mlx5_ib_dev *dev;
83
84 mutex_lock(&mlx5_ib_multiport_mutex);
85 dev = mpi->ibdev;
86 mutex_unlock(&mlx5_ib_multiport_mutex);
87 return dev;
88}
89
90static enum rdma_link_layer
91mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
92{
93 switch (port_type_cap) {
94 case MLX5_CAP_PORT_TYPE_IB:
95 return IB_LINK_LAYER_INFINIBAND;
96 case MLX5_CAP_PORT_TYPE_ETH:
97 return IB_LINK_LAYER_ETHERNET;
98 default:
99 return IB_LINK_LAYER_UNSPECIFIED;
100 }
101}
102
103static enum rdma_link_layer
104mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
105{
106 struct mlx5_ib_dev *dev = to_mdev(device);
107 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
108
109 return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
110}
111
112static int get_port_state(struct ib_device *ibdev,
113 u32 port_num,
114 enum ib_port_state *state)
115{
116 struct ib_port_attr attr;
117 int ret;
118
119 memset(&attr, 0, sizeof(attr));
120 ret = ibdev->ops.query_port(ibdev, port_num, &attr);
121 if (!ret)
122 *state = attr.state;
123 return ret;
124}
125
126static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
127 struct net_device *ndev,
128 struct net_device *upper,
129 u32 *port_num)
130{
131 struct net_device *rep_ndev;
132 struct mlx5_ib_port *port;
133 int i;
134
135 for (i = 0; i < dev->num_ports; i++) {
136 port = &dev->port[i];
137 if (!port->rep)
138 continue;
139
140 if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
141 *port_num = i + 1;
142 return &port->roce;
143 }
144
145 if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
146 continue;
147
148 read_lock(&port->roce.netdev_lock);
149 rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
150 port->rep->vport);
151 if (rep_ndev == ndev) {
152 read_unlock(&port->roce.netdev_lock);
153 *port_num = i + 1;
154 return &port->roce;
155 }
156 read_unlock(&port->roce.netdev_lock);
157 }
158
159 return NULL;
160}
161
162static int mlx5_netdev_event(struct notifier_block *this,
163 unsigned long event, void *ptr)
164{
165 struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
166 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
167 u32 port_num = roce->native_port_num;
168 struct mlx5_core_dev *mdev;
169 struct mlx5_ib_dev *ibdev;
170
171 ibdev = roce->dev;
172 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
173 if (!mdev)
174 return NOTIFY_DONE;
175
176 switch (event) {
177 case NETDEV_REGISTER:
178 /* Should already be registered during the load */
179 if (ibdev->is_rep)
180 break;
181 write_lock(&roce->netdev_lock);
182 if (ndev->dev.parent == mdev->device)
183 roce->netdev = ndev;
184 write_unlock(&roce->netdev_lock);
185 break;
186
187 case NETDEV_UNREGISTER:
188 /* In case of reps, ib device goes away before the netdevs */
189 write_lock(&roce->netdev_lock);
190 if (roce->netdev == ndev)
191 roce->netdev = NULL;
192 write_unlock(&roce->netdev_lock);
193 break;
194
195 case NETDEV_CHANGE:
196 case NETDEV_UP:
197 case NETDEV_DOWN: {
198 struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
199 struct net_device *upper = NULL;
200
201 if (lag_ndev) {
202 upper = netdev_master_upper_dev_get(lag_ndev);
203 dev_put(lag_ndev);
204 }
205
206 if (ibdev->is_rep)
207 roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
208 if (!roce)
209 return NOTIFY_DONE;
210 if ((upper == ndev ||
211 ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
212 ibdev->ib_active) {
213 struct ib_event ibev = { };
214 enum ib_port_state port_state;
215
216 if (get_port_state(&ibdev->ib_dev, port_num,
217 &port_state))
218 goto done;
219
220 if (roce->last_port_state == port_state)
221 goto done;
222
223 roce->last_port_state = port_state;
224 ibev.device = &ibdev->ib_dev;
225 if (port_state == IB_PORT_DOWN)
226 ibev.event = IB_EVENT_PORT_ERR;
227 else if (port_state == IB_PORT_ACTIVE)
228 ibev.event = IB_EVENT_PORT_ACTIVE;
229 else
230 goto done;
231
232 ibev.element.port_num = port_num;
233 ib_dispatch_event(&ibev);
234 }
235 break;
236 }
237
238 default:
239 break;
240 }
241done:
242 mlx5_ib_put_native_port_mdev(ibdev, port_num);
243 return NOTIFY_DONE;
244}
245
246static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
247 u32 port_num)
248{
249 struct mlx5_ib_dev *ibdev = to_mdev(device);
250 struct net_device *ndev;
251 struct mlx5_core_dev *mdev;
252
253 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
254 if (!mdev)
255 return NULL;
256
257 ndev = mlx5_lag_get_roce_netdev(mdev);
258 if (ndev)
259 goto out;
260
261 /* Ensure ndev does not disappear before we invoke dev_hold()
262 */
263 read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
264 ndev = ibdev->port[port_num - 1].roce.netdev;
265 if (ndev)
266 dev_hold(ndev);
267 read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
268
269out:
270 mlx5_ib_put_native_port_mdev(ibdev, port_num);
271 return ndev;
272}
273
274struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
275 u32 ib_port_num,
276 u32 *native_port_num)
277{
278 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
279 ib_port_num);
280 struct mlx5_core_dev *mdev = NULL;
281 struct mlx5_ib_multiport_info *mpi;
282 struct mlx5_ib_port *port;
283
284 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
285 ll != IB_LINK_LAYER_ETHERNET) {
286 if (native_port_num)
287 *native_port_num = ib_port_num;
288 return ibdev->mdev;
289 }
290
291 if (native_port_num)
292 *native_port_num = 1;
293
294 port = &ibdev->port[ib_port_num - 1];
295 spin_lock(&port->mp.mpi_lock);
296 mpi = ibdev->port[ib_port_num - 1].mp.mpi;
297 if (mpi && !mpi->unaffiliate) {
298 mdev = mpi->mdev;
299 /* If it's the master no need to refcount, it'll exist
300 * as long as the ib_dev exists.
301 */
302 if (!mpi->is_master)
303 mpi->mdev_refcnt++;
304 }
305 spin_unlock(&port->mp.mpi_lock);
306
307 return mdev;
308}
309
310void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
311{
312 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
313 port_num);
314 struct mlx5_ib_multiport_info *mpi;
315 struct mlx5_ib_port *port;
316
317 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
318 return;
319
320 port = &ibdev->port[port_num - 1];
321
322 spin_lock(&port->mp.mpi_lock);
323 mpi = ibdev->port[port_num - 1].mp.mpi;
324 if (mpi->is_master)
325 goto out;
326
327 mpi->mdev_refcnt--;
328 if (mpi->unaffiliate)
329 complete(&mpi->unref_comp);
330out:
331 spin_unlock(&port->mp.mpi_lock);
332}
333
334static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
335 u16 *active_speed, u8 *active_width)
336{
337 switch (eth_proto_oper) {
338 case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
339 case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
340 case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
341 case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
342 *active_width = IB_WIDTH_1X;
343 *active_speed = IB_SPEED_SDR;
344 break;
345 case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
346 case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
347 case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
348 case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
349 case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
350 case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
351 case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
352 *active_width = IB_WIDTH_1X;
353 *active_speed = IB_SPEED_QDR;
354 break;
355 case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
356 case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
357 case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
358 *active_width = IB_WIDTH_1X;
359 *active_speed = IB_SPEED_EDR;
360 break;
361 case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
362 case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
363 case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
364 case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
365 *active_width = IB_WIDTH_4X;
366 *active_speed = IB_SPEED_QDR;
367 break;
368 case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
369 case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
370 case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
371 *active_width = IB_WIDTH_1X;
372 *active_speed = IB_SPEED_HDR;
373 break;
374 case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
375 *active_width = IB_WIDTH_4X;
376 *active_speed = IB_SPEED_FDR;
377 break;
378 case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
379 case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
380 case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
381 case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
382 *active_width = IB_WIDTH_4X;
383 *active_speed = IB_SPEED_EDR;
384 break;
385 default:
386 return -EINVAL;
387 }
388
389 return 0;
390}
391
392static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
393 u8 *active_width)
394{
395 switch (eth_proto_oper) {
396 case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
397 case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
398 *active_width = IB_WIDTH_1X;
399 *active_speed = IB_SPEED_SDR;
400 break;
401 case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
402 *active_width = IB_WIDTH_1X;
403 *active_speed = IB_SPEED_DDR;
404 break;
405 case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
406 *active_width = IB_WIDTH_1X;
407 *active_speed = IB_SPEED_QDR;
408 break;
409 case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
410 *active_width = IB_WIDTH_4X;
411 *active_speed = IB_SPEED_QDR;
412 break;
413 case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
414 *active_width = IB_WIDTH_1X;
415 *active_speed = IB_SPEED_EDR;
416 break;
417 case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
418 *active_width = IB_WIDTH_2X;
419 *active_speed = IB_SPEED_EDR;
420 break;
421 case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
422 *active_width = IB_WIDTH_1X;
423 *active_speed = IB_SPEED_HDR;
424 break;
425 case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
426 *active_width = IB_WIDTH_4X;
427 *active_speed = IB_SPEED_EDR;
428 break;
429 case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
430 *active_width = IB_WIDTH_2X;
431 *active_speed = IB_SPEED_HDR;
432 break;
433 case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
434 *active_width = IB_WIDTH_1X;
435 *active_speed = IB_SPEED_NDR;
436 break;
437 case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
438 *active_width = IB_WIDTH_4X;
439 *active_speed = IB_SPEED_HDR;
440 break;
441 case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
442 *active_width = IB_WIDTH_2X;
443 *active_speed = IB_SPEED_NDR;
444 break;
445 case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
446 *active_width = IB_WIDTH_4X;
447 *active_speed = IB_SPEED_NDR;
448 break;
449 default:
450 return -EINVAL;
451 }
452
453 return 0;
454}
455
456static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
457 u8 *active_width, bool ext)
458{
459 return ext ?
460 translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
461 active_width) :
462 translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
463 active_width);
464}
465
466static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
467 struct ib_port_attr *props)
468{
469 struct mlx5_ib_dev *dev = to_mdev(device);
470 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
471 struct mlx5_core_dev *mdev;
472 struct net_device *ndev, *upper;
473 enum ib_mtu ndev_ib_mtu;
474 bool put_mdev = true;
475 u32 eth_prot_oper;
476 u32 mdev_port_num;
477 bool ext;
478 int err;
479
480 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
481 if (!mdev) {
482 /* This means the port isn't affiliated yet. Get the
483 * info for the master port instead.
484 */
485 put_mdev = false;
486 mdev = dev->mdev;
487 mdev_port_num = 1;
488 port_num = 1;
489 }
490
491 /* Possible bad flows are checked before filling out props so in case
492 * of an error it will still be zeroed out.
493 * Use native port in case of reps
494 */
495 if (dev->is_rep)
496 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
497 1);
498 else
499 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
500 mdev_port_num);
501 if (err)
502 goto out;
503 ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
504 eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
505
506 props->active_width = IB_WIDTH_4X;
507 props->active_speed = IB_SPEED_QDR;
508
509 translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
510 &props->active_width, ext);
511
512 if (!dev->is_rep && dev->mdev->roce.roce_en) {
513 u16 qkey_viol_cntr;
514
515 props->port_cap_flags |= IB_PORT_CM_SUP;
516 props->ip_gids = true;
517 props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
518 roce_address_table_size);
519 mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
520 props->qkey_viol_cntr = qkey_viol_cntr;
521 }
522 props->max_mtu = IB_MTU_4096;
523 props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
524 props->pkey_tbl_len = 1;
525 props->state = IB_PORT_DOWN;
526 props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
527
528 /* If this is a stub query for an unaffiliated port stop here */
529 if (!put_mdev)
530 goto out;
531
532 ndev = mlx5_ib_get_netdev(device, port_num);
533 if (!ndev)
534 goto out;
535
536 if (dev->lag_active) {
537 rcu_read_lock();
538 upper = netdev_master_upper_dev_get_rcu(ndev);
539 if (upper) {
540 dev_put(ndev);
541 ndev = upper;
542 dev_hold(ndev);
543 }
544 rcu_read_unlock();
545 }
546
547 if (netif_running(ndev) && netif_carrier_ok(ndev)) {
548 props->state = IB_PORT_ACTIVE;
549 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
550 }
551
552 ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
553
554 dev_put(ndev);
555
556 props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
557out:
558 if (put_mdev)
559 mlx5_ib_put_native_port_mdev(dev, port_num);
560 return err;
561}
562
563static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
564 unsigned int index, const union ib_gid *gid,
565 const struct ib_gid_attr *attr)
566{
567 enum ib_gid_type gid_type;
568 u16 vlan_id = 0xffff;
569 u8 roce_version = 0;
570 u8 roce_l3_type = 0;
571 u8 mac[ETH_ALEN];
572 int ret;
573
574 gid_type = attr->gid_type;
575 if (gid) {
576 ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
577 if (ret)
578 return ret;
579 }
580
581 switch (gid_type) {
582 case IB_GID_TYPE_ROCE:
583 roce_version = MLX5_ROCE_VERSION_1;
584 break;
585 case IB_GID_TYPE_ROCE_UDP_ENCAP:
586 roce_version = MLX5_ROCE_VERSION_2;
587 if (gid && ipv6_addr_v4mapped((void *)gid))
588 roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
589 else
590 roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
591 break;
592
593 default:
594 mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
595 }
596
597 return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
598 roce_l3_type, gid->raw, mac,
599 vlan_id < VLAN_CFI_MASK, vlan_id,
600 port_num);
601}
602
603static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
604 __always_unused void **context)
605{
606 return set_roce_addr(to_mdev(attr->device), attr->port_num,
607 attr->index, &attr->gid, attr);
608}
609
610static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
611 __always_unused void **context)
612{
613 return set_roce_addr(to_mdev(attr->device), attr->port_num,
614 attr->index, NULL, attr);
615}
616
617__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
618 const struct ib_gid_attr *attr)
619{
620 if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
621 return 0;
622
623 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
624}
625
626static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
627{
628 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
629 return !MLX5_CAP_GEN(dev->mdev, ib_virt);
630 return 0;
631}
632
633enum {
634 MLX5_VPORT_ACCESS_METHOD_MAD,
635 MLX5_VPORT_ACCESS_METHOD_HCA,
636 MLX5_VPORT_ACCESS_METHOD_NIC,
637};
638
639static int mlx5_get_vport_access_method(struct ib_device *ibdev)
640{
641 if (mlx5_use_mad_ifc(to_mdev(ibdev)))
642 return MLX5_VPORT_ACCESS_METHOD_MAD;
643
644 if (mlx5_ib_port_link_layer(ibdev, 1) ==
645 IB_LINK_LAYER_ETHERNET)
646 return MLX5_VPORT_ACCESS_METHOD_NIC;
647
648 return MLX5_VPORT_ACCESS_METHOD_HCA;
649}
650
651static void get_atomic_caps(struct mlx5_ib_dev *dev,
652 u8 atomic_size_qp,
653 struct ib_device_attr *props)
654{
655 u8 tmp;
656 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
657 u8 atomic_req_8B_endianness_mode =
658 MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
659
660 /* Check if HW supports 8 bytes standard atomic operations and capable
661 * of host endianness respond
662 */
663 tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
664 if (((atomic_operations & tmp) == tmp) &&
665 (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
666 (atomic_req_8B_endianness_mode)) {
667 props->atomic_cap = IB_ATOMIC_HCA;
668 } else {
669 props->atomic_cap = IB_ATOMIC_NONE;
670 }
671}
672
673static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
674 struct ib_device_attr *props)
675{
676 u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
677
678 get_atomic_caps(dev, atomic_size_qp, props);
679}
680
681static int mlx5_query_system_image_guid(struct ib_device *ibdev,
682 __be64 *sys_image_guid)
683{
684 struct mlx5_ib_dev *dev = to_mdev(ibdev);
685 struct mlx5_core_dev *mdev = dev->mdev;
686 u64 tmp;
687 int err;
688
689 switch (mlx5_get_vport_access_method(ibdev)) {
690 case MLX5_VPORT_ACCESS_METHOD_MAD:
691 return mlx5_query_mad_ifc_system_image_guid(ibdev,
692 sys_image_guid);
693
694 case MLX5_VPORT_ACCESS_METHOD_HCA:
695 err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
696 break;
697
698 case MLX5_VPORT_ACCESS_METHOD_NIC:
699 err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
700 break;
701
702 default:
703 return -EINVAL;
704 }
705
706 if (!err)
707 *sys_image_guid = cpu_to_be64(tmp);
708
709 return err;
710
711}
712
713static int mlx5_query_max_pkeys(struct ib_device *ibdev,
714 u16 *max_pkeys)
715{
716 struct mlx5_ib_dev *dev = to_mdev(ibdev);
717 struct mlx5_core_dev *mdev = dev->mdev;
718
719 switch (mlx5_get_vport_access_method(ibdev)) {
720 case MLX5_VPORT_ACCESS_METHOD_MAD:
721 return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
722
723 case MLX5_VPORT_ACCESS_METHOD_HCA:
724 case MLX5_VPORT_ACCESS_METHOD_NIC:
725 *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
726 pkey_table_size));
727 return 0;
728
729 default:
730 return -EINVAL;
731 }
732}
733
734static int mlx5_query_vendor_id(struct ib_device *ibdev,
735 u32 *vendor_id)
736{
737 struct mlx5_ib_dev *dev = to_mdev(ibdev);
738
739 switch (mlx5_get_vport_access_method(ibdev)) {
740 case MLX5_VPORT_ACCESS_METHOD_MAD:
741 return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
742
743 case MLX5_VPORT_ACCESS_METHOD_HCA:
744 case MLX5_VPORT_ACCESS_METHOD_NIC:
745 return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
746
747 default:
748 return -EINVAL;
749 }
750}
751
752static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
753 __be64 *node_guid)
754{
755 u64 tmp;
756 int err;
757
758 switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
759 case MLX5_VPORT_ACCESS_METHOD_MAD:
760 return mlx5_query_mad_ifc_node_guid(dev, node_guid);
761
762 case MLX5_VPORT_ACCESS_METHOD_HCA:
763 err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
764 break;
765
766 case MLX5_VPORT_ACCESS_METHOD_NIC:
767 err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
768 break;
769
770 default:
771 return -EINVAL;
772 }
773
774 if (!err)
775 *node_guid = cpu_to_be64(tmp);
776
777 return err;
778}
779
780struct mlx5_reg_node_desc {
781 u8 desc[IB_DEVICE_NODE_DESC_MAX];
782};
783
784static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
785{
786 struct mlx5_reg_node_desc in;
787
788 if (mlx5_use_mad_ifc(dev))
789 return mlx5_query_mad_ifc_node_desc(dev, node_desc);
790
791 memset(&in, 0, sizeof(in));
792
793 return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
794 sizeof(struct mlx5_reg_node_desc),
795 MLX5_REG_NODE_DESC, 0, 0);
796}
797
798static int mlx5_ib_query_device(struct ib_device *ibdev,
799 struct ib_device_attr *props,
800 struct ib_udata *uhw)
801{
802 size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
803 struct mlx5_ib_dev *dev = to_mdev(ibdev);
804 struct mlx5_core_dev *mdev = dev->mdev;
805 int err = -ENOMEM;
806 int max_sq_desc;
807 int max_rq_sg;
808 int max_sq_sg;
809 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
810 bool raw_support = !mlx5_core_mp_enabled(mdev);
811 struct mlx5_ib_query_device_resp resp = {};
812 size_t resp_len;
813 u64 max_tso;
814
815 resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
816 if (uhw_outlen && uhw_outlen < resp_len)
817 return -EINVAL;
818
819 resp.response_length = resp_len;
820
821 if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
822 return -EINVAL;
823
824 memset(props, 0, sizeof(*props));
825 err = mlx5_query_system_image_guid(ibdev,
826 &props->sys_image_guid);
827 if (err)
828 return err;
829
830 props->max_pkeys = dev->pkey_table_len;
831
832 err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
833 if (err)
834 return err;
835
836 props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
837 (fw_rev_min(dev->mdev) << 16) |
838 fw_rev_sub(dev->mdev);
839 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
840 IB_DEVICE_PORT_ACTIVE_EVENT |
841 IB_DEVICE_SYS_IMAGE_GUID |
842 IB_DEVICE_RC_RNR_NAK_GEN;
843
844 if (MLX5_CAP_GEN(mdev, pkv))
845 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
846 if (MLX5_CAP_GEN(mdev, qkv))
847 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
848 if (MLX5_CAP_GEN(mdev, apm))
849 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
850 if (MLX5_CAP_GEN(mdev, xrc))
851 props->device_cap_flags |= IB_DEVICE_XRC;
852 if (MLX5_CAP_GEN(mdev, imaicl)) {
853 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
854 IB_DEVICE_MEM_WINDOW_TYPE_2B;
855 props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
856 /* We support 'Gappy' memory registration too */
857 props->kernel_cap_flags |= IBK_SG_GAPS_REG;
858 }
859 /* IB_WR_REG_MR always requires changing the entity size with UMR */
860 if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
861 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
862 if (MLX5_CAP_GEN(mdev, sho)) {
863 props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
864 /* At this stage no support for signature handover */
865 props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
866 IB_PROT_T10DIF_TYPE_2 |
867 IB_PROT_T10DIF_TYPE_3;
868 props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
869 IB_GUARD_T10DIF_CSUM;
870 }
871 if (MLX5_CAP_GEN(mdev, block_lb_mc))
872 props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
873
874 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
875 if (MLX5_CAP_ETH(mdev, csum_cap)) {
876 /* Legacy bit to support old userspace libraries */
877 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
878 props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
879 }
880
881 if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
882 props->raw_packet_caps |=
883 IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
884
885 if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
886 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
887 if (max_tso) {
888 resp.tso_caps.max_tso = 1 << max_tso;
889 resp.tso_caps.supported_qpts |=
890 1 << IB_QPT_RAW_PACKET;
891 resp.response_length += sizeof(resp.tso_caps);
892 }
893 }
894
895 if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
896 resp.rss_caps.rx_hash_function =
897 MLX5_RX_HASH_FUNC_TOEPLITZ;
898 resp.rss_caps.rx_hash_fields_mask =
899 MLX5_RX_HASH_SRC_IPV4 |
900 MLX5_RX_HASH_DST_IPV4 |
901 MLX5_RX_HASH_SRC_IPV6 |
902 MLX5_RX_HASH_DST_IPV6 |
903 MLX5_RX_HASH_SRC_PORT_TCP |
904 MLX5_RX_HASH_DST_PORT_TCP |
905 MLX5_RX_HASH_SRC_PORT_UDP |
906 MLX5_RX_HASH_DST_PORT_UDP |
907 MLX5_RX_HASH_INNER;
908 resp.response_length += sizeof(resp.rss_caps);
909 }
910 } else {
911 if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
912 resp.response_length += sizeof(resp.tso_caps);
913 if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
914 resp.response_length += sizeof(resp.rss_caps);
915 }
916
917 if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
918 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
919 props->kernel_cap_flags |= IBK_UD_TSO;
920 }
921
922 if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
923 MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
924 raw_support)
925 props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
926
927 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
928 MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
929 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
930
931 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
932 MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
933 raw_support) {
934 /* Legacy bit to support old userspace libraries */
935 props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
936 props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
937 }
938
939 if (MLX5_CAP_DEV_MEM(mdev, memic)) {
940 props->max_dm_size =
941 MLX5_CAP_DEV_MEM(mdev, max_memic_size);
942 }
943
944 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
945 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
946
947 if (MLX5_CAP_GEN(mdev, end_pad))
948 props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
949
950 props->vendor_part_id = mdev->pdev->device;
951 props->hw_ver = mdev->pdev->revision;
952
953 props->max_mr_size = ~0ull;
954 props->page_size_cap = ~(min_page_size - 1);
955 props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
956 props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
957 max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
958 sizeof(struct mlx5_wqe_data_seg);
959 max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
960 max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
961 sizeof(struct mlx5_wqe_raddr_seg)) /
962 sizeof(struct mlx5_wqe_data_seg);
963 props->max_send_sge = max_sq_sg;
964 props->max_recv_sge = max_rq_sg;
965 props->max_sge_rd = MLX5_MAX_SGE_RD;
966 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
967 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
968 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
969 props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
970 props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
971 props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
972 props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
973 props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
974 props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
975 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
976 props->max_srq_sge = max_rq_sg - 1;
977 props->max_fast_reg_page_list_len =
978 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
979 props->max_pi_fast_reg_page_list_len =
980 props->max_fast_reg_page_list_len / 2;
981 props->max_sgl_rd =
982 MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
983 get_atomic_caps_qp(dev, props);
984 props->masked_atomic_cap = IB_ATOMIC_NONE;
985 props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
986 props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
987 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
988 props->max_mcast_grp;
989 props->max_ah = INT_MAX;
990 props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
991 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
992
993 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
994 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
995 props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
996 props->odp_caps = dev->odp_caps;
997 if (!uhw) {
998 /* ODP for kernel QPs is not implemented for receive
999 * WQEs and SRQ WQEs
1000 */
1001 props->odp_caps.per_transport_caps.rc_odp_caps &=
1002 ~(IB_ODP_SUPPORT_READ |
1003 IB_ODP_SUPPORT_SRQ_RECV);
1004 props->odp_caps.per_transport_caps.uc_odp_caps &=
1005 ~(IB_ODP_SUPPORT_READ |
1006 IB_ODP_SUPPORT_SRQ_RECV);
1007 props->odp_caps.per_transport_caps.ud_odp_caps &=
1008 ~(IB_ODP_SUPPORT_READ |
1009 IB_ODP_SUPPORT_SRQ_RECV);
1010 props->odp_caps.per_transport_caps.xrc_odp_caps &=
1011 ~(IB_ODP_SUPPORT_READ |
1012 IB_ODP_SUPPORT_SRQ_RECV);
1013 }
1014 }
1015
1016 if (mlx5_core_is_vf(mdev))
1017 props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
1018
1019 if (mlx5_ib_port_link_layer(ibdev, 1) ==
1020 IB_LINK_LAYER_ETHERNET && raw_support) {
1021 props->rss_caps.max_rwq_indirection_tables =
1022 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1023 props->rss_caps.max_rwq_indirection_table_size =
1024 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1025 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
1026 props->max_wq_type_rq =
1027 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1028 }
1029
1030 if (MLX5_CAP_GEN(mdev, tag_matching)) {
1031 props->tm_caps.max_num_tags =
1032 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1033 props->tm_caps.max_ops =
1034 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1035 props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
1036 }
1037
1038 if (MLX5_CAP_GEN(mdev, tag_matching) &&
1039 MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
1040 props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
1041 props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
1042 }
1043
1044 if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1045 props->cq_caps.max_cq_moderation_count =
1046 MLX5_MAX_CQ_COUNT;
1047 props->cq_caps.max_cq_moderation_period =
1048 MLX5_MAX_CQ_PERIOD;
1049 }
1050
1051 if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
1052 resp.response_length += sizeof(resp.cqe_comp_caps);
1053
1054 if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1055 resp.cqe_comp_caps.max_num =
1056 MLX5_CAP_GEN(dev->mdev,
1057 cqe_compression_max_num);
1058
1059 resp.cqe_comp_caps.supported_format =
1060 MLX5_IB_CQE_RES_FORMAT_HASH |
1061 MLX5_IB_CQE_RES_FORMAT_CSUM;
1062
1063 if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1064 resp.cqe_comp_caps.supported_format |=
1065 MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
1066 }
1067 }
1068
1069 if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
1070 raw_support) {
1071 if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1072 MLX5_CAP_GEN(mdev, qos)) {
1073 resp.packet_pacing_caps.qp_rate_limit_max =
1074 MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1075 resp.packet_pacing_caps.qp_rate_limit_min =
1076 MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1077 resp.packet_pacing_caps.supported_qpts |=
1078 1 << IB_QPT_RAW_PACKET;
1079 if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1080 MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1081 resp.packet_pacing_caps.cap_flags |=
1082 MLX5_IB_PP_SUPPORT_BURST;
1083 }
1084 resp.response_length += sizeof(resp.packet_pacing_caps);
1085 }
1086
1087 if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
1088 uhw_outlen) {
1089 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1090 resp.mlx5_ib_support_multi_pkt_send_wqes =
1091 MLX5_IB_ALLOW_MPW;
1092
1093 if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1094 resp.mlx5_ib_support_multi_pkt_send_wqes |=
1095 MLX5_IB_SUPPORT_EMPW;
1096
1097 resp.response_length +=
1098 sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
1099 }
1100
1101 if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
1102 resp.response_length += sizeof(resp.flags);
1103
1104 if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1105 resp.flags |=
1106 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
1107
1108 if (MLX5_CAP_GEN(mdev, cqe_128_always))
1109 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
1110 if (MLX5_CAP_GEN(mdev, qp_packet_based))
1111 resp.flags |=
1112 MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1113
1114 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1115 }
1116
1117 if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
1118 resp.response_length += sizeof(resp.sw_parsing_caps);
1119 if (MLX5_CAP_ETH(mdev, swp)) {
1120 resp.sw_parsing_caps.sw_parsing_offloads |=
1121 MLX5_IB_SW_PARSING;
1122
1123 if (MLX5_CAP_ETH(mdev, swp_csum))
1124 resp.sw_parsing_caps.sw_parsing_offloads |=
1125 MLX5_IB_SW_PARSING_CSUM;
1126
1127 if (MLX5_CAP_ETH(mdev, swp_lso))
1128 resp.sw_parsing_caps.sw_parsing_offloads |=
1129 MLX5_IB_SW_PARSING_LSO;
1130
1131 if (resp.sw_parsing_caps.sw_parsing_offloads)
1132 resp.sw_parsing_caps.supported_qpts =
1133 BIT(IB_QPT_RAW_PACKET);
1134 }
1135 }
1136
1137 if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
1138 raw_support) {
1139 resp.response_length += sizeof(resp.striding_rq_caps);
1140 if (MLX5_CAP_GEN(mdev, striding_rq)) {
1141 resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
1142 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1143 resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
1144 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
1145 if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
1146 resp.striding_rq_caps
1147 .min_single_wqe_log_num_of_strides =
1148 MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1149 else
1150 resp.striding_rq_caps
1151 .min_single_wqe_log_num_of_strides =
1152 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1153 resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
1154 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
1155 resp.striding_rq_caps.supported_qpts =
1156 BIT(IB_QPT_RAW_PACKET);
1157 }
1158 }
1159
1160 if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
1161 resp.response_length += sizeof(resp.tunnel_offloads_caps);
1162 if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1163 resp.tunnel_offloads_caps |=
1164 MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
1165 if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1166 resp.tunnel_offloads_caps |=
1167 MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
1168 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1169 resp.tunnel_offloads_caps |=
1170 MLX5_IB_TUNNELED_OFFLOADS_GRE;
1171 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
1172 resp.tunnel_offloads_caps |=
1173 MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1174 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
1175 resp.tunnel_offloads_caps |=
1176 MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1177 }
1178
1179 if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
1180 resp.response_length += sizeof(resp.dci_streams_caps);
1181
1182 resp.dci_streams_caps.max_log_num_concurent =
1183 MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
1184
1185 resp.dci_streams_caps.max_log_num_errored =
1186 MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
1187 }
1188
1189 if (uhw_outlen) {
1190 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
1191
1192 if (err)
1193 return err;
1194 }
1195
1196 return 0;
1197}
1198
1199static void translate_active_width(struct ib_device *ibdev, u16 active_width,
1200 u8 *ib_width)
1201{
1202 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1203
1204 if (active_width & MLX5_PTYS_WIDTH_1X)
1205 *ib_width = IB_WIDTH_1X;
1206 else if (active_width & MLX5_PTYS_WIDTH_2X)
1207 *ib_width = IB_WIDTH_2X;
1208 else if (active_width & MLX5_PTYS_WIDTH_4X)
1209 *ib_width = IB_WIDTH_4X;
1210 else if (active_width & MLX5_PTYS_WIDTH_8X)
1211 *ib_width = IB_WIDTH_8X;
1212 else if (active_width & MLX5_PTYS_WIDTH_12X)
1213 *ib_width = IB_WIDTH_12X;
1214 else {
1215 mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
1216 active_width);
1217 *ib_width = IB_WIDTH_4X;
1218 }
1219
1220 return;
1221}
1222
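/*
 * Map an MTU in bytes to the IB MTU enumeration used in ib_port_attr
 * (IB_MTU_256 == 1 ... IB_MTU_4096 == 5). Any other value is rejected
 * with -1.
 */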
1223static int mlx5_mtu_to_ib_mtu(int mtu)
1224{
1225 switch (mtu) {
1226 case 256: return 1;
1227 case 512: return 2;
1228 case 1024: return 3;
1229 case 2048: return 4;
1230 case 4096: return 5;
1231 default:
1232 pr_warn("invalid mtu\n");
1233 return -1;
1234 }
1235}
1236
1237enum ib_max_vl_num {
1238 __IB_MAX_VL_0 = 1,
1239 __IB_MAX_VL_0_1 = 2,
1240 __IB_MAX_VL_0_3 = 3,
1241 __IB_MAX_VL_0_7 = 4,
1242 __IB_MAX_VL_0_14 = 5,
1243};
1244
1245enum mlx5_vl_hw_cap {
1246 MLX5_VL_HW_0 = 1,
1247 MLX5_VL_HW_0_1 = 2,
1248 MLX5_VL_HW_0_2 = 3,
1249 MLX5_VL_HW_0_3 = 4,
1250 MLX5_VL_HW_0_4 = 5,
1251 MLX5_VL_HW_0_5 = 6,
1252 MLX5_VL_HW_0_6 = 7,
1253 MLX5_VL_HW_0_7 = 8,
1254 MLX5_VL_HW_0_14 = 15
1255};
1256
1257static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
1258 u8 *max_vl_num)
1259{
1260 switch (vl_hw_cap) {
1261 case MLX5_VL_HW_0:
1262 *max_vl_num = __IB_MAX_VL_0;
1263 break;
1264 case MLX5_VL_HW_0_1:
1265 *max_vl_num = __IB_MAX_VL_0_1;
1266 break;
1267 case MLX5_VL_HW_0_3:
1268 *max_vl_num = __IB_MAX_VL_0_3;
1269 break;
1270 case MLX5_VL_HW_0_7:
1271 *max_vl_num = __IB_MAX_VL_0_7;
1272 break;
1273 case MLX5_VL_HW_0_14:
1274 *max_vl_num = __IB_MAX_VL_0_14;
1275 break;
1276
1277 default:
1278 return -EINVAL;
1279 }
1280
1281 return 0;
1282}
1283
1284static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
1285 struct ib_port_attr *props)
1286{
1287 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1288 struct mlx5_core_dev *mdev = dev->mdev;
1289 struct mlx5_hca_vport_context *rep;
1290 u16 max_mtu;
1291 u16 oper_mtu;
1292 int err;
1293 u16 ib_link_width_oper;
1294 u8 vl_hw_cap;
1295
1296 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
1297 if (!rep) {
1298 err = -ENOMEM;
1299 goto out;
1300 }
1301
1302	/* props is zeroed by the caller; avoid zeroing it again here */
1303
1304 err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1305 if (err)
1306 goto out;
1307
1308 props->lid = rep->lid;
1309 props->lmc = rep->lmc;
1310 props->sm_lid = rep->sm_lid;
1311 props->sm_sl = rep->sm_sl;
1312 props->state = rep->vport_state;
1313 props->phys_state = rep->port_physical_state;
1314 props->port_cap_flags = rep->cap_mask1;
1315 props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1316 props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1317 props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1318 props->bad_pkey_cntr = rep->pkey_violation_counter;
1319 props->qkey_viol_cntr = rep->qkey_violation_counter;
1320 props->subnet_timeout = rep->subnet_timeout;
1321 props->init_type_reply = rep->init_type_reply;
1322
1323 if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
1324 props->port_cap_flags2 = rep->cap_mask2;
1325
1326 err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
1327 &props->active_speed, port);
1328 if (err)
1329 goto out;
1330
1331 translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
1332
1333 mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1334
1335 props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
1336
1337 mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1338
1339 props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
1340
1341 err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1342 if (err)
1343 goto out;
1344
1345 err = translate_max_vl_num(ibdev, vl_hw_cap,
1346 &props->max_vl_num);
1347out:
1348 kfree(rep);
1349 return err;
1350}
1351
1352int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
1353 struct ib_port_attr *props)
1354{
1355 unsigned int count;
1356 int ret;
1357
1358 switch (mlx5_get_vport_access_method(ibdev)) {
1359 case MLX5_VPORT_ACCESS_METHOD_MAD:
1360 ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1361 break;
1362
1363 case MLX5_VPORT_ACCESS_METHOD_HCA:
1364 ret = mlx5_query_hca_port(ibdev, port, props);
1365 break;
1366
1367 case MLX5_VPORT_ACCESS_METHOD_NIC:
1368 ret = mlx5_query_port_roce(ibdev, port, props);
1369 break;
1370
1371 default:
1372 ret = -EINVAL;
1373 }
1374
1375 if (!ret && props) {
1376 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1377 struct mlx5_core_dev *mdev;
1378 bool put_mdev = true;
1379
1380 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1381 if (!mdev) {
1382			/* If the port isn't affiliated yet, query the master.
1383			 * The master and slave will have the same values.
1384			 */
1385 mdev = dev->mdev;
1386 port = 1;
1387 put_mdev = false;
1388 }
1389 count = mlx5_core_reserved_gids_count(mdev);
1390 if (put_mdev)
1391 mlx5_ib_put_native_port_mdev(dev, port);
1392 props->gid_tbl_len -= count;
1393 }
1394 return ret;
1395}
1396
1397static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
1398 struct ib_port_attr *props)
1399{
1400 return mlx5_query_port_roce(ibdev, port, props);
1401}
1402
1403static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1404 u16 *pkey)
1405{
1406 /* Default special Pkey for representor device port as per the
1407 * IB specification 1.3 section 10.9.1.2.
1408 */
1409 *pkey = 0xffff;
1410 return 0;
1411}
1412
1413static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
1414 union ib_gid *gid)
1415{
1416 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1417 struct mlx5_core_dev *mdev = dev->mdev;
1418
1419 switch (mlx5_get_vport_access_method(ibdev)) {
1420 case MLX5_VPORT_ACCESS_METHOD_MAD:
1421 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1422
1423 case MLX5_VPORT_ACCESS_METHOD_HCA:
1424 return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1425
1426 default:
1427 return -EINVAL;
1428 }
1429
1430}
1431
1432static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
1433 u16 index, u16 *pkey)
1434{
1435 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1436 struct mlx5_core_dev *mdev;
1437 bool put_mdev = true;
1438 u32 mdev_port_num;
1439 int err;
1440
1441 mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1442 if (!mdev) {
1443 /* The port isn't affiliated yet, get the PKey from the master
1444 * port. For RoCE the PKey tables will be the same.
1445 */
1446 put_mdev = false;
1447 mdev = dev->mdev;
1448 mdev_port_num = 1;
1449 }
1450
1451 err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1452 index, pkey);
1453 if (put_mdev)
1454 mlx5_ib_put_native_port_mdev(dev, port);
1455
1456 return err;
1457}
1458
1459static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1460 u16 *pkey)
1461{
1462 switch (mlx5_get_vport_access_method(ibdev)) {
1463 case MLX5_VPORT_ACCESS_METHOD_MAD:
1464 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1465
1466 case MLX5_VPORT_ACCESS_METHOD_HCA:
1467 case MLX5_VPORT_ACCESS_METHOD_NIC:
1468 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1469 default:
1470 return -EINVAL;
1471 }
1472}
1473
1474static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1475 struct ib_device_modify *props)
1476{
1477 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1478 struct mlx5_reg_node_desc in;
1479 struct mlx5_reg_node_desc out;
1480 int err;
1481
1482 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1483 return -EOPNOTSUPP;
1484
1485 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1486 return 0;
1487
1488	/*
1489	 * If possible, pass the node desc to FW so it can generate
1490	 * a Trap 144 notification. If the command fails, just ignore it.
1491	 */
1492 memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1493 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1494 sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1495 if (err)
1496 return err;
1497
1498 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1499
1500 return err;
1501}
1502
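/*
 * Update the IB port capability mask through the HCA vport context.
 * Only bits that the firmware reports as modifiable (cap_mask1_perm)
 * may be changed; any other requested bit fails with -EINVAL.
 */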
1503static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
1504 u32 value)
1505{
1506 struct mlx5_hca_vport_context ctx = {};
1507 struct mlx5_core_dev *mdev;
1508 u32 mdev_port_num;
1509 int err;
1510
1511 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1512 if (!mdev)
1513 return -ENODEV;
1514
1515 err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1516 if (err)
1517 goto out;
1518
1519 if (~ctx.cap_mask1_perm & mask) {
1520 mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
1521 mask, ctx.cap_mask1_perm);
1522 err = -EINVAL;
1523 goto out;
1524 }
1525
1526 ctx.cap_mask1 = value;
1527 ctx.cap_mask1_perm = mask;
1528 err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1529 0, &ctx);
1530
1531out:
1532 mlx5_ib_put_native_port_mdev(dev, port_num);
1533
1534 return err;
1535}
1536
1537static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1538 struct ib_port_modify *props)
1539{
1540 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1541 struct ib_port_attr attr;
1542 u32 tmp;
1543 int err;
1544 u32 change_mask;
1545 u32 value;
1546 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1547 IB_LINK_LAYER_INFINIBAND);
1548
1549	/* The CM layer calls ib_modify_port() regardless of the link layer. For
1550	 * Ethernet ports, QKey violations and port capabilities are meaningless.
1551	 */
1552 if (!is_ib)
1553 return 0;
1554
1555 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1556 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1557 value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1558 return set_port_caps_atomic(dev, port, change_mask, value);
1559 }
1560
1561 mutex_lock(&dev->cap_mask_mutex);
1562
1563 err = ib_query_port(ibdev, port, &attr);
1564 if (err)
1565 goto out;
1566
1567 tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1568 ~props->clr_port_cap_mask;
1569
1570 err = mlx5_set_port_caps(dev->mdev, port, tmp);
1571
1572out:
1573 mutex_unlock(&dev->cap_mask_mutex);
1574 return err;
1575}
1576
1577static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1578{
1579 mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1580 caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1581}
1582
1583static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1584{
1585	/* A large system page without 4K UAR support might limit the dynamic size */
1586 if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
1587 return MLX5_MIN_DYN_BFREGS;
1588
1589 return MLX5_MAX_DYN_BFREGS;
1590}
1591
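/*
 * Compute the bfreg (blue flame register) layout for a new user context:
 * round the user's requested count up to whole system pages, append a pool
 * of dynamically allocatable bfregs, and record the static and overall
 * system-page counts in bfregi.
 */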
1592static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1593 struct mlx5_ib_alloc_ucontext_req_v2 *req,
1594 struct mlx5_bfreg_info *bfregi)
1595{
1596 int uars_per_sys_page;
1597 int bfregs_per_sys_page;
1598 int ref_bfregs = req->total_num_bfregs;
1599
1600 if (req->total_num_bfregs == 0)
1601 return -EINVAL;
1602
1603 BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1604 BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1605
1606 if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1607 return -ENOMEM;
1608
1609 uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1610 bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1611	/* This holds the static allocation requested by the user */
1612 req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1613 if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1614 return -EINVAL;
1615
1616 bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1617 bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1618 bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1619 bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1620
1621 mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1622 MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1623 lib_uar_4k ? "yes" : "no", ref_bfregs,
1624 req->total_num_bfregs, bfregi->total_num_bfregs,
1625 bfregi->num_sys_pages);
1626
1627 return 0;
1628}
1629
1630static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1631{
1632 struct mlx5_bfreg_info *bfregi;
1633 int err;
1634 int i;
1635
1636 bfregi = &context->bfregi;
1637 for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1638 err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
1639 context->devx_uid);
1640 if (err)
1641 goto error;
1642
1643 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1644 }
1645
1646 for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1647 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1648
1649 return 0;
1650
1651error:
1652 for (--i; i >= 0; i--)
1653 if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1654 context->devx_uid))
1655 mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1656
1657 return err;
1658}
1659
1660static void deallocate_uars(struct mlx5_ib_dev *dev,
1661 struct mlx5_ib_ucontext *context)
1662{
1663 struct mlx5_bfreg_info *bfregi;
1664 int i;
1665
1666 bfregi = &context->bfregi;
1667 for (i = 0; i < bfregi->num_sys_pages; i++)
1668 if (i < bfregi->num_static_sys_pages ||
1669 bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1670 mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1671 context->devx_uid);
1672}
1673
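/*
 * Reference-counted enabling of local loopback: the counters track user
 * transport domains and QPs that need it, and loopback is switched on in
 * firmware once the TD count reaches two or the QP count reaches one.
 * mlx5_ib_disable_lb() switches it back off when the counts fall back to
 * one TD and zero QPs.
 */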
1674int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1675{
1676 int err = 0;
1677
1678 mutex_lock(&dev->lb.mutex);
1679 if (td)
1680 dev->lb.user_td++;
1681 if (qp)
1682 dev->lb.qps++;
1683
1684 if (dev->lb.user_td == 2 ||
1685 dev->lb.qps == 1) {
1686 if (!dev->lb.enabled) {
1687 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1688 dev->lb.enabled = true;
1689 }
1690 }
1691
1692 mutex_unlock(&dev->lb.mutex);
1693
1694 return err;
1695}
1696
1697void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1698{
1699 mutex_lock(&dev->lb.mutex);
1700 if (td)
1701 dev->lb.user_td--;
1702 if (qp)
1703 dev->lb.qps--;
1704
1705 if (dev->lb.user_td == 1 &&
1706 dev->lb.qps == 0) {
1707 if (dev->lb.enabled) {
1708 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1709 dev->lb.enabled = false;
1710 }
1711 }
1712
1713 mutex_unlock(&dev->lb.mutex);
1714}
1715
1716static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1717 u16 uid)
1718{
1719 int err;
1720
1721 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1722 return 0;
1723
1724 err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1725 if (err)
1726 return err;
1727
1728 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1729 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1730 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1731 return err;
1732
1733 return mlx5_ib_enable_lb(dev, true, false);
1734}
1735
1736static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1737 u16 uid)
1738{
1739 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1740 return;
1741
1742 mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1743
1744 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1745 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1746 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1747 return;
1748
1749 mlx5_ib_disable_lb(dev, true, false);
1750}
1751
1752static int set_ucontext_resp(struct ib_ucontext *uctx,
1753 struct mlx5_ib_alloc_ucontext_resp *resp)
1754{
1755 struct ib_device *ibdev = uctx->device;
1756 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1757 struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1758 struct mlx5_bfreg_info *bfregi = &context->bfregi;
1759 int err;
1760
1761 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1762 err = mlx5_cmd_dump_fill_mkey(dev->mdev,
1763 &resp->dump_fill_mkey);
1764 if (err)
1765 return err;
1766 resp->comp_mask |=
1767 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1768 }
1769
1770 resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1771 if (dev->wc_support)
1772 resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
1773 log_bf_reg_size);
1774 resp->cache_line_size = cache_line_size();
1775 resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1776 resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1777 resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1778 resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1779 resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1780 resp->cqe_version = context->cqe_version;
1781 resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1782 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1783 resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1784 MLX5_CAP_GEN(dev->mdev,
1785 num_of_uars_per_page) : 1;
1786 resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
1787 bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
1788 resp->num_ports = dev->num_ports;
1789 resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1790 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1791
1792 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1793 mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
1794 resp->eth_min_inline++;
1795 }
1796
1797 if (dev->mdev->clock_info)
1798 resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1799
1800	/*
1801	 * We don't want to expose information from the PCI bar that is located
1802	 * after 4096 bytes, so if the arch only supports larger pages, let's
1803	 * pretend we don't support reading the HCA's core clock. This is also
1804	 * enforced by the mmap handler.
1805	 */
1806 if (PAGE_SIZE <= 4096) {
1807 resp->comp_mask |=
1808 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1809 resp->hca_core_clock_offset =
1810 offsetof(struct mlx5_init_seg,
1811 internal_timer_h) % PAGE_SIZE;
1812 }
1813
1814 if (MLX5_CAP_GEN(dev->mdev, ece_support))
1815 resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
1816
1817 if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
1818 rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
1819 rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
1820 resp->comp_mask |=
1821 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
1822
1823 resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
1824
1825 if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
1826 resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
1827
1828 resp->comp_mask |=
1829 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
1830
1831 return 0;
1832}
1833
1834static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1835 struct ib_udata *udata)
1836{
1837 struct ib_device *ibdev = uctx->device;
1838 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1839 struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1840 struct mlx5_ib_alloc_ucontext_resp resp = {};
1841 struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1842 struct mlx5_bfreg_info *bfregi;
1843 int ver;
1844 int err;
1845 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1846 max_cqe_version);
1847 bool lib_uar_4k;
1848 bool lib_uar_dyn;
1849
1850 if (!dev->ib_active)
1851 return -EAGAIN;
1852
1853 if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1854 ver = 0;
1855 else if (udata->inlen >= min_req_v2)
1856 ver = 2;
1857 else
1858 return -EINVAL;
1859
1860 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1861 if (err)
1862 return err;
1863
1864 if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
1865 return -EOPNOTSUPP;
1866
1867 if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1868 return -EOPNOTSUPP;
1869
1870 req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1871 MLX5_NON_FP_BFREGS_PER_UAR);
1872 if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1873 return -EINVAL;
1874
1875 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1876 err = mlx5_ib_devx_create(dev, true);
1877 if (err < 0)
1878 goto out_ctx;
1879 context->devx_uid = err;
1880 }
1881
1882 lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1883 lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
1884 bfregi = &context->bfregi;
1885
1886 if (lib_uar_dyn) {
1887 bfregi->lib_uar_dyn = lib_uar_dyn;
1888 goto uar_done;
1889 }
1890
1891 /* updates req->total_num_bfregs */
1892 err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1893 if (err)
1894 goto out_devx;
1895
1896 mutex_init(&bfregi->lock);
1897 bfregi->lib_uar_4k = lib_uar_4k;
1898 bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
1899 GFP_KERNEL);
1900 if (!bfregi->count) {
1901 err = -ENOMEM;
1902 goto out_devx;
1903 }
1904
1905 bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1906 sizeof(*bfregi->sys_pages),
1907 GFP_KERNEL);
1908 if (!bfregi->sys_pages) {
1909 err = -ENOMEM;
1910 goto out_count;
1911 }
1912
1913 err = allocate_uars(dev, context);
1914 if (err)
1915 goto out_sys_pages;
1916
1917uar_done:
1918 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1919 context->devx_uid);
1920 if (err)
1921 goto out_uars;
1922
1923 INIT_LIST_HEAD(&context->db_page_list);
1924 mutex_init(&context->db_page_mutex);
1925
1926 context->cqe_version = min_t(__u8,
1927 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1928 req.max_cqe_version);
1929
1930 err = set_ucontext_resp(uctx, &resp);
1931 if (err)
1932 goto out_mdev;
1933
1934 resp.response_length = min(udata->outlen, sizeof(resp));
1935 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1936 if (err)
1937 goto out_mdev;
1938
1939 bfregi->ver = ver;
1940 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1941 context->lib_caps = req.lib_caps;
1942 print_lib_caps(dev, context->lib_caps);
1943
1944 if (mlx5_ib_lag_should_assign_affinity(dev)) {
1945 u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
1946
1947 atomic_set(&context->tx_port_affinity,
1948 atomic_add_return(
1949 1, &dev->port[port].roce.tx_port_affinity));
1950 }
1951
1952 return 0;
1953
1954out_mdev:
1955 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1956
1957out_uars:
1958 deallocate_uars(dev, context);
1959
1960out_sys_pages:
1961 kfree(bfregi->sys_pages);
1962
1963out_count:
1964 kfree(bfregi->count);
1965
1966out_devx:
1967 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1968 mlx5_ib_devx_destroy(dev, context->devx_uid);
1969
1970out_ctx:
1971 return err;
1972}
1973
1974static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext,
1975 struct uverbs_attr_bundle *attrs)
1976{
1977 struct mlx5_ib_alloc_ucontext_resp uctx_resp = {};
1978 int ret;
1979
1980 ret = set_ucontext_resp(ibcontext, &uctx_resp);
1981 if (ret)
1982 return ret;
1983
1984 uctx_resp.response_length =
1985 min_t(size_t,
1986 uverbs_attr_get_len(attrs,
1987 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX),
1988 sizeof(uctx_resp));
1989
1990 ret = uverbs_copy_to_struct_or_zero(attrs,
1991 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
1992 &uctx_resp,
1993 sizeof(uctx_resp));
1994 return ret;
1995}
1996
1997static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1998{
1999 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2000 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2001 struct mlx5_bfreg_info *bfregi;
2002
2003 bfregi = &context->bfregi;
2004 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
2005
2006 deallocate_uars(dev, context);
2007 kfree(bfregi->sys_pages);
2008 kfree(bfregi->count);
2009
2010 if (context->devx_uid)
2011 mlx5_ib_devx_destroy(dev, context->devx_uid);
2012}
2013
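/*
 * Translate a UAR index into the page frame number of its doorbell page in
 * the device BAR. With 4K UAR support (uar_4k) MLX5_UARS_IN_PAGE firmware
 * UARs share a single system page; otherwise each UAR gets its own page.
 */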
2014static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2015 int uar_idx)
2016{
2017 int fw_uars_per_page;
2018
2019 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2020
2021 return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2022}
2023
2024static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
2025 int uar_idx)
2026{
2027 unsigned int fw_uars_per_page;
2028
2029 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2030 MLX5_UARS_IN_PAGE : 1;
2031
2032 return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
2033}
2034
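/*
 * Helpers for decoding the mmap offset (vm_pgoff) passed in by userspace:
 * the command is carried in the bits above MLX5_IB_MMAP_CMD_SHIFT and the
 * argument (e.g. a UAR index) in the bits below it. get_extended_index()
 * additionally folds in a high byte so indexes above 255 can be expressed.
 */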
2035static int get_command(unsigned long offset)
2036{
2037 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2038}
2039
2040static int get_arg(unsigned long offset)
2041{
2042 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2043}
2044
2045static int get_index(unsigned long offset)
2046{
2047 return get_arg(offset);
2048}
2049
2050/* Index resides in an extra byte to enable larger values than 255 */
2051static int get_extended_index(unsigned long offset)
2052{
2053 return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2054}
2055
2056
2057static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2058{
2059}
2060
2061static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2062{
2063 switch (cmd) {
2064 case MLX5_IB_MMAP_WC_PAGE:
2065 return "WC";
2066 case MLX5_IB_MMAP_REGULAR_PAGE:
2067 return "best effort WC";
2068 case MLX5_IB_MMAP_NC_PAGE:
2069 return "NC";
2070 case MLX5_IB_MMAP_DEVICE_MEM:
2071 return "Device Memory";
2072 default:
2073 return NULL;
2074 }
2075}
2076
2077static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2078 struct vm_area_struct *vma,
2079 struct mlx5_ib_ucontext *context)
2080{
2081 if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
2082 !(vma->vm_flags & VM_SHARED))
2083 return -EINVAL;
2084
2085 if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2086 return -EOPNOTSUPP;
2087
2088 if (vma->vm_flags & (VM_WRITE | VM_EXEC))
2089 return -EPERM;
2090 vma->vm_flags &= ~VM_MAYWRITE;
2091
2092 if (!dev->mdev->clock_info)
2093 return -EOPNOTSUPP;
2094
2095 return vm_insert_page(vma, vma->vm_start,
2096 virt_to_page(dev->mdev->clock_info));
2097}
2098
2099static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
2100{
2101 struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
2102 struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
2103 struct mlx5_var_table *var_table = &dev->var_table;
2104 struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);
2105
2106 switch (mentry->mmap_flag) {
2107 case MLX5_IB_MMAP_TYPE_MEMIC:
2108 case MLX5_IB_MMAP_TYPE_MEMIC_OP:
2109 mlx5_ib_dm_mmap_free(dev, mentry);
2110 break;
2111 case MLX5_IB_MMAP_TYPE_VAR:
2112 mutex_lock(&var_table->bitmap_lock);
2113 clear_bit(mentry->page_idx, var_table->bitmap);
2114 mutex_unlock(&var_table->bitmap_lock);
2115 kfree(mentry);
2116 break;
2117 case MLX5_IB_MMAP_TYPE_UAR_WC:
2118 case MLX5_IB_MMAP_TYPE_UAR_NC:
2119 mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
2120 context->devx_uid);
2121 kfree(mentry);
2122 break;
2123 default:
2124 WARN_ON(true);
2125 }
2126}
2127
2128static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2129 struct vm_area_struct *vma,
2130 struct mlx5_ib_ucontext *context)
2131{
2132 struct mlx5_bfreg_info *bfregi = &context->bfregi;
2133 int err;
2134 unsigned long idx;
2135 phys_addr_t pfn;
2136 pgprot_t prot;
2137 u32 bfreg_dyn_idx = 0;
2138 u32 uar_index;
2139 int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2140 int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2141 bfregi->num_static_sys_pages;
2142
2143 if (bfregi->lib_uar_dyn)
2144 return -EINVAL;
2145
2146 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2147 return -EINVAL;
2148
2149 if (dyn_uar)
2150 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2151 else
2152 idx = get_index(vma->vm_pgoff);
2153
2154 if (idx >= max_valid_idx) {
2155 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2156 idx, max_valid_idx);
2157 return -EINVAL;
2158 }
2159
2160 switch (cmd) {
2161 case MLX5_IB_MMAP_WC_PAGE:
2162 case MLX5_IB_MMAP_ALLOC_WC:
2163 case MLX5_IB_MMAP_REGULAR_PAGE:
2164		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best effort to get WC */
2165 prot = pgprot_writecombine(vma->vm_page_prot);
2166 break;
2167 case MLX5_IB_MMAP_NC_PAGE:
2168 prot = pgprot_noncached(vma->vm_page_prot);
2169 break;
2170 default:
2171 return -EINVAL;
2172 }
2173
2174 if (dyn_uar) {
2175 int uars_per_page;
2176
2177 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2178 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2179 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2180 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2181 bfreg_dyn_idx, bfregi->total_num_bfregs);
2182 return -EINVAL;
2183 }
2184
2185 mutex_lock(&bfregi->lock);
2186		/* Fail if the UAR is already allocated; the first bfreg index of
2187		 * each page holds its count.
2188		 */
2189 if (bfregi->count[bfreg_dyn_idx]) {
2190 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2191 mutex_unlock(&bfregi->lock);
2192 return -EINVAL;
2193 }
2194
2195 bfregi->count[bfreg_dyn_idx]++;
2196 mutex_unlock(&bfregi->lock);
2197
2198 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
2199 context->devx_uid);
2200 if (err) {
2201 mlx5_ib_warn(dev, "UAR alloc failed\n");
2202 goto free_bfreg;
2203 }
2204 } else {
2205 uar_index = bfregi->sys_pages[idx];
2206 }
2207
2208 pfn = uar_index2pfn(dev, uar_index);
2209 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2210
2211 err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2212 prot, NULL);
2213 if (err) {
2214 mlx5_ib_err(dev,
2215 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2216 err, mmap_cmd2str(cmd));
2217 goto err;
2218 }
2219
2220 if (dyn_uar)
2221 bfregi->sys_pages[idx] = uar_index;
2222 return 0;
2223
2224err:
2225 if (!dyn_uar)
2226 return err;
2227
2228 mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
2229
2230free_bfreg:
2231 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2232
2233 return err;
2234}
2235
2236static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
2237{
2238 unsigned long idx;
2239 u8 command;
2240
2241 command = get_command(vma->vm_pgoff);
2242 idx = get_extended_index(vma->vm_pgoff);
2243
2244 return (command << 16 | idx);
2245}
2246
2247static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
2248 struct vm_area_struct *vma,
2249 struct ib_ucontext *ucontext)
2250{
2251 struct mlx5_user_mmap_entry *mentry;
2252 struct rdma_user_mmap_entry *entry;
2253 unsigned long pgoff;
2254 pgprot_t prot;
2255 phys_addr_t pfn;
2256 int ret;
2257
2258 pgoff = mlx5_vma_to_pgoff(vma);
2259 entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
2260 if (!entry)
2261 return -EINVAL;
2262
2263 mentry = to_mmmap(entry);
2264 pfn = (mentry->address >> PAGE_SHIFT);
2265 if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
2266 mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
2267 prot = pgprot_noncached(vma->vm_page_prot);
2268 else
2269 prot = pgprot_writecombine(vma->vm_page_prot);
2270 ret = rdma_user_mmap_io(ucontext, vma, pfn,
2271 entry->npages * PAGE_SIZE,
2272 prot,
2273 entry);
2274 rdma_user_mmap_entry_put(&mentry->rdma_entry);
2275 return ret;
2276}
2277
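/*
 * Rebuild the user-visible mmap offset (in bytes) from an rdma mmap entry;
 * this is the inverse of the command/extended-index encoding decoded by
 * get_command() and get_extended_index() above.
 */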
2278static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
2279{
2280 u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
2281 u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
2282
2283 return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
2284 (index & 0xFF)) << PAGE_SHIFT;
2285}
2286
2287static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2288{
2289 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2290 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2291 unsigned long command;
2292 phys_addr_t pfn;
2293
2294 command = get_command(vma->vm_pgoff);
2295 switch (command) {
2296 case MLX5_IB_MMAP_WC_PAGE:
2297 case MLX5_IB_MMAP_ALLOC_WC:
2298 if (!dev->wc_support)
2299 return -EPERM;
2300 fallthrough;
2301 case MLX5_IB_MMAP_NC_PAGE:
2302 case MLX5_IB_MMAP_REGULAR_PAGE:
2303 return uar_mmap(dev, command, vma, context);
2304
2305 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2306 return -ENOSYS;
2307
2308 case MLX5_IB_MMAP_CORE_CLOCK:
2309 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2310 return -EINVAL;
2311
2312 if (vma->vm_flags & VM_WRITE)
2313 return -EPERM;
2314 vma->vm_flags &= ~VM_MAYWRITE;
2315
2316 /* Don't expose to user-space information it shouldn't have */
2317 if (PAGE_SIZE > 4096)
2318 return -EOPNOTSUPP;
2319
2320 pfn = (dev->mdev->iseg_base +
2321 offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2322 PAGE_SHIFT;
2323 return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
2324 PAGE_SIZE,
2325 pgprot_noncached(vma->vm_page_prot),
2326 NULL);
2327 case MLX5_IB_MMAP_CLOCK_INFO:
2328 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2329
2330 default:
2331 return mlx5_ib_mmap_offset(dev, vma, ibcontext);
2332 }
2333
2334 return 0;
2335}
2336
2337static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
2338{
2339 struct mlx5_ib_pd *pd = to_mpd(ibpd);
2340 struct ib_device *ibdev = ibpd->device;
2341 struct mlx5_ib_alloc_pd_resp resp;
2342 int err;
2343 u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2344 u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
2345 u16 uid = 0;
2346 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
2347 udata, struct mlx5_ib_ucontext, ibucontext);
2348
2349 uid = context ? context->devx_uid : 0;
2350 MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2351 MLX5_SET(alloc_pd_in, in, uid, uid);
2352 err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2353 if (err)
2354 return err;
2355
2356 pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2357 pd->uid = uid;
2358 if (udata) {
2359 resp.pdn = pd->pdn;
2360 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2361 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2362 return -EFAULT;
2363 }
2364 }
2365
2366 return 0;
2367}
2368
2369static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
2370{
2371 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2372 struct mlx5_ib_pd *mpd = to_mpd(pd);
2373
2374 return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2375}
2376
2377static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2378{
2379 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2380 struct mlx5_ib_qp *mqp = to_mqp(ibqp);
2381 int err;
2382 u16 uid;
2383
2384 uid = ibqp->pd ?
2385 to_mpd(ibqp->pd)->uid : 0;
2386
2387 if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
2388 mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
2389 return -EOPNOTSUPP;
2390 }
2391
2392 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2393 if (err)
2394 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2395 ibqp->qp_num, gid->raw);
2396
2397 return err;
2398}
2399
2400static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2401{
2402 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2403 int err;
2404 u16 uid;
2405
2406 uid = ibqp->pd ?
2407 to_mpd(ibqp->pd)->uid : 0;
2408 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2409 if (err)
2410 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2411 ibqp->qp_num, gid->raw);
2412
2413 return err;
2414}
2415
2416static int init_node_data(struct mlx5_ib_dev *dev)
2417{
2418 int err;
2419
2420 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2421 if (err)
2422 return err;
2423
2424 dev->mdev->rev_id = dev->mdev->pdev->revision;
2425
2426 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2427}
2428
2429static ssize_t fw_pages_show(struct device *device,
2430 struct device_attribute *attr, char *buf)
2431{
2432 struct mlx5_ib_dev *dev =
2433 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2434
2435 return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
2436}
2437static DEVICE_ATTR_RO(fw_pages);
2438
2439static ssize_t reg_pages_show(struct device *device,
2440 struct device_attribute *attr, char *buf)
2441{
2442 struct mlx5_ib_dev *dev =
2443 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2444
2445 return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2446}
2447static DEVICE_ATTR_RO(reg_pages);
2448
2449static ssize_t hca_type_show(struct device *device,
2450 struct device_attribute *attr, char *buf)
2451{
2452 struct mlx5_ib_dev *dev =
2453 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2454
2455 return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
2456}
2457static DEVICE_ATTR_RO(hca_type);
2458
2459static ssize_t hw_rev_show(struct device *device,
2460 struct device_attribute *attr, char *buf)
2461{
2462 struct mlx5_ib_dev *dev =
2463 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2464
2465 return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
2466}
2467static DEVICE_ATTR_RO(hw_rev);
2468
2469static ssize_t board_id_show(struct device *device,
2470 struct device_attribute *attr, char *buf)
2471{
2472 struct mlx5_ib_dev *dev =
2473 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
2474
2475 return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
2476 dev->mdev->board_id);
2477}
2478static DEVICE_ATTR_RO(board_id);
2479
2480static struct attribute *mlx5_class_attributes[] = {
2481 &dev_attr_hw_rev.attr,
2482 &dev_attr_hca_type.attr,
2483 &dev_attr_board_id.attr,
2484 &dev_attr_fw_pages.attr,
2485 &dev_attr_reg_pages.attr,
2486 NULL,
2487};
2488
2489static const struct attribute_group mlx5_attr_group = {
2490 .attrs = mlx5_class_attributes,
2491};
2492
2493static void pkey_change_handler(struct work_struct *work)
2494{
2495 struct mlx5_ib_port_resources *ports =
2496 container_of(work, struct mlx5_ib_port_resources,
2497 pkey_change_work);
2498
2499 if (!ports->gsi)
2500		/*
2501		 * We got this event before the device was fully configured
2502		 * and the MAD registration code hasn't run or finished yet.
2503		 */
2504 return;
2505
2506 mlx5_ib_gsi_pkey_change(ports->gsi);
2507}
2508
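/*
 * On a fatal device error, walk every QP on this ibdev and collect the send
 * and receive CQs that still have outstanding work into cq_armed_list, then
 * invoke their completion handlers so consumers can observe the failure.
 */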
2509static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2510{
2511 struct mlx5_ib_qp *mqp;
2512 struct mlx5_ib_cq *send_mcq, *recv_mcq;
2513 struct mlx5_core_cq *mcq;
2514 struct list_head cq_armed_list;
2515 unsigned long flags_qp;
2516 unsigned long flags_cq;
2517 unsigned long flags;
2518
2519 INIT_LIST_HEAD(&cq_armed_list);
2520
2521	/* Go over the QP list residing on that ibdev, synced with QP create/destroy. */
2522 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2523 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2524 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2525 if (mqp->sq.tail != mqp->sq.head) {
2526 send_mcq = to_mcq(mqp->ibqp.send_cq);
2527 spin_lock_irqsave(&send_mcq->lock, flags_cq);
2528 if (send_mcq->mcq.comp &&
2529 mqp->ibqp.send_cq->comp_handler) {
2530 if (!send_mcq->mcq.reset_notify_added) {
2531 send_mcq->mcq.reset_notify_added = 1;
2532 list_add_tail(&send_mcq->mcq.reset_notify,
2533 &cq_armed_list);
2534 }
2535 }
2536 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2537 }
2538 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2539 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2540 /* no handling is needed for SRQ */
2541 if (!mqp->ibqp.srq) {
2542 if (mqp->rq.tail != mqp->rq.head) {
2543 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2544 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2545 if (recv_mcq->mcq.comp &&
2546 mqp->ibqp.recv_cq->comp_handler) {
2547 if (!recv_mcq->mcq.reset_notify_added) {
2548 recv_mcq->mcq.reset_notify_added = 1;
2549 list_add_tail(&recv_mcq->mcq.reset_notify,
2550 &cq_armed_list);
2551 }
2552 }
2553 spin_unlock_irqrestore(&recv_mcq->lock,
2554 flags_cq);
2555 }
2556 }
2557 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2558 }
2559	/* At this point all in-flight posted sends have been observed thanks to
2560	 * the lock/unlock sequence above. Now arm all involved CQs.
2561	 */
2562 list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
2563 mcq->comp(mcq, NULL);
2564 }
2565 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2566}
2567
2568static void delay_drop_handler(struct work_struct *work)
2569{
2570 int err;
2571 struct mlx5_ib_delay_drop *delay_drop =
2572 container_of(work, struct mlx5_ib_delay_drop,
2573 delay_drop_work);
2574
2575 atomic_inc(&delay_drop->events_cnt);
2576
2577 mutex_lock(&delay_drop->lock);
2578 err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
2579 if (err) {
2580 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
2581 delay_drop->timeout);
2582 delay_drop->activate = false;
2583 }
2584 mutex_unlock(&delay_drop->lock);
2585}
2586
2587static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2588 struct ib_event *ibev)
2589{
2590 u32 port = (eqe->data.port.port >> 4) & 0xf;
2591
2592 switch (eqe->sub_type) {
2593 case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
2594 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2595 IB_LINK_LAYER_ETHERNET)
2596 schedule_work(&ibdev->delay_drop.delay_drop_work);
2597 break;
2598 default: /* do nothing */
2599 return;
2600 }
2601}
2602
2603static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
2604 struct ib_event *ibev)
2605{
2606 u32 port = (eqe->data.port.port >> 4) & 0xf;
2607
2608 ibev->element.port_num = port;
2609
2610 switch (eqe->sub_type) {
2611 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
2612 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
2613 case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
2614 /* In RoCE, port up/down events are handled in
2615 * mlx5_netdev_event().
2616 */
2617 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
2618 IB_LINK_LAYER_ETHERNET)
2619 return -EINVAL;
2620
2621 ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
2622 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2623 break;
2624
2625 case MLX5_PORT_CHANGE_SUBTYPE_LID:
2626 ibev->event = IB_EVENT_LID_CHANGE;
2627 break;
2628
2629 case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
2630 ibev->event = IB_EVENT_PKEY_CHANGE;
2631 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
2632 break;
2633
2634 case MLX5_PORT_CHANGE_SUBTYPE_GUID:
2635 ibev->event = IB_EVENT_GID_CHANGE;
2636 break;
2637
2638 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
2639 ibev->event = IB_EVENT_CLIENT_REREGISTER;
2640 break;
2641 default:
2642 return -EINVAL;
2643 }
2644
2645 return 0;
2646}
2647
2648static void mlx5_ib_handle_event(struct work_struct *_work)
2649{
2650 struct mlx5_ib_event_work *work =
2651 container_of(_work, struct mlx5_ib_event_work, work);
2652 struct mlx5_ib_dev *ibdev;
2653 struct ib_event ibev;
2654 bool fatal = false;
2655
2656 if (work->is_slave) {
2657 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
2658 if (!ibdev)
2659 goto out;
2660 } else {
2661 ibdev = work->dev;
2662 }
2663
2664 switch (work->event) {
2665 case MLX5_DEV_EVENT_SYS_ERROR:
2666 ibev.event = IB_EVENT_DEVICE_FATAL;
2667 mlx5_ib_handle_internal_error(ibdev);
2668 ibev.element.port_num = (u8)(unsigned long)work->param;
2669 fatal = true;
2670 break;
2671 case MLX5_EVENT_TYPE_PORT_CHANGE:
2672 if (handle_port_change(ibdev, work->param, &ibev))
2673 goto out;
2674 break;
2675 case MLX5_EVENT_TYPE_GENERAL_EVENT:
2676 handle_general_event(ibdev, work->param, &ibev);
2677 fallthrough;
2678 default:
2679 goto out;
2680 }
2681
2682 ibev.device = &ibdev->ib_dev;
2683
2684 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
2685 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
2686 goto out;
2687 }
2688
2689 if (ibdev->ib_active)
2690 ib_dispatch_event(&ibev);
2691
2692 if (fatal)
2693 ibdev->ib_active = false;
2694out:
2695 kfree(work);
2696}
2697
2698static int mlx5_ib_event(struct notifier_block *nb,
2699 unsigned long event, void *param)
2700{
2701 struct mlx5_ib_event_work *work;
2702
2703 work = kmalloc(sizeof(*work), GFP_ATOMIC);
2704 if (!work)
2705 return NOTIFY_DONE;
2706
2707 INIT_WORK(&work->work, mlx5_ib_handle_event);
2708 work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
2709 work->is_slave = false;
2710 work->param = param;
2711 work->event = event;
2712
2713 queue_work(mlx5_ib_event_wq, &work->work);
2714
2715 return NOTIFY_OK;
2716}
2717
2718static int mlx5_ib_event_slave_port(struct notifier_block *nb,
2719 unsigned long event, void *param)
2720{
2721 struct mlx5_ib_event_work *work;
2722
2723 work = kmalloc(sizeof(*work), GFP_ATOMIC);
2724 if (!work)
2725 return NOTIFY_DONE;
2726
2727 INIT_WORK(&work->work, mlx5_ib_handle_event);
2728 work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
2729 work->is_slave = true;
2730 work->param = param;
2731 work->event = event;
2732 queue_work(mlx5_ib_event_wq, &work->work);
2733
2734 return NOTIFY_OK;
2735}
2736
2737static int set_has_smi_cap(struct mlx5_ib_dev *dev)
2738{
2739 struct mlx5_hca_vport_context vport_ctx;
2740 int err;
2741 int port;
2742
2743 if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
2744 return 0;
2745
2746 for (port = 1; port <= dev->num_ports; port++) {
2747 if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
2748 dev->port_caps[port - 1].has_smi = true;
2749 continue;
2750 }
2751 err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
2752 &vport_ctx);
2753 if (err) {
2754 mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
2755 port, err);
2756 return err;
2757 }
2758 dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
2759 }
2760
2761 return 0;
2762}
2763
2764static void get_ext_port_caps(struct mlx5_ib_dev *dev)
2765{
2766 unsigned int port;
2767
2768 rdma_for_each_port (&dev->ib_dev, port)
2769 mlx5_query_ext_port_caps(dev, port);
2770}
2771
2772static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
2773{
2774 switch (umr_fence_cap) {
2775 case MLX5_CAP_UMR_FENCE_NONE:
2776 return MLX5_FENCE_MODE_NONE;
2777 case MLX5_CAP_UMR_FENCE_SMALL:
2778 return MLX5_FENCE_MODE_INITIATOR_SMALL;
2779 default:
2780 return MLX5_FENCE_MODE_STRONG_ORDERING;
2781 }
2782}
2783
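/*
 * Create the shared device resources kept in dev->devr: a PD, a CQ, two XRC
 * domains, an XRC SRQ and a basic SRQ, and initialize the per-port P_Key
 * change work handlers. Requires XRC support in the device.
 */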
2784static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
2785{
2786 struct mlx5_ib_resources *devr = &dev->devr;
2787 struct ib_srq_init_attr attr;
2788 struct ib_device *ibdev;
2789 struct ib_cq_init_attr cq_attr = {.cqe = 1};
2790 int port;
2791 int ret = 0;
2792
2793 ibdev = &dev->ib_dev;
2794
2795 if (!MLX5_CAP_GEN(dev->mdev, xrc))
2796 return -EOPNOTSUPP;
2797
2798 devr->p0 = ib_alloc_pd(ibdev, 0);
2799 if (IS_ERR(devr->p0))
2800 return PTR_ERR(devr->p0);
2801
2802 devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
2803 if (IS_ERR(devr->c0)) {
2804 ret = PTR_ERR(devr->c0);
2805 goto error1;
2806 }
2807
2808 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
2809 if (ret)
2810 goto error2;
2811
2812 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
2813 if (ret)
2814 goto error3;
2815
2816 memset(&attr, 0, sizeof(attr));
2817 attr.attr.max_sge = 1;
2818 attr.attr.max_wr = 1;
2819 attr.srq_type = IB_SRQT_XRC;
2820 attr.ext.cq = devr->c0;
2821
2822 devr->s0 = ib_create_srq(devr->p0, &attr);
2823 if (IS_ERR(devr->s0)) {
2824 ret = PTR_ERR(devr->s0);
2825 goto err_create;
2826 }
2827
2828 memset(&attr, 0, sizeof(attr));
2829 attr.attr.max_sge = 1;
2830 attr.attr.max_wr = 1;
2831 attr.srq_type = IB_SRQT_BASIC;
2832
2833 devr->s1 = ib_create_srq(devr->p0, &attr);
2834 if (IS_ERR(devr->s1)) {
2835 ret = PTR_ERR(devr->s1);
2836 goto error6;
2837 }
2838
2839 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
2840 INIT_WORK(&devr->ports[port].pkey_change_work,
2841 pkey_change_handler);
2842
2843 return 0;
2844
2845error6:
2846 ib_destroy_srq(devr->s0);
2847err_create:
2848 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2849error3:
2850 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2851error2:
2852 ib_destroy_cq(devr->c0);
2853error1:
2854 ib_dealloc_pd(devr->p0);
2855 return ret;
2856}
2857
2858static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
2859{
2860 struct mlx5_ib_resources *devr = &dev->devr;
2861 int port;
2862
2863	/*
2864	 * Make sure no P_Key change work items are still executing.
2865	 *
2866	 * At this stage, mlx5_ib_event should already be unregistered,
2867	 * which ensures that no new work items are queued.
2868	 */
2869 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
2870 cancel_work_sync(&devr->ports[port].pkey_change_work);
2871
2872 ib_destroy_srq(devr->s1);
2873 ib_destroy_srq(devr->s0);
2874 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2875 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2876 ib_destroy_cq(devr->c0);
2877 ib_dealloc_pd(devr->p0);
2878}
2879
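/*
 * Derive the rdma core port capability flags: IB link-layer ports report
 * plain IBA, Ethernet ports get raw-packet support when multi-port isn't
 * enabled, and RoCE v1/v2 flags are added only if the device supports both
 * IPv4 and IPv6 L3 types and the corresponding RoCE versions.
 */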
2880static u32 get_core_cap_flags(struct ib_device *ibdev,
2881 struct mlx5_hca_vport_context *rep)
2882{
2883 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2884 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
2885 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
2886 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
2887 bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
2888 u32 ret = 0;
2889
2890 if (rep->grh_required)
2891 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
2892
2893 if (ll == IB_LINK_LAYER_INFINIBAND)
2894 return ret | RDMA_CORE_PORT_IBA_IB;
2895
2896 if (raw_support)
2897 ret |= RDMA_CORE_PORT_RAW_PACKET;
2898
2899 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
2900 return ret;
2901
2902 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
2903 return ret;
2904
2905 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
2906 ret |= RDMA_CORE_PORT_IBA_ROCE;
2907
2908 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
2909 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2910
2911 return ret;
2912}
2913
2914static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
2915 struct ib_port_immutable *immutable)
2916{
2917 struct ib_port_attr attr;
2918 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2919 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
2920 struct mlx5_hca_vport_context rep = {0};
2921 int err;
2922
2923 err = ib_query_port(ibdev, port_num, &attr);
2924 if (err)
2925 return err;
2926
2927 if (ll == IB_LINK_LAYER_INFINIBAND) {
2928 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
2929 &rep);
2930 if (err)
2931 return err;
2932 }
2933
2934 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2935 immutable->gid_tbl_len = attr.gid_tbl_len;
2936 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
2937 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2938
2939 return 0;
2940}
2941
2942static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
2943 struct ib_port_immutable *immutable)
2944{
2945 struct ib_port_attr attr;
2946 int err;
2947
2948 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
2949
2950 err = ib_query_port(ibdev, port_num, &attr);
2951 if (err)
2952 return err;
2953
2954 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2955 immutable->gid_tbl_len = attr.gid_tbl_len;
2956 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
2957
2958 return 0;
2959}
2960
2961static void get_dev_fw_str(struct ib_device *ibdev, char *str)
2962{
2963 struct mlx5_ib_dev *dev =
2964 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
2965 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
2966 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
2967 fw_rev_sub(dev->mdev));
2968}
2969
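/*
 * If the core device is part of an active hardware LAG, create the vport
 * LAG object and a demux flow table in the LAG namespace, and record the
 * number of LAG ports.
 */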
2970static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
2971{
2972 struct mlx5_core_dev *mdev = dev->mdev;
2973 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
2974 MLX5_FLOW_NAMESPACE_LAG);
2975 struct mlx5_flow_table *ft;
2976 int err;
2977
2978 if (!ns || !mlx5_lag_is_active(mdev))
2979 return 0;
2980
2981 err = mlx5_cmd_create_vport_lag(mdev);
2982 if (err)
2983 return err;
2984
2985 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
2986 if (IS_ERR(ft)) {
2987 err = PTR_ERR(ft);
2988 goto err_destroy_vport_lag;
2989 }
2990
2991 dev->flow_db->lag_demux_ft = ft;
2992 dev->lag_ports = mlx5_lag_get_num_ports(mdev);
2993 dev->lag_active = true;
2994 return 0;
2995
2996err_destroy_vport_lag:
2997 mlx5_cmd_destroy_vport_lag(mdev);
2998 return err;
2999}
3000
3001static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
3002{
3003 struct mlx5_core_dev *mdev = dev->mdev;
3004
3005 if (dev->lag_active) {
3006 dev->lag_active = false;
3007
3008 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
3009 dev->flow_db->lag_demux_ft = NULL;
3010
3011 mlx5_cmd_destroy_vport_lag(mdev);
3012 }
3013}
3014
3015static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
3016{
3017 int err;
3018
3019 dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
3020 err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
3021 if (err) {
3022 dev->port[port_num].roce.nb.notifier_call = NULL;
3023 return err;
3024 }
3025
3026 return 0;
3027}
3028
3029static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
3030{
3031 if (dev->port[port_num].roce.nb.notifier_call) {
3032 unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
3033 dev->port[port_num].roce.nb.notifier_call = NULL;
3034 }
3035}
3036
3037static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
3038{
3039 int err;
3040
3041 if (!dev->is_rep && dev->profile != &raw_eth_profile) {
3042 err = mlx5_nic_vport_enable_roce(dev->mdev);
3043 if (err)
3044 return err;
3045 }
3046
3047 err = mlx5_eth_lag_init(dev);
3048 if (err)
3049 goto err_disable_roce;
3050
3051 return 0;
3052
3053err_disable_roce:
3054 if (!dev->is_rep && dev->profile != &raw_eth_profile)
3055 mlx5_nic_vport_disable_roce(dev->mdev);
3056
3057 return err;
3058}
3059
3060static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
3061{
3062 mlx5_eth_lag_cleanup(dev);
3063 if (!dev->is_rep && dev->profile != &raw_eth_profile)
3064 mlx5_nic_vport_disable_roce(dev->mdev);
3065}
3066
3067static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
3068 enum rdma_netdev_t type,
3069 struct rdma_netdev_alloc_params *params)
3070{
3071 if (type != RDMA_NETDEV_IPOIB)
3072 return -EOPNOTSUPP;
3073
3074 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
3075}
3076
3077static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
3078 size_t count, loff_t *pos)
3079{
3080 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3081 char lbuf[20];
3082 int len;
3083
3084 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
3085 return simple_read_from_buffer(buf, count, pos, lbuf, len);
3086}
3087
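/*
 * debugfs writer for the delay-drop timeout: the value is taken in
 * microseconds, rounded up to a multiple of 100 and clamped to
 * MLX5_MAX_DELAY_DROP_TIMEOUT_MS worth of microseconds.
 */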
3088static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
3089 size_t count, loff_t *pos)
3090{
3091 struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
3092 u32 timeout;
3093 u32 var;
3094
3095 if (kstrtouint_from_user(buf, count, 0, &var))
3096 return -EFAULT;
3097
3098 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
3099 1000);
3100 if (timeout != var)
3101 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
3102 timeout);
3103
3104 delay_drop->timeout = timeout;
3105
3106 return count;
3107}
3108
3109static const struct file_operations fops_delay_drop_timeout = {
3110 .owner = THIS_MODULE,
3111 .open = simple_open,
3112 .write = delay_drop_timeout_write,
3113 .read = delay_drop_timeout_read,
3114};
3115
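/*
 * Detach a slave port from this multiport IB device: clear the mpi<->ibdev
 * linkage, stop the event and netdev notifiers, wait for any outstanding
 * users of the slave mdev to drop their references, and finally
 * unaffiliate the vport in firmware.
 */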
3116static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
3117 struct mlx5_ib_multiport_info *mpi)
3118{
3119 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3120 struct mlx5_ib_port *port = &ibdev->port[port_num];
3121 int comps;
3122 int err;
3123 int i;
3124
3125 lockdep_assert_held(&mlx5_ib_multiport_mutex);
3126
3127 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
3128
3129 spin_lock(&port->mp.mpi_lock);
3130 if (!mpi->ibdev) {
3131 spin_unlock(&port->mp.mpi_lock);
3132 return;
3133 }
3134
3135 mpi->ibdev = NULL;
3136
3137 spin_unlock(&port->mp.mpi_lock);
3138 if (mpi->mdev_events.notifier_call)
3139 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
3140 mpi->mdev_events.notifier_call = NULL;
3141 mlx5_remove_netdev_notifier(ibdev, port_num);
3142 spin_lock(&port->mp.mpi_lock);
3143
3144 comps = mpi->mdev_refcnt;
3145 if (comps) {
3146 mpi->unaffiliate = true;
3147 init_completion(&mpi->unref_comp);
3148 spin_unlock(&port->mp.mpi_lock);
3149
3150 for (i = 0; i < comps; i++)
3151 wait_for_completion(&mpi->unref_comp);
3152
3153 spin_lock(&port->mp.mpi_lock);
3154 mpi->unaffiliate = false;
3155 }
3156
3157 port->mp.mpi = NULL;
3158
3159 spin_unlock(&port->mp.mpi_lock);
3160
3161 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
3162
3163 mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
3164	/* On failure just log an error; we still need to clean up the pointers
3165	 * and add the mpi back to the list.
3166	 */
3167 if (err)
3168 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
3169 port_num + 1);
3170
3171 ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
3172}
3173
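/*
 * Descriptive comment (added): affiliate a slave port with the master IB
 * device. Returns true on success; on any failure the partially set up
 * state is rolled back via mlx5_ib_unbind_slave_port() and false is
 * returned. Must be called with mlx5_ib_multiport_mutex held.
 */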
3174static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
3175 struct mlx5_ib_multiport_info *mpi)
3176{
3177 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3178 int err;
3179
3180 lockdep_assert_held(&mlx5_ib_multiport_mutex);
3181
3182 spin_lock(&ibdev->port[port_num].mp.mpi_lock);
3183 if (ibdev->port[port_num].mp.mpi) {
3184 mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
3185 port_num + 1);
3186 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3187 return false;
3188 }
3189
3190 ibdev->port[port_num].mp.mpi = mpi;
3191 mpi->ibdev = ibdev;
3192 mpi->mdev_events.notifier_call = NULL;
3193 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3194
3195 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3196 if (err)
3197 goto unbind;
3198
3199 err = mlx5_add_netdev_notifier(ibdev, port_num);
3200 if (err) {
3201 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
3202 port_num + 1);
3203 goto unbind;
3204 }
3205
3206 mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
3207 mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
3208
3209 mlx5_ib_init_cong_debugfs(ibdev, port_num);
3210
3211 return true;
3212
3213unbind:
3214 mlx5_ib_unbind_slave_port(ibdev, mpi);
3215 return false;
3216}
3217
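/*
 * Descriptive comment (added): for an Ethernet multiport master, enable
 * RoCE and try to bind every unaffiliated slave port whose system image
 * GUID matches this device. The native port gets a stub mpi so that all
 * ports are handled uniformly, and the device is added to the global
 * mlx5_ib_dev_list.
 */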
3218static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
3219{
3220 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3221 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
3222 port_num + 1);
3223 struct mlx5_ib_multiport_info *mpi;
3224 int err;
3225 u32 i;
3226
3227 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3228 return 0;
3229
3230 err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
3231 &dev->sys_image_guid);
3232 if (err)
3233 return err;
3234
3235 err = mlx5_nic_vport_enable_roce(dev->mdev);
3236 if (err)
3237 return err;
3238
3239 mutex_lock(&mlx5_ib_multiport_mutex);
3240 for (i = 0; i < dev->num_ports; i++) {
3241 bool bound = false;
3242
3243 /* build a stub multiport info struct for the native port. */
3244 if (i == port_num) {
3245 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
3246 if (!mpi) {
3247 mutex_unlock(&mlx5_ib_multiport_mutex);
3248 mlx5_nic_vport_disable_roce(dev->mdev);
3249 return -ENOMEM;
3250 }
3251
3252 mpi->is_master = true;
3253 mpi->mdev = dev->mdev;
3254 mpi->sys_image_guid = dev->sys_image_guid;
3255 dev->port[i].mp.mpi = mpi;
3256 mpi->ibdev = dev;
3257 mpi = NULL;
3258 continue;
3259 }
3260
3261 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
3262 list) {
3263 if (dev->sys_image_guid == mpi->sys_image_guid &&
3264 (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
3265 bound = mlx5_ib_bind_slave_port(dev, mpi);
3266 }
3267
3268 if (bound) {
3269 dev_dbg(mpi->mdev->device,
3270 "removing port from unaffiliated list.\n");
3271 mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
3272 list_del(&mpi->list);
3273 break;
3274 }
3275 }
3276 if (!bound)
3277 mlx5_ib_dbg(dev, "no free port found for port %d\n",
3278 i + 1);
3279 }
3280
3281 list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
3282 mutex_unlock(&mlx5_ib_multiport_mutex);
3283 return err;
3284}
3285
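/*
 * Descriptive comment (added): reverse of mlx5_ib_init_multiport_master() -
 * free the native port stub, return slave ports to the unaffiliated list,
 * drop the device from the global device list and disable RoCE.
 */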
3286static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
3287{
3288 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3289 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
3290 port_num + 1);
3291 u32 i;
3292
3293 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3294 return;
3295
3296 mutex_lock(&mlx5_ib_multiport_mutex);
3297 for (i = 0; i < dev->num_ports; i++) {
3298 if (dev->port[i].mp.mpi) {
3299 /* Destroy the native port stub */
3300 if (i == port_num) {
3301 kfree(dev->port[i].mp.mpi);
3302 dev->port[i].mp.mpi = NULL;
3303 } else {
3304 mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
3305 i + 1);
3306 list_add_tail(&dev->port[i].mp.mpi->list,
3307 &mlx5_ib_unaffiliated_port_list);
3308 mlx5_ib_unbind_slave_port(dev,
3309 dev->port[i].mp.mpi);
3310 }
3311 }
3312 }
3313
3314 mlx5_ib_dbg(dev, "removing from devlist\n");
3315 list_del(&dev->ib_dev_list);
3316 mutex_unlock(&mlx5_ib_multiport_mutex);
3317
3318 mlx5_nic_vport_disable_roce(dev->mdev);
3319}
3320
3321static int mmap_obj_cleanup(struct ib_uobject *uobject,
3322 enum rdma_remove_reason why,
3323 struct uverbs_attr_bundle *attrs)
3324{
3325 struct mlx5_user_mmap_entry *obj = uobject->object;
3326
3327 rdma_user_mmap_entry_remove(&obj->rdma_entry);
3328 return 0;
3329}
3330
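/*
 * Descriptive comment (added): insert a user mmap entry into the range
 * reserved for driver-specific mappings. The mmap command occupies the bits
 * above the low 16 bits of the page offset, so each command between
 * MLX5_IB_MMAP_OFFSET_START and MLX5_IB_MMAP_OFFSET_END gets a 64K-entry
 * window, which is what the shifts below encode.
 */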
3331static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
3332 struct mlx5_user_mmap_entry *entry,
3333 size_t length)
3334{
3335 return rdma_user_mmap_entry_insert_range(
3336 &c->ibucontext, &entry->rdma_entry, length,
3337 (MLX5_IB_MMAP_OFFSET_START << 16),
3338 ((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
3339}
3340
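/*
 * Descriptive comment (added): allocate a VAR doorbell page for userspace -
 * grab a free slot from the var_table bitmap, translate it to its hardware
 * address and expose it through a user mmap entry one stride in size.
 */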
3341static struct mlx5_user_mmap_entry *
3342alloc_var_entry(struct mlx5_ib_ucontext *c)
3343{
3344 struct mlx5_user_mmap_entry *entry;
3345 struct mlx5_var_table *var_table;
3346 u32 page_idx;
3347 int err;
3348
3349 var_table = &to_mdev(c->ibucontext.device)->var_table;
3350 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3351 if (!entry)
3352 return ERR_PTR(-ENOMEM);
3353
3354 mutex_lock(&var_table->bitmap_lock);
3355 page_idx = find_first_zero_bit(var_table->bitmap,
3356 var_table->num_var_hw_entries);
3357 if (page_idx >= var_table->num_var_hw_entries) {
3358 err = -ENOSPC;
3359 mutex_unlock(&var_table->bitmap_lock);
3360 goto end;
3361 }
3362
3363 set_bit(page_idx, var_table->bitmap);
3364 mutex_unlock(&var_table->bitmap_lock);
3365
3366 entry->address = var_table->hw_start_addr +
3367 (page_idx * var_table->stride_size);
3368 entry->page_idx = page_idx;
3369 entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
3370
3371 err = mlx5_rdma_user_mmap_entry_insert(c, entry,
3372 var_table->stride_size);
3373 if (err)
3374 goto err_insert;
3375
3376 return entry;
3377
3378err_insert:
3379 mutex_lock(&var_table->bitmap_lock);
3380 clear_bit(page_idx, var_table->bitmap);
3381 mutex_unlock(&var_table->bitmap_lock);
3382end:
3383 kfree(entry);
3384 return ERR_PTR(err);
3385}
3386
3387static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
3388 struct uverbs_attr_bundle *attrs)
3389{
3390 struct ib_uobject *uobj = uverbs_attr_get_uobject(
3391 attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
3392 struct mlx5_ib_ucontext *c;
3393 struct mlx5_user_mmap_entry *entry;
3394 u64 mmap_offset;
3395 u32 length;
3396 int err;
3397
3398 c = to_mucontext(ib_uverbs_get_ucontext(attrs));
3399 if (IS_ERR(c))
3400 return PTR_ERR(c);
3401
3402 entry = alloc_var_entry(c);
3403 if (IS_ERR(entry))
3404 return PTR_ERR(entry);
3405
3406 mmap_offset = mlx5_entry_to_mmap_offset(entry);
3407 length = entry->rdma_entry.npages * PAGE_SIZE;
3408 uobj->object = entry;
3409 uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
3410
3411 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
3412 &mmap_offset, sizeof(mmap_offset));
3413 if (err)
3414 return err;
3415
3416 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
3417 &entry->page_idx, sizeof(entry->page_idx));
3418 if (err)
3419 return err;
3420
3421 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
3422 &length, sizeof(length));
3423 return err;
3424}
3425
3426DECLARE_UVERBS_NAMED_METHOD(
3427 MLX5_IB_METHOD_VAR_OBJ_ALLOC,
3428 UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
3429 MLX5_IB_OBJECT_VAR,
3430 UVERBS_ACCESS_NEW,
3431 UA_MANDATORY),
3432 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
3433 UVERBS_ATTR_TYPE(u32),
3434 UA_MANDATORY),
3435 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
3436 UVERBS_ATTR_TYPE(u32),
3437 UA_MANDATORY),
3438 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
3439 UVERBS_ATTR_TYPE(u64),
3440 UA_MANDATORY));
3441
3442DECLARE_UVERBS_NAMED_METHOD_DESTROY(
3443 MLX5_IB_METHOD_VAR_OBJ_DESTROY,
3444 UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
3445 MLX5_IB_OBJECT_VAR,
3446 UVERBS_ACCESS_DESTROY,
3447 UA_MANDATORY));
3448
3449DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
3450 UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
3451 &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
3452 &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
3453
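/*
 * Descriptive comment (added): VAR objects are exposed only when the device
 * reports support for the virtio net queue general object.
 */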
3454static bool var_is_supported(struct ib_device *device)
3455{
3456 struct mlx5_ib_dev *dev = to_mdev(device);
3457
3458 return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3459 MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
3460}
3461
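/*
 * Descriptive comment (added): allocate a UAR (User Access Region) page on
 * behalf of userspace and wrap it in a mmap entry; BlueFlame UARs are
 * mapped write-combining, others non-cached.
 */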
3462static struct mlx5_user_mmap_entry *
3463alloc_uar_entry(struct mlx5_ib_ucontext *c,
3464 enum mlx5_ib_uapi_uar_alloc_type alloc_type)
3465{
3466 struct mlx5_user_mmap_entry *entry;
3467 struct mlx5_ib_dev *dev;
3468 u32 uar_index;
3469 int err;
3470
3471 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3472 if (!entry)
3473 return ERR_PTR(-ENOMEM);
3474
3475 dev = to_mdev(c->ibucontext.device);
3476 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
3477 if (err)
3478 goto end;
3479
3480 entry->page_idx = uar_index;
3481 entry->address = uar_index2paddress(dev, uar_index);
3482 if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
3483 entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
3484 else
3485 entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
3486
3487 err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
3488 if (err)
3489 goto err_insert;
3490
3491 return entry;
3492
3493err_insert:
3494 mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
3495end:
3496 kfree(entry);
3497 return ERR_PTR(err);
3498}
3499
3500static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
3501 struct uverbs_attr_bundle *attrs)
3502{
3503 struct ib_uobject *uobj = uverbs_attr_get_uobject(
3504 attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
3505 enum mlx5_ib_uapi_uar_alloc_type alloc_type;
3506 struct mlx5_ib_ucontext *c;
3507 struct mlx5_user_mmap_entry *entry;
3508 u64 mmap_offset;
3509 u32 length;
3510 int err;
3511
3512 c = to_mucontext(ib_uverbs_get_ucontext(attrs));
3513 if (IS_ERR(c))
3514 return PTR_ERR(c);
3515
3516 err = uverbs_get_const(&alloc_type, attrs,
3517 MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
3518 if (err)
3519 return err;
3520
3521 if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
3522 alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
3523 return -EOPNOTSUPP;
3524
3525 if (!to_mdev(c->ibucontext.device)->wc_support &&
3526 alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
3527 return -EOPNOTSUPP;
3528
3529 entry = alloc_uar_entry(c, alloc_type);
3530 if (IS_ERR(entry))
3531 return PTR_ERR(entry);
3532
3533 mmap_offset = mlx5_entry_to_mmap_offset(entry);
3534 length = entry->rdma_entry.npages * PAGE_SIZE;
3535 uobj->object = entry;
3536 uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
3537
3538 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
3539 &mmap_offset, sizeof(mmap_offset));
3540 if (err)
3541 return err;
3542
3543 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
3544 &entry->page_idx, sizeof(entry->page_idx));
3545 if (err)
3546 return err;
3547
3548 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
3549 &length, sizeof(length));
3550 return err;
3551}
3552
3553DECLARE_UVERBS_NAMED_METHOD(
3554 MLX5_IB_METHOD_UAR_OBJ_ALLOC,
3555 UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
3556 MLX5_IB_OBJECT_UAR,
3557 UVERBS_ACCESS_NEW,
3558 UA_MANDATORY),
3559 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
3560 enum mlx5_ib_uapi_uar_alloc_type,
3561 UA_MANDATORY),
3562 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
3563 UVERBS_ATTR_TYPE(u32),
3564 UA_MANDATORY),
3565 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
3566 UVERBS_ATTR_TYPE(u32),
3567 UA_MANDATORY),
3568 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
3569 UVERBS_ATTR_TYPE(u64),
3570 UA_MANDATORY));
3571
3572DECLARE_UVERBS_NAMED_METHOD_DESTROY(
3573 MLX5_IB_METHOD_UAR_OBJ_DESTROY,
3574 UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
3575 MLX5_IB_OBJECT_UAR,
3576 UVERBS_ACCESS_DESTROY,
3577 UA_MANDATORY));
3578
3579DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
3580 UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
3581 &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
3582 &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
3583
3584ADD_UVERBS_ATTRIBUTES_SIMPLE(
3585 mlx5_ib_query_context,
3586 UVERBS_OBJECT_DEVICE,
3587 UVERBS_METHOD_QUERY_CONTEXT,
3588 UVERBS_ATTR_PTR_OUT(
3589 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
3590 UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp,
3591 dump_fill_mkey),
3592 UA_MANDATORY));
3593
3594static const struct uapi_definition mlx5_ib_defs[] = {
3595 UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
3596 UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
3597 UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
3598 UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
3599 UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
3600
3601 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
3602 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
3603 UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
3604 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
3605 {}
3606};
3607
3608static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
3609{
3610 mlx5_ib_cleanup_multiport_master(dev);
3611 WARN_ON(!xa_empty(&dev->odp_mkeys));
3612 mutex_destroy(&dev->cap_mask_mutex);
3613 WARN_ON(!xa_empty(&dev->sig_mrs));
3614 WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
3615}
3616
3617static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
3618{
3619 struct mlx5_core_dev *mdev = dev->mdev;
3620 int err;
3621 int i;
3622
3623 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
3624 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
3625 dev->ib_dev.phys_port_cnt = dev->num_ports;
3626 dev->ib_dev.dev.parent = mdev->device;
3627 dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
3628
3629 for (i = 0; i < dev->num_ports; i++) {
3630 spin_lock_init(&dev->port[i].mp.mpi_lock);
3631 rwlock_init(&dev->port[i].roce.netdev_lock);
3632 dev->port[i].roce.dev = dev;
3633 dev->port[i].roce.native_port_num = i + 1;
3634 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
3635 }
3636
3637 err = mlx5_ib_init_multiport_master(dev);
3638 if (err)
3639 return err;
3640
3641 err = set_has_smi_cap(dev);
3642 if (err)
3643 goto err_mp;
3644
3645 err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
3646 if (err)
3647 goto err_mp;
3648
3649 if (mlx5_use_mad_ifc(dev))
3650 get_ext_port_caps(dev);
3651
3652 dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
3653
3654 mutex_init(&dev->cap_mask_mutex);
3655 INIT_LIST_HEAD(&dev->qp_list);
3656 spin_lock_init(&dev->reset_flow_resource_lock);
3657 xa_init(&dev->odp_mkeys);
3658 xa_init(&dev->sig_mrs);
3659 atomic_set(&dev->mkey_var, 0);
3660
3661 spin_lock_init(&dev->dm.lock);
3662 dev->dm.dev = mdev;
3663 return 0;
3664
3665err_mp:
3666 mlx5_ib_cleanup_multiport_master(dev);
3667 return err;
3668}
3669
3670static int mlx5_ib_enable_driver(struct ib_device *dev)
3671{
3672 struct mlx5_ib_dev *mdev = to_mdev(dev);
3673 int ret;
3674
3675 ret = mlx5_ib_test_wc(mdev);
3676 mlx5_ib_dbg(mdev, "Write-Combining %s\n",
3677 mdev->wc_support ? "supported" : "not supported");
3678
3679 return ret;
3680}
3681
3682static const struct ib_device_ops mlx5_ib_dev_ops = {
3683 .owner = THIS_MODULE,
3684 .driver_id = RDMA_DRIVER_MLX5,
3685 .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,
3686
3687 .add_gid = mlx5_ib_add_gid,
3688 .alloc_mr = mlx5_ib_alloc_mr,
3689 .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
3690 .alloc_pd = mlx5_ib_alloc_pd,
3691 .alloc_ucontext = mlx5_ib_alloc_ucontext,
3692 .attach_mcast = mlx5_ib_mcg_attach,
3693 .check_mr_status = mlx5_ib_check_mr_status,
3694 .create_ah = mlx5_ib_create_ah,
3695 .create_cq = mlx5_ib_create_cq,
3696 .create_qp = mlx5_ib_create_qp,
3697 .create_srq = mlx5_ib_create_srq,
3698 .create_user_ah = mlx5_ib_create_ah,
3699 .dealloc_pd = mlx5_ib_dealloc_pd,
3700 .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
3701 .del_gid = mlx5_ib_del_gid,
3702 .dereg_mr = mlx5_ib_dereg_mr,
3703 .destroy_ah = mlx5_ib_destroy_ah,
3704 .destroy_cq = mlx5_ib_destroy_cq,
3705 .destroy_qp = mlx5_ib_destroy_qp,
3706 .destroy_srq = mlx5_ib_destroy_srq,
3707 .detach_mcast = mlx5_ib_mcg_detach,
3708 .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
3709 .drain_rq = mlx5_ib_drain_rq,
3710 .drain_sq = mlx5_ib_drain_sq,
3711 .device_group = &mlx5_attr_group,
3712 .enable_driver = mlx5_ib_enable_driver,
3713 .get_dev_fw_str = get_dev_fw_str,
3714 .get_dma_mr = mlx5_ib_get_dma_mr,
3715 .get_link_layer = mlx5_ib_port_link_layer,
3716 .map_mr_sg = mlx5_ib_map_mr_sg,
3717 .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
3718 .mmap = mlx5_ib_mmap,
3719 .mmap_free = mlx5_ib_mmap_free,
3720 .modify_cq = mlx5_ib_modify_cq,
3721 .modify_device = mlx5_ib_modify_device,
3722 .modify_port = mlx5_ib_modify_port,
3723 .modify_qp = mlx5_ib_modify_qp,
3724 .modify_srq = mlx5_ib_modify_srq,
3725 .poll_cq = mlx5_ib_poll_cq,
3726 .post_recv = mlx5_ib_post_recv_nodrain,
3727 .post_send = mlx5_ib_post_send_nodrain,
3728 .post_srq_recv = mlx5_ib_post_srq_recv,
3729 .process_mad = mlx5_ib_process_mad,
3730 .query_ah = mlx5_ib_query_ah,
3731 .query_device = mlx5_ib_query_device,
3732 .query_gid = mlx5_ib_query_gid,
3733 .query_pkey = mlx5_ib_query_pkey,
3734 .query_qp = mlx5_ib_query_qp,
3735 .query_srq = mlx5_ib_query_srq,
3736 .query_ucontext = mlx5_ib_query_ucontext,
3737 .reg_user_mr = mlx5_ib_reg_user_mr,
3738 .reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
3739 .req_notify_cq = mlx5_ib_arm_cq,
3740 .rereg_user_mr = mlx5_ib_rereg_user_mr,
3741 .resize_cq = mlx5_ib_resize_cq,
3742
3743 INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
3744 INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
3745 INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
3746 INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
3747 INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
3748 INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
3749 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
3750};
3751
3752static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
3753 .rdma_netdev_get_params = mlx5_ib_rn_get_params,
3754};
3755
3756static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
3757 .get_vf_config = mlx5_ib_get_vf_config,
3758 .get_vf_guid = mlx5_ib_get_vf_guid,
3759 .get_vf_stats = mlx5_ib_get_vf_stats,
3760 .set_vf_guid = mlx5_ib_set_vf_guid,
3761 .set_vf_link_state = mlx5_ib_set_vf_link_state,
3762};
3763
3764static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
3765 .alloc_mw = mlx5_ib_alloc_mw,
3766 .dealloc_mw = mlx5_ib_dealloc_mw,
3767
3768 INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
3769};
3770
3771static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
3772 .alloc_xrcd = mlx5_ib_alloc_xrcd,
3773 .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
3774
3775 INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
3776};
3777
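/*
 * Descriptive comment (added): size the VAR doorbell table from the VDPA
 * emulation caps - the number of entries is the doorbell BAR size divided
 * by the doorbell stride, and a bitmap tracks which entries have been
 * handed out to userspace.
 */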
3778static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
3779{
3780 struct mlx5_core_dev *mdev = dev->mdev;
3781 struct mlx5_var_table *var_table = &dev->var_table;
3782 u8 log_doorbell_bar_size;
3783 u8 log_doorbell_stride;
3784 u64 bar_size;
3785
3786 log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3787 log_doorbell_bar_size);
3788 log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3789 log_doorbell_stride);
3790 var_table->hw_start_addr = dev->mdev->bar_addr +
3791 MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
3792 doorbell_bar_offset);
3793 bar_size = (1ULL << log_doorbell_bar_size) * 4096;
3794 var_table->stride_size = 1ULL << log_doorbell_stride;
3795 var_table->num_var_hw_entries = div_u64(bar_size,
3796 var_table->stride_size);
3797 mutex_init(&var_table->bitmap_lock);
3798 var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
3799 GFP_KERNEL);
3800 return (var_table->bitmap) ? 0 : -ENOMEM;
3801}
3802
3803static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
3804{
3805 bitmap_free(dev->var_table.bitmap);
3806}
3807
3808static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
3809{
3810 struct mlx5_core_dev *mdev = dev->mdev;
3811 int err;
3812
3813 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
3814 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
3815 ib_set_device_ops(&dev->ib_dev,
3816 &mlx5_ib_dev_ipoib_enhanced_ops);
3817
3818 if (mlx5_core_is_pf(mdev))
3819 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
3820
3821 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
3822
3823 if (MLX5_CAP_GEN(mdev, imaicl))
3824 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
3825
3826 if (MLX5_CAP_GEN(mdev, xrc))
3827 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
3828
3829 if (MLX5_CAP_DEV_MEM(mdev, memic) ||
3830 MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3831 MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
3832 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
3833
3834 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
3835
3836 if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
3837 dev->ib_dev.driver_def = mlx5_ib_defs;
3838
3839 err = init_node_data(dev);
3840 if (err)
3841 return err;
3842
3843 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3844 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
3845 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
3846 mutex_init(&dev->lb.mutex);
3847
3848 if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3849 MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
3850 err = mlx5_ib_init_var_table(dev);
3851 if (err)
3852 return err;
3853 }
3854
3855 dev->ib_dev.use_cq_dim = true;
3856
3857 return 0;
3858}
3859
3860static const struct ib_device_ops mlx5_ib_dev_port_ops = {
3861 .get_port_immutable = mlx5_port_immutable,
3862 .query_port = mlx5_ib_query_port,
3863};
3864
3865static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
3866{
3867 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
3868 return 0;
3869}
3870
3871static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
3872 .get_port_immutable = mlx5_port_rep_immutable,
3873 .query_port = mlx5_ib_rep_query_port,
3874 .query_pkey = mlx5_ib_rep_query_pkey,
3875};
3876
3877static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
3878{
3879 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
3880 return 0;
3881}
3882
3883static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
3884 .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
3885 .create_wq = mlx5_ib_create_wq,
3886 .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
3887 .destroy_wq = mlx5_ib_destroy_wq,
3888 .get_netdev = mlx5_ib_get_netdev,
3889 .modify_wq = mlx5_ib_modify_wq,
3890
3891 INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
3892 ib_rwq_ind_tbl),
3893};
3894
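/*
 * Descriptive comment (added): RoCE stage init - for an Ethernet link
 * layer, install the RoCE-specific verbs, register the netdev notifier for
 * the native port only, and enable the Ethernet (RoCE/LAG) resources.
 */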
3895static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
3896{
3897 struct mlx5_core_dev *mdev = dev->mdev;
3898 enum rdma_link_layer ll;
3899 int port_type_cap;
3900 u32 port_num = 0;
3901 int err;
3902
3903 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
3904 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
3905
3906 if (ll == IB_LINK_LAYER_ETHERNET) {
3907 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
3908
3909 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3910
3911 /* Register only for native ports */
3912 err = mlx5_add_netdev_notifier(dev, port_num);
3913 if (err)
3914 return err;
3915
3916 err = mlx5_enable_eth(dev);
3917 if (err)
3918 goto cleanup;
3919 }
3920
3921 return 0;
3922cleanup:
3923 mlx5_remove_netdev_notifier(dev, port_num);
3924 return err;
3925}
3926
3927static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
3928{
3929 struct mlx5_core_dev *mdev = dev->mdev;
3930 enum rdma_link_layer ll;
3931 int port_type_cap;
3932 u32 port_num;
3933
3934 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
3935 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
3936
3937 if (ll == IB_LINK_LAYER_ETHERNET) {
3938 mlx5_disable_eth(dev);
3939
3940 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3941 mlx5_remove_netdev_notifier(dev, port_num);
3942 }
3943}
3944
3945static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
3946{
3947 mlx5_ib_init_cong_debugfs(dev,
3948 mlx5_core_native_port_num(dev->mdev) - 1);
3949 return 0;
3950}
3951
3952static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
3953{
3954 mlx5_ib_cleanup_cong_debugfs(dev,
3955 mlx5_core_native_port_num(dev->mdev) - 1);
3956}
3957
3958static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
3959{
3960 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
3961 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
3962}
3963
3964static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
3965{
3966 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
3967}
3968
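/*
 * Descriptive comment (added): allocate the two kernel bfregs - a regular
 * one and a fast-path one; if the second allocation fails the first is
 * released before returning the error.
 */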
3969static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
3970{
3971 int err;
3972
3973 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
3974 if (err)
3975 return err;
3976
3977 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
3978 if (err)
3979 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
3980
3981 return err;
3982}
3983
3984static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
3985{
3986 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
3987 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
3988}
3989
3990static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
3991{
3992 const char *name;
3993
3994 if (!mlx5_lag_is_active(dev->mdev))
3995 name = "mlx5_%d";
3996 else
3997 name = "mlx5_bond_%d";
3998 return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
3999}
4000
4001static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
4002{
4003 int err;
4004
4005 err = mlx5_mkey_cache_cleanup(dev);
4006 if (err)
4007 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4008
4009 mlx5r_umr_resource_cleanup(dev);
4010}
4011
4012static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
4013{
4014 ib_unregister_device(&dev->ib_dev);
4015}
4016
4017static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
4018{
4019 int ret;
4020
4021 ret = mlx5r_umr_resource_init(dev);
4022 if (ret)
4023 return ret;
4024
4025 ret = mlx5_mkey_cache_init(dev);
4026 if (ret) {
4027 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4028 mlx5r_umr_resource_cleanup(dev);
4029 }
4030 return ret;
4031}
4032
4033static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
4034{
4035 struct dentry *root;
4036
4037 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4038 return 0;
4039
4040 mutex_init(&dev->delay_drop.lock);
4041 dev->delay_drop.dev = dev;
4042 dev->delay_drop.activate = false;
4043 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
4044 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
4045 atomic_set(&dev->delay_drop.rqs_cnt, 0);
4046 atomic_set(&dev->delay_drop.events_cnt, 0);
4047
4048 if (!mlx5_debugfs_root)
4049 return 0;
4050
4051 root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
4052 dev->delay_drop.dir_debugfs = root;
4053
4054 debugfs_create_atomic_t("num_timeout_events", 0400, root,
4055 &dev->delay_drop.events_cnt);
4056 debugfs_create_atomic_t("num_rqs", 0400, root,
4057 &dev->delay_drop.rqs_cnt);
4058 debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
4059 &fops_delay_drop_timeout);
4060 return 0;
4061}
4062
4063static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
4064{
4065 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4066 return;
4067
4068 cancel_work_sync(&dev->delay_drop.delay_drop_work);
4069 if (!dev->delay_drop.dir_debugfs)
4070 return;
4071
4072 debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
4073 dev->delay_drop.dir_debugfs = NULL;
4074}
4075
4076static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
4077{
4078 dev->mdev_events.notifier_call = mlx5_ib_event;
4079 mlx5_notifier_register(dev->mdev, &dev->mdev_events);
4080 return 0;
4081}
4082
4083static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
4084{
4085 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
4086}
4087
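/*
 * Descriptive comment (added): tear down an IB device by running the
 * profile stage cleanup callbacks in reverse order, starting from the last
 * initialized stage, then release the port array and the ib_device itself.
 */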
4088void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
4089 const struct mlx5_ib_profile *profile,
4090 int stage)
4091{
4092 dev->ib_active = false;
4093
4094 /* Number of stages to cleanup */
4095 while (stage) {
4096 stage--;
4097 if (profile->stage[stage].cleanup)
4098 profile->stage[stage].cleanup(dev);
4099 }
4100
4101 kfree(dev->port);
4102 ib_dealloc_device(&dev->ib_dev);
4103}
4104
4105int __mlx5_ib_add(struct mlx5_ib_dev *dev,
4106 const struct mlx5_ib_profile *profile)
4107{
4108 int err;
4109 int i;
4110
4111 dev->profile = profile;
4112
4113 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
4114 if (profile->stage[i].init) {
4115 err = profile->stage[i].init(dev);
4116 if (err)
4117 goto err_out;
4118 }
4119 }
4120
4121 dev->ib_active = true;
4122 return 0;
4123
4124err_out:
4125 /* Clean up stages which were initialized */
4126 while (i) {
4127 i--;
4128 if (profile->stage[i].cleanup)
4129 profile->stage[i].cleanup(dev);
4130 }
4131 return err;
4132}
4133
4134static const struct mlx5_ib_profile pf_profile = {
4135 STAGE_CREATE(MLX5_IB_STAGE_INIT,
4136 mlx5_ib_stage_init_init,
4137 mlx5_ib_stage_init_cleanup),
4138 STAGE_CREATE(MLX5_IB_STAGE_FS,
4139 mlx5_ib_fs_init,
4140 mlx5_ib_fs_cleanup),
4141 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
4142 mlx5_ib_stage_caps_init,
4143 mlx5_ib_stage_caps_cleanup),
4144 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
4145 mlx5_ib_stage_non_default_cb,
4146 NULL),
4147 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
4148 mlx5_ib_roce_init,
4149 mlx5_ib_roce_cleanup),
4150 STAGE_CREATE(MLX5_IB_STAGE_QP,
4151 mlx5_init_qp_table,
4152 mlx5_cleanup_qp_table),
4153 STAGE_CREATE(MLX5_IB_STAGE_SRQ,
4154 mlx5_init_srq_table,
4155 mlx5_cleanup_srq_table),
4156 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
4157 mlx5_ib_dev_res_init,
4158 mlx5_ib_dev_res_cleanup),
4159 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
4160 mlx5_ib_stage_dev_notifier_init,
4161 mlx5_ib_stage_dev_notifier_cleanup),
4162 STAGE_CREATE(MLX5_IB_STAGE_ODP,
4163 mlx5_ib_odp_init_one,
4164 mlx5_ib_odp_cleanup_one),
4165 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
4166 mlx5_ib_counters_init,
4167 mlx5_ib_counters_cleanup),
4168 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
4169 mlx5_ib_stage_cong_debugfs_init,
4170 mlx5_ib_stage_cong_debugfs_cleanup),
4171 STAGE_CREATE(MLX5_IB_STAGE_UAR,
4172 mlx5_ib_stage_uar_init,
4173 mlx5_ib_stage_uar_cleanup),
4174 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
4175 mlx5_ib_stage_bfrag_init,
4176 mlx5_ib_stage_bfrag_cleanup),
4177 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
4178 NULL,
4179 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
4180 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
4181 mlx5_ib_devx_init,
4182 mlx5_ib_devx_cleanup),
4183 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
4184 mlx5_ib_stage_ib_reg_init,
4185 mlx5_ib_stage_ib_reg_cleanup),
4186 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
4187 mlx5_ib_stage_post_ib_reg_umr_init,
4188 NULL),
4189 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
4190 mlx5_ib_stage_delay_drop_init,
4191 mlx5_ib_stage_delay_drop_cleanup),
4192 STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
4193 mlx5_ib_restrack_init,
4194 NULL),
4195};
4196
4197const struct mlx5_ib_profile raw_eth_profile = {
4198 STAGE_CREATE(MLX5_IB_STAGE_INIT,
4199 mlx5_ib_stage_init_init,
4200 mlx5_ib_stage_init_cleanup),
4201 STAGE_CREATE(MLX5_IB_STAGE_FS,
4202 mlx5_ib_fs_init,
4203 mlx5_ib_fs_cleanup),
4204 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
4205 mlx5_ib_stage_caps_init,
4206 mlx5_ib_stage_caps_cleanup),
4207 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
4208 mlx5_ib_stage_raw_eth_non_default_cb,
4209 NULL),
4210 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
4211 mlx5_ib_roce_init,
4212 mlx5_ib_roce_cleanup),
4213 STAGE_CREATE(MLX5_IB_STAGE_QP,
4214 mlx5_init_qp_table,
4215 mlx5_cleanup_qp_table),
4216 STAGE_CREATE(MLX5_IB_STAGE_SRQ,
4217 mlx5_init_srq_table,
4218 mlx5_cleanup_srq_table),
4219 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
4220 mlx5_ib_dev_res_init,
4221 mlx5_ib_dev_res_cleanup),
4222 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
4223 mlx5_ib_stage_dev_notifier_init,
4224 mlx5_ib_stage_dev_notifier_cleanup),
4225 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
4226 mlx5_ib_counters_init,
4227 mlx5_ib_counters_cleanup),
4228 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
4229 mlx5_ib_stage_cong_debugfs_init,
4230 mlx5_ib_stage_cong_debugfs_cleanup),
4231 STAGE_CREATE(MLX5_IB_STAGE_UAR,
4232 mlx5_ib_stage_uar_init,
4233 mlx5_ib_stage_uar_cleanup),
4234 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
4235 mlx5_ib_stage_bfrag_init,
4236 mlx5_ib_stage_bfrag_cleanup),
4237 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
4238 NULL,
4239 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
4240 STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
4241 mlx5_ib_devx_init,
4242 mlx5_ib_devx_cleanup),
4243 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
4244 mlx5_ib_stage_ib_reg_init,
4245 mlx5_ib_stage_ib_reg_cleanup),
4246 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
4247 mlx5_ib_stage_post_ib_reg_umr_init,
4248 NULL),
4249 STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
4250 mlx5_ib_restrack_init,
4251 NULL),
4252};
4253
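/*
 * Descriptive comment (added): probe for the ".multiport" auxiliary device -
 * create a multiport info entry for this core device and try to bind it to
 * an existing IB device with a matching system image GUID; otherwise park
 * it on the unaffiliated list until a master device appears.
 */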
4254static int mlx5r_mp_probe(struct auxiliary_device *adev,
4255 const struct auxiliary_device_id *id)
4256{
4257 struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
4258 struct mlx5_core_dev *mdev = idev->mdev;
4259 struct mlx5_ib_multiport_info *mpi;
4260 struct mlx5_ib_dev *dev;
4261 bool bound = false;
4262 int err;
4263
4264 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
4265 if (!mpi)
4266 return -ENOMEM;
4267
4268 mpi->mdev = mdev;
4269 err = mlx5_query_nic_vport_system_image_guid(mdev,
4270 &mpi->sys_image_guid);
4271 if (err) {
4272 kfree(mpi);
4273 return err;
4274 }
4275
4276 mutex_lock(&mlx5_ib_multiport_mutex);
4277 list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
4278 if (dev->sys_image_guid == mpi->sys_image_guid)
4279 bound = mlx5_ib_bind_slave_port(dev, mpi);
4280
4281 if (bound) {
4282 rdma_roce_rescan_device(&dev->ib_dev);
4283 mpi->ibdev->ib_active = true;
4284 break;
4285 }
4286 }
4287
4288 if (!bound) {
4289 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
4290 dev_dbg(mdev->device,
4291 "no suitable IB device found to bind to, added to unaffiliated list.\n");
4292 }
4293 mutex_unlock(&mlx5_ib_multiport_mutex);
4294
4295 auxiliary_set_drvdata(adev, mpi);
4296 return 0;
4297}
4298
4299static void mlx5r_mp_remove(struct auxiliary_device *adev)
4300{
4301 struct mlx5_ib_multiport_info *mpi;
4302
4303 mpi = auxiliary_get_drvdata(adev);
4304 mutex_lock(&mlx5_ib_multiport_mutex);
4305 if (mpi->ibdev)
4306 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
4307 else
4308 list_del(&mpi->list);
4309 mutex_unlock(&mlx5_ib_multiport_mutex);
4310 kfree(mpi);
4311}
4312
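/*
 * Descriptive comment (added): probe for the ".rdma" auxiliary device -
 * allocate the IB device, pick the raw Ethernet profile when RoCE is
 * disabled on an Ethernet port and the full pf_profile otherwise, then run
 * all profile stages via __mlx5_ib_add().
 */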
4313static int mlx5r_probe(struct auxiliary_device *adev,
4314 const struct auxiliary_device_id *id)
4315{
4316 struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
4317 struct mlx5_core_dev *mdev = idev->mdev;
4318 const struct mlx5_ib_profile *profile;
4319 int port_type_cap, num_ports, ret;
4320 enum rdma_link_layer ll;
4321 struct mlx5_ib_dev *dev;
4322
4323 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4324 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4325
4326 num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
4327 MLX5_CAP_GEN(mdev, num_vhca_ports));
4328 dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
4329 if (!dev)
4330 return -ENOMEM;
4331 dev->port = kcalloc(num_ports, sizeof(*dev->port),
4332 GFP_KERNEL);
4333 if (!dev->port) {
4334 ib_dealloc_device(&dev->ib_dev);
4335 return -ENOMEM;
4336 }
4337
4338 dev->mdev = mdev;
4339 dev->num_ports = num_ports;
4340
4341 if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
4342 profile = &raw_eth_profile;
4343 else
4344 profile = &pf_profile;
4345
4346 ret = __mlx5_ib_add(dev, profile);
4347 if (ret) {
4348 kfree(dev->port);
4349 ib_dealloc_device(&dev->ib_dev);
4350 return ret;
4351 }
4352
4353 auxiliary_set_drvdata(adev, dev);
4354 return 0;
4355}
4356
4357static void mlx5r_remove(struct auxiliary_device *adev)
4358{
4359 struct mlx5_ib_dev *dev;
4360
4361 dev = auxiliary_get_drvdata(adev);
4362 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
4363}
4364
4365static const struct auxiliary_device_id mlx5r_mp_id_table[] = {
4366 { .name = MLX5_ADEV_NAME ".multiport", },
4367 {},
4368};
4369
4370static const struct auxiliary_device_id mlx5r_id_table[] = {
4371 { .name = MLX5_ADEV_NAME ".rdma", },
4372 {},
4373};
4374
4375MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table);
4376MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table);
4377
4378static struct auxiliary_driver mlx5r_mp_driver = {
4379 .name = "multiport",
4380 .probe = mlx5r_mp_probe,
4381 .remove = mlx5r_mp_remove,
4382 .id_table = mlx5r_mp_id_table,
4383};
4384
4385static struct auxiliary_driver mlx5r_driver = {
4386 .name = "rdma",
4387 .probe = mlx5r_probe,
4388 .remove = mlx5r_remove,
4389 .id_table = mlx5r_id_table,
4390};
4391
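/*
 * Descriptive comment (added): module init sets up the XLT emergency page
 * and the ordered event workqueue before registering the auxiliary drivers,
 * so that both are available as soon as the first device is probed.
 */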
4392static int __init mlx5_ib_init(void)
4393{
4394 int ret;
4395
4396 xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
4397 if (!xlt_emergency_page)
4398 return -ENOMEM;
4399
4400 mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
4401 if (!mlx5_ib_event_wq) {
4402 free_page((unsigned long)xlt_emergency_page);
4403 return -ENOMEM;
4404 }
4405
4406 mlx5_ib_odp_init();
4407 ret = mlx5r_rep_init();
4408 if (ret)
4409 goto rep_err;
4410 ret = auxiliary_driver_register(&mlx5r_mp_driver);
4411 if (ret)
4412 goto mp_err;
4413 ret = auxiliary_driver_register(&mlx5r_driver);
4414 if (ret)
4415 goto drv_err;
4416 return 0;
4417
4418drv_err:
4419 auxiliary_driver_unregister(&mlx5r_mp_driver);
4420mp_err:
4421 mlx5r_rep_cleanup();
4422rep_err:
4423 destroy_workqueue(mlx5_ib_event_wq);
4424 free_page((unsigned long)xlt_emergency_page);
4425 return ret;
4426}
4427
4428static void __exit mlx5_ib_cleanup(void)
4429{
4430 auxiliary_driver_unregister(&mlx5r_driver);
4431 auxiliary_driver_unregister(&mlx5r_mp_driver);
4432 mlx5r_rep_cleanup();
4433
4434 destroy_workqueue(mlx5_ib_event_wq);
4435 free_page((unsigned long)xlt_emergency_page);
4436}
4437
4438module_init(mlx5_ib_init);
4439module_exit(mlx5_ib_cleanup);