/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

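/*
 * Program a port's MAC address into hardware. On HIP09 and newer
 * revisions no separate MAC update is needed, so this is a no-op;
 * otherwise the hardware is only touched when the address changes.
 */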
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
			    const u8 *addr)
{
	u8 phy_port;
	u32 i;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u32 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);

	return ret;
}

static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u32 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);

	return ret;
}

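/*
 * Handle a netdev notifier event for one RoCE port: address and link
 * state changes refresh the port MAC; NETDEV_DOWN needs no action here.
 */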
static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
			   unsigned long event)
{
	struct device *dev = hr_dev->dev;
	struct net_device *netdev;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_DOWN:
		/*
		 * The v1 engine only supports closing all ports together,
		 * so a single port going down needs no action here.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	return ret;
}

static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	int ret;
	u32 port;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}

static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}

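/* Report device attributes from the capabilities probed at init time. */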
static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		props->max_srq = hr_dev->caps.num_srqs;
		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
		props->max_srq_sge = hr_dev->caps.max_srq_sges;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
	    hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	return 0;
}

static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u32 port;
	int ret;

	port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
			       &props->active_width);
	if (ret)
		ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "find netdev %u failed!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
		       IB_PORT_ACTIVE :
		       IB_PORT_DOWN;
	props->phys_state = props->state == IB_PORT_ACTIVE ?
			    IB_PORT_PHYS_STATE_LINK_UP :
			    IB_PORT_PHYS_STATE_DISABLED;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
			       u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = PKEY_ID;

	return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}

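/*
 * mmap offset layout: pgoff 0 is reserved for the doorbell page (kept
 * for ABI compatibility), while direct WQE entries are allocated from
 * the range [1, U32_MAX].
 */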
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type)
{
	struct hns_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_type = mmap_type;

	switch (mmap_type) {
	/* pgoff 0 must be used by DB for compatibility */
	case HNS_ROCE_MMAP_TYPE_DB:
		ret = rdma_user_mmap_entry_insert_exact(
				ucontext, &entry->rdma_entry, length, 0);
		break;
	case HNS_ROCE_MMAP_TYPE_DWQE:
		ret = rdma_user_mmap_entry_insert_range(
				ucontext, &entry->rdma_entry, length, 1,
				U32_MAX);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		kfree(entry);
		return NULL;
	}

	return entry;
}

static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
{
	if (context->db_mmap_entry)
		rdma_user_mmap_entry_remove(
			&context->db_mmap_entry->rdma_entry);
}

static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	u64 address;

	address = context->uar.pfn << PAGE_SHIFT;
	context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
		uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
	if (!context->db_mmap_entry)
		return -ENOMEM;

	return 0;
}

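/*
 * Negotiate optional features (extended SGE, RQ inline, CQE inline)
 * between the userspace request and the device capabilities, then set
 * up the context's UAR and its doorbell mmap entry.
 */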
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
				   struct ib_udata *udata)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
	struct hns_roce_ib_alloc_ucontext ucmd = {};
	int ret = -EAGAIN;

	if (!hr_dev->active)
		goto error_out;

	resp.qp_tab_size = hr_dev->caps.num_qps;
	resp.srq_tab_size = hr_dev->caps.num_srqs;

	ret = ib_copy_from_udata(&ucmd, udata,
				 min(udata->inlen, sizeof(ucmd)));
	if (ret)
		goto error_out;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;

	if (context->config & HNS_ROCE_EXSGE_FLAGS) {
		resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
		resp.max_inline_data = hr_dev->caps.max_sq_inline;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS;
		if (context->config & HNS_ROCE_RQ_INLINE_FLAGS)
			resp.config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
		context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS;
		if (context->config & HNS_ROCE_CQE_INLINE_FLAGS)
			resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
	}

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_out;

	ret = hns_roce_alloc_uar_entry(uctx);
	if (ret)
		goto error_fail_uar_entry;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}

	resp.cqe_size = hr_dev->caps.cqe_sz;

	ret = ib_copy_to_udata(udata, &resp,
			       min(udata->outlen, sizeof(resp)));
	if (ret)
		goto error_fail_copy_to_udata;

	return 0;

error_fail_copy_to_udata:
	hns_roce_dealloc_uar_entry(context);

error_fail_uar_entry:
	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);

error_out:
	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT]);

	return ret;
}

static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

	hns_roce_dealloc_uar_entry(context);

	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}

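/*
 * Map a doorbell or direct WQE page into userspace; the entry is looked
 * up by the page offset chosen in hns_roce_user_mmap_entry_insert().
 */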
static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
	struct rdma_user_mmap_entry *rdma_entry;
	struct hns_user_mmap_entry *entry;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
	if (!rdma_entry) {
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
		return -EINVAL;
	}

	entry = to_hns_mmap(rdma_entry);
	pfn = entry->address >> PAGE_SHIFT;

	switch (entry->mmap_type) {
	case HNS_ROCE_MMAP_TYPE_DB:
	case HNS_ROCE_MMAP_TYPE_DWQE:
		prot = pgprot_device(vma->vm_page_prot);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
				prot, rdma_entry);

out:
	rdma_user_mmap_entry_put(rdma_entry);
	if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);

	return ret;
}

static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);

	kfree(entry);
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}

static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
{
	u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
	unsigned int major, minor, sub_minor;

	major = upper_32_bits(fw_ver);
	minor = high_16_bits(lower_32_bits(fw_ver));
	sub_minor = low_16_bits(fw_ver);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
		 sub_minor);
}

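/*
 * Each descriptor is placed at the index given by its HNS_ROCE_HW_*_CNT
 * enum value, so the table order matches the hardware counter layout.
 */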
#define HNS_ROCE_HW_CNT(ename, cname) \
	[HNS_ROCE_HW_##ename##_CNT].name = cname

static const struct rdma_stat_desc hns_roce_port_stats_descs[] = {
	HNS_ROCE_HW_CNT(RX_RC_PKT, "rx_rc_pkt"),
	HNS_ROCE_HW_CNT(RX_UC_PKT, "rx_uc_pkt"),
	HNS_ROCE_HW_CNT(RX_UD_PKT, "rx_ud_pkt"),
	HNS_ROCE_HW_CNT(RX_XRC_PKT, "rx_xrc_pkt"),
	HNS_ROCE_HW_CNT(RX_PKT, "rx_pkt"),
	HNS_ROCE_HW_CNT(RX_ERR_PKT, "rx_err_pkt"),
	HNS_ROCE_HW_CNT(RX_CNP_PKT, "rx_cnp_pkt"),
	HNS_ROCE_HW_CNT(TX_RC_PKT, "tx_rc_pkt"),
	HNS_ROCE_HW_CNT(TX_UC_PKT, "tx_uc_pkt"),
	HNS_ROCE_HW_CNT(TX_UD_PKT, "tx_ud_pkt"),
	HNS_ROCE_HW_CNT(TX_XRC_PKT, "tx_xrc_pkt"),
	HNS_ROCE_HW_CNT(TX_PKT, "tx_pkt"),
	HNS_ROCE_HW_CNT(TX_ERR_PKT, "tx_err_pkt"),
	HNS_ROCE_HW_CNT(TX_CNP_PKT, "tx_cnp_pkt"),
	HNS_ROCE_HW_CNT(TRP_GET_MPT_ERR_PKT, "trp_get_mpt_err_pkt"),
	HNS_ROCE_HW_CNT(TRP_GET_IRRL_ERR_PKT, "trp_get_irrl_err_pkt"),
	HNS_ROCE_HW_CNT(ECN_DB, "ecn_doorbell"),
	HNS_ROCE_HW_CNT(RX_BUF, "rx_buffer"),
	HNS_ROCE_HW_CNT(TRP_RX_SOF, "trp_rx_sof"),
	HNS_ROCE_HW_CNT(CQ_CQE, "cq_cqe"),
	HNS_ROCE_HW_CNT(CQ_POE, "cq_poe"),
	HNS_ROCE_HW_CNT(CQ_NOTIFY, "cq_notify"),
};

static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
				struct ib_device *device, u32 port_num)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(device);

	if (port_num > hr_dev->caps.num_ports) {
		ibdev_err(device, "invalid port num.\n");
		return NULL;
	}

	return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
					  ARRAY_SIZE(hns_roce_port_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int hns_roce_get_hw_stats(struct ib_device *device,
				 struct rdma_hw_stats *stats,
				 u32 port, int index)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(device);
	int num_counters = HNS_ROCE_HW_CNT_TOTAL;
	int ret;

	if (port == 0)
		return 0;

	if (port > hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
					   &num_counters);
	if (ret) {
		ibdev_err(device, "failed to query hw counter, ret = %d\n",
			  ret);
		return ret;
	}

	return num_counters;
}

static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	hr_dev->active = false;
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}

static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_abi_ver = 1,
	.uverbs_no_driver_id_binding = 1,

	.get_dev_fw_str = hns_roce_get_fw_ver,
	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_user_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.mmap_free = hns_roce_free_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

static const struct ib_device_ops hns_roce_dev_hw_stats_ops = {
	.alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
	.get_hw_stats = hns_roce_get_hw_stats,
};

static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
	.alloc_xrcd = hns_roce_alloc_xrcd,
	.dealloc_xrcd = hns_roce_dealloc_xrcd,

	INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
};

static const struct ib_device_ops hns_roce_dev_restrack_ops = {
	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
	.fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
	.fill_res_qp_entry = hns_roce_fill_res_qp_entry,
	.fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
	.fill_res_mr_entry = hns_roce_fill_res_mr_entry,
	.fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
	.fill_res_srq_entry = hns_roce_fill_res_srq_entry,
	.fill_res_srq_entry_raw = hns_roce_fill_res_srq_entry_raw,
};

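/*
 * Register with the IB core: install the generic and hw-specific verb
 * ops, attach the ports' netdevs, and only after registration succeeds
 * install the netdev notifier and mark the device active.
 */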
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 &&
	    !hr_dev->is_vf)
		ib_set_device_ops(ib_dev, &hns_roce_dev_hw_stats_ops);

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	dma_set_max_seg_size(dev, UINT_MAX);
	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;
	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}

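/*
 * Initialize the HEM (Hardware Entry Memory) tables backing the hardware
 * contexts (MTPT, QPC, IRRL/TRRL, CQC, optional SRQC/SCCC, timers and
 * GMV). On failure, tables are torn down in reverse order of creation.
 */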
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts);
	if (ret) {
		dev_err(dev, "failed to init MTPT context memory, aborting.\n");
		return ret;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
				      hr_dev->caps.num_qps);
	if (ret) {
		dev_err(dev, "failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps);
	if (ret) {
		dev_err(dev, "failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps);
		if (ret) {
			dev_err(dev,
				"failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs);
	if (ret) {
		dev_err(dev, "failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs);
		if (ret) {
			dev_err(dev,
				"failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_sz,
					      hr_dev->caps.num_qps);
		if (ret) {
			dev_err(dev,
				"failed to init SCC context memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.qpc_timer_bt_num);
		if (ret) {
			dev_err(dev,
				"failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.cqc_timer_bt_num);
		if (ret) {
			dev_err(dev,
				"failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	if (hr_dev->caps.gmv_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
					      HEM_TYPE_GMV,
					      hr_dev->caps.gmv_entry_sz,
					      hr_dev->caps.gmv_entry_num);
		if (ret) {
			dev_err(dev,
				"failed to init gmv table memory, ret = %d\n",
				ret);
			goto err_unmap_cqc_timer;
		}
	}

	return 0;

err_unmap_cqc_timer:
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);

err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
err_unmap_srq:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

	return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return: 0 on success, or a negative error code
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	spin_lock_init(&hr_dev->sm_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	hns_roce_init_uar_table(hr_dev);

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "failed to init qp_table.\n");
		goto err_uar_table_free;
	}

	hns_roce_init_pd_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		hns_roce_init_xrcd_table(hr_dev);

	hns_roce_init_mr_table(hr_dev);

	hns_roce_init_cq_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_init_srq_table(hr_dev);

	return 0;

err_uar_table_free:
	ida_destroy(&hr_dev->uar_ida.ida);
	return ret;
}

static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	unsigned long flags;

	spin_lock_irqsave(&hr_cq->lock, flags);
	if (cq->comp_handler) {
		if (!hr_cq->is_armed) {
			hr_cq->is_armed = 1;
			list_add_tail(&hr_cq->node, cq_list);
		}
	}
	spin_unlock_irqrestore(&hr_cq->lock, flags);
}

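/*
 * Called on a device fatal error: generate completion events on the CQs
 * of all QPs that still have outstanding work, so that users polling
 * those CQs are notified instead of blocking forever.
 */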
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp *hr_qp;
	struct hns_roce_cq *hr_cq;
	struct list_head cq_list;
	unsigned long flags_qp;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_list);

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
		if (hr_qp->sq.tail != hr_qp->sq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
	}

	list_for_each_entry(hr_cq, &cq_list, node)
		hns_roce_cq_completion(hr_dev, hr_cq->cqn);

	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_alloc_dfx_cnt(struct hns_roce_dev *hr_dev)
{
	hr_dev->dfx_cnt = kvcalloc(HNS_ROCE_DFX_CNT_TOTAL, sizeof(atomic64_t),
				   GFP_KERNEL);
	if (!hr_dev->dfx_cnt)
		return -ENOMEM;

	return 0;
}

static void hns_roce_dealloc_dfx_cnt(struct hns_roce_dev *hr_dev)
{
	kvfree(hr_dev->dfx_cnt);
}

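/*
 * Bring-up sequence for a RoCE engine instance: command queue, engine
 * profile, mailbox commands, event queues (switching the mailbox to
 * event mode when possible), HEM tables, software tables, hardware
 * init, and finally IB device registration. The error path unwinds in
 * reverse order.
 */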
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	hr_dev->is_reset = false;

	ret = hns_roce_alloc_dfx_cnt(hr_dev);
	if (ret)
		return ret;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "init RoCE Command Queue failed!\n");
			goto error_failed_alloc_dfx_cnt;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/* EQ depends on poll mode, event mode depends on EQ */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret)
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);
	INIT_LIST_HEAD(&hr_dev->dip_list);
	spin_lock_init(&hr_dev->dip_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	hns_roce_register_debugfs(hr_dev);

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

error_failed_alloc_dfx_cnt:
	hns_roce_dealloc_dfx_cnt(hr_dev);

	return ret;
}

void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_debugfs(hr_dev);
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
	hns_roce_dealloc_dfx_cnt(hr_dev);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");
1/*
2 * Copyright (c) 2016 Hisilicon Limited.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/acpi.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <rdma/ib_addr.h>
37#include <rdma/ib_smi.h>
38#include <rdma/ib_user_verbs.h>
39#include <rdma/ib_cache.h>
40#include "hnae3.h"
41#include "hns_roce_common.h"
42#include "hns_roce_device.h"
43#include "hns_roce_hem.h"
44#include "hns_roce_hw_v2.h"
45
46static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
47 const u8 *addr)
48{
49 u8 phy_port;
50 u32 i;
51
52 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
53 return 0;
54
55 if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
56 return 0;
57
58 for (i = 0; i < ETH_ALEN; i++)
59 hr_dev->dev_addr[port][i] = addr[i];
60
61 phy_port = hr_dev->iboe.phy_port[port];
62 return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
63}
64
65static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
66{
67 struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
68 u32 port = attr->port_num - 1;
69 int ret;
70
71 if (port >= hr_dev->caps.num_ports)
72 return -EINVAL;
73
74 ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);
75
76 return ret;
77}
78
79static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
80{
81 struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
82 u32 port = attr->port_num - 1;
83 int ret;
84
85 if (port >= hr_dev->caps.num_ports)
86 return -EINVAL;
87
88 ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);
89
90 return ret;
91}
92
93static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
94 unsigned long event)
95{
96 struct device *dev = hr_dev->dev;
97 struct net_device *netdev;
98 int ret = 0;
99
100 netdev = hr_dev->iboe.netdevs[port];
101 if (!netdev) {
102 dev_err(dev, "can't find netdev on port(%u)!\n", port);
103 return -ENODEV;
104 }
105
106 switch (event) {
107 case NETDEV_UP:
108 case NETDEV_CHANGE:
109 case NETDEV_REGISTER:
110 case NETDEV_CHANGEADDR:
111 ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
112 break;
113 case NETDEV_DOWN:
114 /*
115 * In v1 engine, only support all ports closed together.
116 */
117 break;
118 default:
119 dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
120 break;
121 }
122
123 return ret;
124}
125
126static int hns_roce_netdev_event(struct notifier_block *self,
127 unsigned long event, void *ptr)
128{
129 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
130 struct hns_roce_ib_iboe *iboe = NULL;
131 struct hns_roce_dev *hr_dev = NULL;
132 int ret;
133 u32 port;
134
135 hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
136 iboe = &hr_dev->iboe;
137
138 for (port = 0; port < hr_dev->caps.num_ports; port++) {
139 if (dev == iboe->netdevs[port]) {
140 ret = handle_en_event(hr_dev, port, event);
141 if (ret)
142 return NOTIFY_DONE;
143 break;
144 }
145 }
146
147 return NOTIFY_DONE;
148}
149
150static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
151{
152 int ret;
153 u8 i;
154
155 for (i = 0; i < hr_dev->caps.num_ports; i++) {
156 ret = hns_roce_set_mac(hr_dev, i,
157 hr_dev->iboe.netdevs[i]->dev_addr);
158 if (ret)
159 return ret;
160 }
161
162 return 0;
163}
164
165static int hns_roce_query_device(struct ib_device *ib_dev,
166 struct ib_device_attr *props,
167 struct ib_udata *uhw)
168{
169 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
170
171 memset(props, 0, sizeof(*props));
172
173 props->fw_ver = hr_dev->caps.fw_ver;
174 props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
175 props->max_mr_size = (u64)(~(0ULL));
176 props->page_size_cap = hr_dev->caps.page_size_cap;
177 props->vendor_id = hr_dev->vendor_id;
178 props->vendor_part_id = hr_dev->vendor_part_id;
179 props->hw_ver = hr_dev->hw_rev;
180 props->max_qp = hr_dev->caps.num_qps;
181 props->max_qp_wr = hr_dev->caps.max_wqes;
182 props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
183 IB_DEVICE_RC_RNR_NAK_GEN;
184 props->max_send_sge = hr_dev->caps.max_sq_sg;
185 props->max_recv_sge = hr_dev->caps.max_rq_sg;
186 props->max_sge_rd = 1;
187 props->max_cq = hr_dev->caps.num_cqs;
188 props->max_cqe = hr_dev->caps.max_cqes;
189 props->max_mr = hr_dev->caps.num_mtpts;
190 props->max_pd = hr_dev->caps.num_pds;
191 props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
192 props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
193 props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
194 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
195 props->max_pkeys = 1;
196 props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
197 props->max_ah = INT_MAX;
198 props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD;
199 props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT;
200 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
201 props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
202
203 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
204 props->max_srq = hr_dev->caps.num_srqs;
205 props->max_srq_wr = hr_dev->caps.max_srq_wrs;
206 props->max_srq_sge = hr_dev->caps.max_srq_sges;
207 }
208
209 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
210 hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
211 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
212 props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
213 }
214
215 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
216 props->device_cap_flags |= IB_DEVICE_XRC;
217
218 return 0;
219}
220
221static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
222 struct ib_port_attr *props)
223{
224 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
225 struct device *dev = hr_dev->dev;
226 struct net_device *net_dev;
227 unsigned long flags;
228 enum ib_mtu mtu;
229 u32 port;
230 int ret;
231
232 port = port_num - 1;
233
234 /* props being zeroed by the caller, avoid zeroing it here */
235
236 props->max_mtu = hr_dev->caps.max_mtu;
237 props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
238 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
239 IB_PORT_VENDOR_CLASS_SUP |
240 IB_PORT_BOOT_MGMT_SUP;
241 props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
242 props->pkey_tbl_len = 1;
243 ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
244 &props->active_width);
245 if (ret)
246 ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);
247
248 spin_lock_irqsave(&hr_dev->iboe.lock, flags);
249
250 net_dev = hr_dev->iboe.netdevs[port];
251 if (!net_dev) {
252 spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
253 dev_err(dev, "find netdev %u failed!\n", port);
254 return -EINVAL;
255 }
256
257 mtu = iboe_get_mtu(net_dev->mtu);
258 props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
259 props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
260 IB_PORT_ACTIVE :
261 IB_PORT_DOWN;
262 props->phys_state = props->state == IB_PORT_ACTIVE ?
263 IB_PORT_PHYS_STATE_LINK_UP :
264 IB_PORT_PHYS_STATE_DISABLED;
265
266 spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
267
268 return 0;
269}
270
271static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
272 u32 port_num)
273{
274 return IB_LINK_LAYER_ETHERNET;
275}
276
277static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
278 u16 *pkey)
279{
280 if (index > 0)
281 return -EINVAL;
282
283 *pkey = PKEY_ID;
284
285 return 0;
286}
287
288static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
289 struct ib_device_modify *props)
290{
291 unsigned long flags;
292
293 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
294 return -EOPNOTSUPP;
295
296 if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
297 spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
298 memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
299 spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
300 }
301
302 return 0;
303}
304
305struct hns_user_mmap_entry *
306hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
307 size_t length,
308 enum hns_roce_mmap_type mmap_type)
309{
310 struct hns_user_mmap_entry *entry;
311 int ret;
312
313 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
314 if (!entry)
315 return NULL;
316
317 entry->address = address;
318 entry->mmap_type = mmap_type;
319
320 switch (mmap_type) {
321 /* pgoff 0 must be used by DB for compatibility */
322 case HNS_ROCE_MMAP_TYPE_DB:
323 ret = rdma_user_mmap_entry_insert_exact(
324 ucontext, &entry->rdma_entry, length, 0);
325 break;
326 case HNS_ROCE_MMAP_TYPE_DWQE:
327 ret = rdma_user_mmap_entry_insert_range(
328 ucontext, &entry->rdma_entry, length, 1,
329 U32_MAX);
330 break;
331 default:
332 ret = -EINVAL;
333 break;
334 }
335
336 if (ret) {
337 kfree(entry);
338 return NULL;
339 }
340
341 return entry;
342}
343
344static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
345{
346 if (context->db_mmap_entry)
347 rdma_user_mmap_entry_remove(
348 &context->db_mmap_entry->rdma_entry);
349}
350
351static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
352{
353 struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
354 u64 address;
355
356 address = context->uar.pfn << PAGE_SHIFT;
357 context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
358 uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
359 if (!context->db_mmap_entry)
360 return -ENOMEM;
361
362 return 0;
363}
364
365static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
366 struct ib_udata *udata)
367{
368 struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
369 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
370 struct hns_roce_ib_alloc_ucontext_resp resp = {};
371 struct hns_roce_ib_alloc_ucontext ucmd = {};
372 int ret = -EAGAIN;
373
374 if (!hr_dev->active)
375 goto error_out;
376
377 resp.qp_tab_size = hr_dev->caps.num_qps;
378 resp.srq_tab_size = hr_dev->caps.num_srqs;
379
380 ret = ib_copy_from_udata(&ucmd, udata,
381 min(udata->inlen, sizeof(ucmd)));
382 if (ret)
383 goto error_out;
384
385 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
386 context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;
387
388 if (context->config & HNS_ROCE_EXSGE_FLAGS) {
389 resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
390 resp.max_inline_data = hr_dev->caps.max_sq_inline;
391 }
392
393 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
394 context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS;
395 if (context->config & HNS_ROCE_RQ_INLINE_FLAGS)
396 resp.config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
397 }
398
399 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
400 context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS;
401 if (context->config & HNS_ROCE_CQE_INLINE_FLAGS)
402 resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
403 }
404
405 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
406 resp.congest_type = hr_dev->caps.cong_cap;
407
408 ret = hns_roce_uar_alloc(hr_dev, &context->uar);
409 if (ret)
410 goto error_out;
411
412 ret = hns_roce_alloc_uar_entry(uctx);
413 if (ret)
414 goto error_fail_uar_entry;
415
416 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
417 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
418 INIT_LIST_HEAD(&context->page_list);
419 mutex_init(&context->page_mutex);
420 }
421
422 resp.cqe_size = hr_dev->caps.cqe_sz;
423
424 ret = ib_copy_to_udata(udata, &resp,
425 min(udata->outlen, sizeof(resp)));
426 if (ret)
427 goto error_fail_copy_to_udata;
428
429 return 0;
430
431error_fail_copy_to_udata:
432 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
433 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
434 mutex_destroy(&context->page_mutex);
435 hns_roce_dealloc_uar_entry(context);
436
437error_fail_uar_entry:
438 ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
439
440error_out:
441 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT]);
442
443 return ret;
444}
445
446static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
447{
448 struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
449 struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
450
451 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
452 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
453 mutex_destroy(&context->page_mutex);
454
455 hns_roce_dealloc_uar_entry(context);
456
457 ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
458}
459
460static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
461{
462 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
463 struct rdma_user_mmap_entry *rdma_entry;
464 struct hns_user_mmap_entry *entry;
465 phys_addr_t pfn;
466 pgprot_t prot;
467 int ret;
468
469 if (hr_dev->dis_db) {
470 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
471 return -EPERM;
472 }
473
474 rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
475 if (!rdma_entry) {
476 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
477 return -EINVAL;
478 }
479
480 entry = to_hns_mmap(rdma_entry);
481 pfn = entry->address >> PAGE_SHIFT;
482
483 switch (entry->mmap_type) {
484 case HNS_ROCE_MMAP_TYPE_DB:
485 case HNS_ROCE_MMAP_TYPE_DWQE:
486 prot = pgprot_device(vma->vm_page_prot);
487 break;
488 default:
489 ret = -EINVAL;
490 goto out;
491 }
492
493 ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
494 prot, rdma_entry);
495
496out:
497 rdma_user_mmap_entry_put(rdma_entry);
498 if (ret)
499 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
500
501 return ret;
502}
503
504static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
505{
506 struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);
507
508 kfree(entry);
509}
510
511static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
512 struct ib_port_immutable *immutable)
513{
514 struct ib_port_attr attr;
515 int ret;
516
517 ret = ib_query_port(ib_dev, port_num, &attr);
518 if (ret)
519 return ret;
520
521 immutable->pkey_tbl_len = attr.pkey_tbl_len;
522 immutable->gid_tbl_len = attr.gid_tbl_len;
523
524 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
525 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
526 if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
527 immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
528
529 return 0;
530}
531
532static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
533{
534}
535
536static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
537{
538 u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
539 unsigned int major, minor, sub_minor;
540
541 major = upper_32_bits(fw_ver);
542 minor = high_16_bits(lower_32_bits(fw_ver));
543 sub_minor = low_16_bits(fw_ver);
544
545 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
546 sub_minor);
547}
548
549#define HNS_ROCE_HW_CNT(ename, cname) \
550 [HNS_ROCE_HW_##ename##_CNT].name = cname
551
552static const struct rdma_stat_desc hns_roce_port_stats_descs[] = {
553 HNS_ROCE_HW_CNT(RX_RC_PKT, "rx_rc_pkt"),
554 HNS_ROCE_HW_CNT(RX_UC_PKT, "rx_uc_pkt"),
555 HNS_ROCE_HW_CNT(RX_UD_PKT, "rx_ud_pkt"),
556 HNS_ROCE_HW_CNT(RX_XRC_PKT, "rx_xrc_pkt"),
557 HNS_ROCE_HW_CNT(RX_PKT, "rx_pkt"),
558 HNS_ROCE_HW_CNT(RX_ERR_PKT, "rx_err_pkt"),
559 HNS_ROCE_HW_CNT(RX_CNP_PKT, "rx_cnp_pkt"),
560 HNS_ROCE_HW_CNT(TX_RC_PKT, "tx_rc_pkt"),
561 HNS_ROCE_HW_CNT(TX_UC_PKT, "tx_uc_pkt"),
562 HNS_ROCE_HW_CNT(TX_UD_PKT, "tx_ud_pkt"),
563 HNS_ROCE_HW_CNT(TX_XRC_PKT, "tx_xrc_pkt"),
564 HNS_ROCE_HW_CNT(TX_PKT, "tx_pkt"),
565 HNS_ROCE_HW_CNT(TX_ERR_PKT, "tx_err_pkt"),
566 HNS_ROCE_HW_CNT(TX_CNP_PKT, "tx_cnp_pkt"),
567 HNS_ROCE_HW_CNT(TRP_GET_MPT_ERR_PKT, "trp_get_mpt_err_pkt"),
568 HNS_ROCE_HW_CNT(TRP_GET_IRRL_ERR_PKT, "trp_get_irrl_err_pkt"),
569 HNS_ROCE_HW_CNT(ECN_DB, "ecn_doorbell"),
570 HNS_ROCE_HW_CNT(RX_BUF, "rx_buffer"),
571 HNS_ROCE_HW_CNT(TRP_RX_SOF, "trp_rx_sof"),
572 HNS_ROCE_HW_CNT(CQ_CQE, "cq_cqe"),
573 HNS_ROCE_HW_CNT(CQ_POE, "cq_poe"),
574 HNS_ROCE_HW_CNT(CQ_NOTIFY, "cq_notify"),
575};
576
577static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
578 struct ib_device *device, u32 port_num)
579{
580 struct hns_roce_dev *hr_dev = to_hr_dev(device);
581
582 if (port_num > hr_dev->caps.num_ports) {
583 ibdev_err(device, "invalid port num.\n");
584 return NULL;
585 }
586
587 return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
588 ARRAY_SIZE(hns_roce_port_stats_descs),
589 RDMA_HW_STATS_DEFAULT_LIFESPAN);
590}
591
592static int hns_roce_get_hw_stats(struct ib_device *device,
593 struct rdma_hw_stats *stats,
594 u32 port, int index)
595{
596 struct hns_roce_dev *hr_dev = to_hr_dev(device);
597 int num_counters = HNS_ROCE_HW_CNT_TOTAL;
598 int ret;
599
600 if (port == 0)
601 return 0;
602
603 if (port > hr_dev->caps.num_ports)
604 return -EINVAL;
605
606 ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
607 &num_counters);
608 if (ret) {
609 ibdev_err(device, "failed to query hw counter, ret = %d\n",
610 ret);
611 return ret;
612 }
613
614 return num_counters;
615}
616
617static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
618{
619 struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
620
621 hr_dev->active = false;
622 unregister_netdevice_notifier(&iboe->nb);
623 ib_unregister_device(&hr_dev->ib_dev);
624}
625
626static const struct ib_device_ops hns_roce_dev_ops = {
627 .owner = THIS_MODULE,
628 .driver_id = RDMA_DRIVER_HNS,
629 .uverbs_abi_ver = 1,
630 .uverbs_no_driver_id_binding = 1,
631
632 .get_dev_fw_str = hns_roce_get_fw_ver,
633 .add_gid = hns_roce_add_gid,
634 .alloc_pd = hns_roce_alloc_pd,
635 .alloc_ucontext = hns_roce_alloc_ucontext,
636 .create_ah = hns_roce_create_ah,
637 .create_user_ah = hns_roce_create_ah,
638 .create_cq = hns_roce_create_cq,
639 .create_qp = hns_roce_create_qp,
640 .dealloc_pd = hns_roce_dealloc_pd,
641 .dealloc_ucontext = hns_roce_dealloc_ucontext,
642 .del_gid = hns_roce_del_gid,
643 .dereg_mr = hns_roce_dereg_mr,
644 .destroy_ah = hns_roce_destroy_ah,
645 .destroy_cq = hns_roce_destroy_cq,
646 .disassociate_ucontext = hns_roce_disassociate_ucontext,
647 .get_dma_mr = hns_roce_get_dma_mr,
648 .get_link_layer = hns_roce_get_link_layer,
649 .get_port_immutable = hns_roce_port_immutable,
650 .mmap = hns_roce_mmap,
651 .mmap_free = hns_roce_free_mmap,
652 .modify_device = hns_roce_modify_device,
653 .modify_qp = hns_roce_modify_qp,
654 .query_ah = hns_roce_query_ah,
655 .query_device = hns_roce_query_device,
656 .query_pkey = hns_roce_query_pkey,
657 .query_port = hns_roce_query_port,
658 .reg_user_mr = hns_roce_reg_user_mr,
659
660 INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
661 INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
662 INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
663 INIT_RDMA_OBJ_SIZE(ib_qp, hns_roce_qp, ibqp),
664 INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
665};
666
667static const struct ib_device_ops hns_roce_dev_hw_stats_ops = {
668 .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
669 .get_hw_stats = hns_roce_get_hw_stats,
670};
671
672static const struct ib_device_ops hns_roce_dev_mr_ops = {
673 .rereg_user_mr = hns_roce_rereg_user_mr,
674};
675
676static const struct ib_device_ops hns_roce_dev_mw_ops = {
677 .alloc_mw = hns_roce_alloc_mw,
678 .dealloc_mw = hns_roce_dealloc_mw,
679
680 INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
681};
682
683static const struct ib_device_ops hns_roce_dev_frmr_ops = {
684 .alloc_mr = hns_roce_alloc_mr,
685 .map_mr_sg = hns_roce_map_mr_sg,
686};
687
688static const struct ib_device_ops hns_roce_dev_srq_ops = {
689 .create_srq = hns_roce_create_srq,
690 .destroy_srq = hns_roce_destroy_srq,
691
692 INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
693};
694
695static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
696 .alloc_xrcd = hns_roce_alloc_xrcd,
697 .dealloc_xrcd = hns_roce_dealloc_xrcd,
698
699 INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
700};
701
702static const struct ib_device_ops hns_roce_dev_restrack_ops = {
703 .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
704 .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
705 .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
706 .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
707 .fill_res_mr_entry = hns_roce_fill_res_mr_entry,
708 .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
709 .fill_res_srq_entry = hns_roce_fill_res_srq_entry,
710 .fill_res_srq_entry_raw = hns_roce_fill_res_srq_entry_raw,
711};
712
713static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
714{
715 int ret;
716 struct hns_roce_ib_iboe *iboe = NULL;
717 struct ib_device *ib_dev = NULL;
718 struct device *dev = hr_dev->dev;
719 unsigned int i;
720
721 iboe = &hr_dev->iboe;
722 spin_lock_init(&iboe->lock);
723
724 ib_dev = &hr_dev->ib_dev;
725
	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 &&
	    !hr_dev->is_vf)
		ib_set_device_ops(ib_dev, &hns_roce_dev_hw_stats_ops);

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	dma_set_max_seg_size(dev, UINT_MAX);

	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;
	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}

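/*
 * Set up the HEM (Hardware Entry Memory) tables that back the hardware
 * context entries (MTPT, QPC, IRRL, TRRL, CQC, SRQC, SCCC, the QPC/CQC
 * timers and GMV). Optional tables are created only when the corresponding
 * capability flag or entry size is advertised; on failure, the tables
 * already created are torn down in reverse order.
 */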
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts);
	if (ret) {
		dev_err(dev, "failed to init MTPT context memory, aborting.\n");
		return ret;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
				      hr_dev->caps.num_qps);
	if (ret) {
		dev_err(dev, "failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps);
	if (ret) {
		dev_err(dev, "failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps);
		if (ret) {
			dev_err(dev,
				"failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs);
	if (ret) {
		dev_err(dev, "failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs);
		if (ret) {
			dev_err(dev,
				"failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_sz,
					      hr_dev->caps.num_qps);
		if (ret) {
			dev_err(dev,
				"failed to init SCC context memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.qpc_timer_bt_num);
		if (ret) {
			dev_err(dev,
				"failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.cqc_timer_bt_num);
		if (ret) {
			dev_err(dev,
				"failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	if (hr_dev->caps.gmv_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
					      HEM_TYPE_GMV,
					      hr_dev->caps.gmv_entry_sz,
					      hr_dev->caps.gmv_entry_num);
		if (ret) {
			dev_err(dev,
				"failed to init gmv table memory, ret = %d\n",
				ret);
			goto err_unmap_cqc_timer;
		}
	}

	return 0;

err_unmap_cqc_timer:
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);

err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);

err_unmap_srq:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

	return ret;
}

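/*
 * Undo hns_roce_setup_hca(): release the resource bitmaps/ID allocators
 * and, when record doorbells are supported, destroy the pgdir mutex.
 */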
static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
{
	hns_roce_cleanup_bitmap(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
		mutex_destroy(&hr_dev->pgdir_mutex);
}

957/**
958 * hns_roce_setup_hca - setup host channel adapter
959 * @hr_dev: pointer to hns roce device
960 * Return : int
961 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	spin_lock_init(&hr_dev->sm_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	hns_roce_init_uar_table(hr_dev);

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "failed to init qp_table.\n");
		goto err_uar_table_free;
	}

	hns_roce_init_pd_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		hns_roce_init_xrcd_table(hr_dev);

	hns_roce_init_mr_table(hr_dev);

	hns_roce_init_cq_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_init_srq_table(hr_dev);

	return 0;

err_uar_table_free:
	ida_destroy(&hr_dev->uar_ida.ida);
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
		mutex_destroy(&hr_dev->pgdir_mutex);

	return ret;
}

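/*
 * Add @cq to @cq_list exactly once, provided the consumer registered a
 * completion handler; the is_armed flag guards against duplicates.
 */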
static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	unsigned long flags;

	spin_lock_irqsave(&hr_cq->lock, flags);
	if (cq->comp_handler && !hr_cq->is_armed) {
		hr_cq->is_armed = 1;
		list_add_tail(&hr_cq->node, cq_list);
	}
	spin_unlock_irqrestore(&hr_cq->lock, flags);
}

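/*
 * Called on a fatal device error: scan every active QP and raise a software
 * completion event on each CQ that still has outstanding work, so consumers
 * blocked on those CQs can observe the failure and clean up.
 */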
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp *hr_qp;
	struct hns_roce_cq *hr_cq;
	struct list_head cq_list;
	unsigned long flags_qp;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_list);

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
		if (hr_qp->sq.tail != hr_qp->sq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
		if (!hr_qp->ibqp.srq && hr_qp->rq.tail != hr_qp->rq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
	}

	list_for_each_entry(hr_cq, &cq_list, node)
		hns_roce_cq_completion(hr_dev, hr_cq->cqn);

	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

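/* The dfx_cnt array holds the per-device diagnostic (DFX) counters. */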
static int hns_roce_alloc_dfx_cnt(struct hns_roce_dev *hr_dev)
{
	hr_dev->dfx_cnt = kvcalloc(HNS_ROCE_DFX_CNT_TOTAL, sizeof(atomic64_t),
				   GFP_KERNEL);
	if (!hr_dev->dfx_cnt)
		return -ENOMEM;

	return 0;
}

static void hns_roce_dealloc_dfx_cnt(struct hns_roce_dev *hr_dev)
{
	kvfree(hr_dev->dfx_cnt);
}

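/*
 * Bring-up order: DFX counters -> command queue -> engine profile ->
 * mailbox commands -> event queues -> HEM tables -> HCA resources ->
 * engine hw_init -> IB device registration. The error labels below
 * unwind these steps in reverse.
 */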
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	hr_dev->is_reset = false;

	ret = hns_roce_alloc_dfx_cnt(hr_dev);
	if (ret)
		return ret;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "init RoCE Command Queue failed!\n");
			goto error_failed_alloc_dfx_cnt;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/*
	 * EQ setup requires the command channel to be in poll mode;
	 * event-mode commands in turn depend on working EQs.
	 */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret)
			dev_warn(dev,
				 "failed to switch cmd to event mode, falling back to poll mode.\n");
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	hns_roce_register_debugfs(hr_dev);

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_teardown_hca(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

error_failed_alloc_dfx_cnt:
	hns_roce_dealloc_dfx_cnt(hr_dev);

	return ret;
}

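/* Teardown mirrors hns_roce_init() in reverse order. */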
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_debugfs(hr_dev);
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_teardown_hca(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
	hns_roce_dealloc_dfx_cnt(hr_dev);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");