/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"

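/* Register an upper-layer protocol (ULP) driver, e.g. the RoCE driver,
 * with the L2 driver.  For the RoCE ULP this also carves the minimum
 * number of statistics contexts out of the function's total before the
 * ops table is published under RCU.
 */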
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
			     struct bnxt_ulp_ops *ulp_ops, void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
		return -EBUSY;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
		    bp->num_stat_ctxs == max_stat_ctxs)
			return -ENOMEM;
		bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
					    BNXT_MIN_ROCE_STAT_CTXS);
	}

	atomic_set(&ulp->ref_count, 0);
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (ulp_id == BNXT_ROCE_ULP) {
		if (test_bit(BNXT_STATE_OPEN, &bp->state))
			bnxt_hwrm_vnic_cfg(bp, 0);
	}

	return 0;
}

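/* Unregister a ULP driver:  give back the reserved resources, clear the
 * ops pointer under RCU, and poll for up to one second while any
 * remaining references (ulp->ref_count) drain.
 */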
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;
	int i = 0;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (!rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
		return -EINVAL;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
		if (ulp->msix_requested)
			edev->en_ops->bnxt_free_msix(edev, ulp_id);
	}
	if (ulp->max_async_event_id)
		bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
		msleep(100);
		i++;
	}
	return 0;
}

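/* Fill in the MSI-X vector, completion ring index and doorbell offset for
 * each vector in the range reserved for the RoCE ULP.
 */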
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
	struct bnxt_en_dev *edev = bp->edev;
	int num_msix, idx, i;

	num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
	idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
	for (i = 0; i < num_msix; i++) {
		ent[i].vector = bp->irq_tbl[idx + i].vector;
		ent[i].ring_idx = idx + i;
		ent[i].db_offset = (idx + i) * 0x80;
	}
}

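/* Reserve up to num_msix MSI-X vectors for the RoCE ULP.  This may close
 * and reopen the NIC (or re-reserve rings) if more interrupts have to be
 * allocated.  Returns the number of vectors actually granted, or a
 * negative errno.
 */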
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
			      struct bnxt_msix_entry *ent, int num_msix)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	int max_idx, max_cp_rings;
	int avail_msix, idx;
	int rc = 0;

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
		return -ENODEV;

	if (edev->ulp_tbl[ulp_id].msix_requested)
		return -EAGAIN;

	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
	avail_msix = bnxt_get_avail_msix(bp, num_msix);
	if (!avail_msix)
		return -ENOMEM;
	if (avail_msix > num_msix)
		avail_msix = num_msix;

	if (bp->flags & BNXT_FLAG_NEW_RM) {
		idx = bp->cp_nr_rings;
	} else {
		max_idx = min_t(int, bp->total_irqs, max_cp_rings);
		idx = max_idx - avail_msix;
	}
	edev->ulp_tbl[ulp_id].msix_base = idx;
	edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	if (bp->total_irqs < (idx + avail_msix)) {
		if (netif_running(dev)) {
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_reserve_rings(bp);
		}
	}
	if (rc) {
		edev->ulp_tbl[ulp_id].msix_requested = 0;
		return -EAGAIN;
	}

	if (bp->flags & BNXT_FLAG_NEW_RM) {
		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

		avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	}
	bnxt_fill_msix_vecs(bp, ent);
	bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
	bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
	return avail_msix;
}

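/* Return the ULP's MSI-X vectors to the L2 driver and reopen the NIC so
 * that the rings can be redistributed.
 */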
static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	int max_cp_rings, msix_requested;

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return 0;

	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
	msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
	bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
	edev->ulp_tbl[ulp_id].msix_requested = 0;
	bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
	if (netif_running(dev)) {
		bnxt_close_nic(bp, true, false);
		bnxt_open_nic(bp, true, false);
	}
	return 0;
}

int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
	}
	return 0;
}

int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_en_dev *edev = bp->edev;

		if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
			return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
	}
	return 0;
}

void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
{
	ASSERT_RTNL();
	if (bnxt_ulp_registered(bp->edev, ulp_id)) {
		struct bnxt_en_dev *edev = bp->edev;
		unsigned int msix_req, max;

		msix_req = edev->ulp_tbl[ulp_id].msix_requested;
		max = bnxt_get_max_func_cp_rings(bp);
		bnxt_set_max_func_cp_rings(bp, max - msix_req);
		max = bnxt_get_max_func_stat_ctxs(bp);
		bnxt_set_max_func_stat_ctxs(bp, max - 1);
	}
}

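/* Send an HWRM firmware message on behalf of the ULP.  The response is
 * copied back into fw_msg->resp, truncated to fw_msg->resp_max_len.
 */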
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
			 struct bnxt_fw_msg *fw_msg)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct input *req;
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	req = fw_msg->msg;
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
	rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
				fw_msg->timeout);
	if (!rc) {
		struct output *resp = bp->hwrm_cmd_resp_addr;
		u32 len = le16_to_cpu(resp->resp_len);

		if (fw_msg->resp_max_len < len)
			len = fw_msg->resp_max_len;

		memcpy(fw_msg->resp, resp, len);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_ulp_get(struct bnxt_ulp *ulp)
{
	atomic_inc(&ulp->ref_count);
}

static void bnxt_ulp_put(struct bnxt_ulp *ulp)
{
	atomic_dec(&ulp->ref_count);
}

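/* Notify all registered ULPs to quiesce (and, in bnxt_ulp_start() below,
 * to resume) around an L2 reset or reconfiguration.  The ops pointer is
 * read under RTNL, which the callers hold.
 */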
void bnxt_ulp_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_stop)
			continue;
		ops->ulp_stop(ulp->handle);
	}
}

void bnxt_ulp_start(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_start)
			continue;
		ops->ulp_start(ulp->handle);
	}
}

void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		rcu_read_lock();
		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_sriov_config) {
			rcu_read_unlock();
			continue;
		}
		bnxt_ulp_get(ulp);
		rcu_read_unlock();
		ops->ulp_sriov_config(ulp->handle, num_vfs);
		bnxt_ulp_put(ulp);
	}
}

void bnxt_ulp_shutdown(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_shutdown)
			continue;
		ops->ulp_shutdown(ulp->handle);
	}
}

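/* Tell the RoCE ULP to release its IRQs before the L2 driver changes the
 * MSI-X configuration, and hand them back (with refreshed vector info)
 * once the reconfiguration has completed.
 */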
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_stop)
			return;
		ops->ulp_irq_stop(ulp->handle);
	}
}

void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
		struct bnxt_msix_entry *ent = NULL;

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_restart)
			return;

		if (!err) {
			ent = kcalloc(ulp->msix_requested, sizeof(*ent),
				      GFP_KERNEL);
			if (!ent)
				return;
			bnxt_fill_msix_vecs(bp, ent);
		}
		ops->ulp_irq_restart(ulp->handle, ent);
		kfree(ent);
	}
}

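/* Dispatch a firmware async event completion to every ULP that has
 * expressed interest in the event id.  The smp_rmb() pairs with the
 * smp_wmb() in bnxt_register_async_events() so that the bitmap is not
 * tested before max_async_event_id is seen.
 */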
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	rcu_read_lock();
	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_async_notifier)
			continue;
		if (!ulp->async_events_bmap ||
		    event_id > ulp->max_async_event_id)
			continue;

		/* Read max_async_event_id first before testing the bitmap. */
		smp_rmb();
		if (test_bit(event_id, ulp->async_events_bmap))
			ops->ulp_async_notifier(ulp->handle, cmpl);
	}
	rcu_read_unlock();
}

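/* Record which async events the ULP wants to see and register them with
 * the firmware.  The bitmap must be published before max_async_event_id,
 * hence the smp_wmb().
 */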
static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
				      unsigned long *events_bmap, u16 max_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	ulp->async_events_bmap = events_bmap;
	/* Make sure bnxt_ulp_async_events() sees this order */
	smp_wmb();
	ulp->max_async_event_id = max_id;
	bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
	return 0;
}

static const struct bnxt_en_ops bnxt_en_ops_tbl = {
	.bnxt_register_device = bnxt_register_dev,
	.bnxt_unregister_device = bnxt_unregister_dev,
	.bnxt_request_msix = bnxt_req_msix_vecs,
	.bnxt_free_msix = bnxt_free_msix_vecs,
	.bnxt_send_fw_msg = bnxt_send_msg,
	.bnxt_register_fw_async_events = bnxt_register_async_events,
};

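/* Allocate (on first use) and return the bnxt_en_dev handle that the RoCE
 * driver uses to talk to the L2 driver.
 */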
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_en_dev *edev;

	edev = bp->edev;
	if (!edev) {
		edev = kzalloc(sizeof(*edev), GFP_KERNEL);
		if (!edev)
			return ERR_PTR(-ENOMEM);
		edev->en_ops = &bnxt_en_ops_tbl;
		if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
			edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
		if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
			edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
		edev->net = dev;
		edev->pdev = bp->pdev;
		bp->edev = edev;
	}
	return bp->edev;
}
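
/* What follows is a later revision of the same bnxt_ulp.c, reworked to
 * expose the RoCE device through the auxiliary bus and to use the
 * hwrm_req_* firmware messaging API instead of the embedded ops table.
 */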
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"

static DEFINE_IDA(bnxt_aux_dev_ids);

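/* Fill in the MSI-X vector, ring index and doorbell offset for each
 * vector granted to the ULP.  On P5_PLUS chips every entry uses the
 * function's single db_offset; on older chips each ring has its own
 * 0x80-byte doorbell slot.
 */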
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
	struct bnxt_en_dev *edev = bp->edev;
	int num_msix, i;

	if (!edev->ulp_tbl->msix_requested) {
		netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
		return;
	}
	num_msix = edev->ulp_tbl->msix_requested;
	for (i = 0; i < num_msix; i++) {
		ent[i].vector = bp->irq_tbl[i].vector;
		ent[i].ring_idx = i;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			ent[i].db_offset = bp->db_offset;
		else
			ent[i].db_offset = i * 0x80;
	}
}

int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
	if (bp->edev)
		return bp->edev->ulp_num_msix_vec;
	return 0;
}

void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
{
	if (bp->edev)
		bp->edev->ulp_num_msix_vec = num;
}

int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev))
		return bp->edev->ulp_num_msix_vec;
	return 0;
}

int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
	if (bp->edev)
		return bp->edev->ulp_num_ctxs;
	return 0;
}

void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
{
	if (bp->edev)
		bp->edev->ulp_num_ctxs = num_ulp_ctx;
}

int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev))
		return bp->edev->ulp_num_ctxs;
	return 0;
}

void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
{
	if (bp->edev) {
		bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
		/* Reserve one additional stat_ctx for PF0 (except
		 * on 1-port NICs) as it also creates one stat_ctx
		 * for PF1 in case of RoCE bonding.
		 */
		if (BNXT_PF(bp) && !bp->pf.port_id &&
		    bp->port_count > 1)
			bp->edev->ulp_num_ctxs++;
	}
}

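/* Register the RoCE ULP with the L2 driver.  In this revision the
 * registration runs under RTNL plus the per-device en_dev_lock, and the
 * MSI-X count is taken from the vectors reserved earlier
 * (ulp_num_msix_vec) rather than negotiated here.
 */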
int bnxt_register_dev(struct bnxt_en_dev *edev,
		      struct bnxt_ulp_ops *ulp_ops,
		      void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	unsigned int max_stat_ctxs;
	struct bnxt_ulp *ulp;
	int rc = 0;

	rtnl_lock();
	mutex_lock(&edev->en_dev_lock);
	if (!bp->irq_tbl) {
		rc = -ENODEV;
		goto exit;
	}
	max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
	if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
	    bp->cp_nr_rings == max_stat_ctxs) {
		rc = -ENOMEM;
		goto exit;
	}

	ulp = edev->ulp_tbl;
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);

	edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);

	bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
	mutex_unlock(&edev->en_dev_lock);
	rtnl_unlock();
	return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);

void bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;
	int i = 0;

	ulp = edev->ulp_tbl;
	rtnl_lock();
	mutex_lock(&edev->en_dev_lock);
	if (ulp->msix_requested)
		edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
	edev->ulp_tbl->msix_requested = 0;

	if (ulp->max_async_event_id)
		bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
		msleep(100);
		i++;
	}
	mutex_unlock(&edev->en_dev_lock);
	rtnl_unlock();
}
EXPORT_SYMBOL(bnxt_unregister_dev);

static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
	int roce_msix = BNXT_MAX_ROCE_MSIX;

	if (BNXT_VF(bp))
		roce_msix = BNXT_MAX_ROCE_MSIX_VF;
	else if (bp->port_partition_type)
		roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;

	/* NQ MSIX vectors should match the number of CPUs plus 1 more for
	 * the CREQ MSIX, up to the default.
	 */
	return min_t(int, roce_msix, num_online_cpus() + 1);
}

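/* Send an HWRM firmware message on behalf of the ULP using the newer
 * hwrm_req_* API.  Messages are rejected while a firmware reset is in
 * progress.
 */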
int bnxt_send_msg(struct bnxt_en_dev *edev,
		  struct bnxt_fw_msg *fw_msg)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct output *resp;
	struct input *req;
	u32 resp_len;
	int rc;

	if (bp->fw_reset_state)
		return -EBUSY;

	rc = hwrm_req_init(bp, req, 0 /* don't care */);
	if (rc)
		return rc;

	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
	if (rc)
		goto drop_req;

	hwrm_req_timeout(bp, req, fw_msg->timeout);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	resp_len = le16_to_cpu(resp->resp_len);
	if (resp_len) {
		if (fw_msg->resp_max_len < resp_len)
			resp_len = fw_msg->resp_max_len;

		memcpy(fw_msg->resp, resp, resp_len);
	}
drop_req:
	hwrm_req_drop(bp, req);
	return rc;
}
EXPORT_SYMBOL(bnxt_send_msg);

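/* Quiesce the RoCE driver around an L2 reset or reconfiguration.  Instead
 * of per-ULP callbacks, this revision suspends (and, in bnxt_ulp_start(),
 * resumes) the auxiliary device through its driver's PM entry points.
 */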
void bnxt_ulp_stop(struct bnxt *bp)
{
	struct bnxt_aux_priv *aux_priv = bp->aux_priv;
	struct bnxt_en_dev *edev = bp->edev;

	if (!edev)
		return;

	mutex_lock(&edev->en_dev_lock);
	if (!bnxt_ulp_registered(edev)) {
		mutex_unlock(&edev->en_dev_lock);
		return;
	}

	edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
	if (aux_priv) {
		struct auxiliary_device *adev;

		adev = &aux_priv->aux_dev;
		if (adev->dev.driver) {
			const struct auxiliary_driver *adrv;
			pm_message_t pm = {};

			adrv = to_auxiliary_drv(adev->dev.driver);
			edev->en_state = bp->state;
			adrv->suspend(adev, pm);
		}
	}
	mutex_unlock(&edev->en_dev_lock);
}

void bnxt_ulp_start(struct bnxt *bp, int err)
{
	struct bnxt_aux_priv *aux_priv = bp->aux_priv;
	struct bnxt_en_dev *edev = bp->edev;

	if (!edev)
		return;

	edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;

	if (err)
		return;

	mutex_lock(&edev->en_dev_lock);
	if (!bnxt_ulp_registered(edev)) {
		mutex_unlock(&edev->en_dev_lock);
		return;
	}

	if (edev->ulp_tbl->msix_requested)
		bnxt_fill_msix_vecs(bp, edev->msix_entries);

	if (aux_priv) {
		struct auxiliary_device *adev;

		adev = &aux_priv->aux_dev;
		if (adev->dev.driver) {
			const struct auxiliary_driver *adrv;

			adrv = to_auxiliary_drv(adev->dev.driver);
			edev->en_state = bp->state;
			adrv->resume(adev);
		}
	}
	mutex_unlock(&edev->en_dev_lock);
}

void bnxt_ulp_irq_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev)) {
		struct bnxt_ulp *ulp = edev->ulp_tbl;

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_stop)
			return;
		ops->ulp_irq_stop(ulp->handle);
	}
}

void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev)) {
		struct bnxt_ulp *ulp = edev->ulp_tbl;
		struct bnxt_msix_entry *ent = NULL;

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_restart)
			return;

		if (!err) {
			ent = kcalloc(ulp->msix_requested, sizeof(*ent),
				      GFP_KERNEL);
			if (!ent)
				return;
			bnxt_fill_msix_vecs(bp, ent);
		}
		ops->ulp_irq_restart(ulp->handle, ent);
		kfree(ent);
	}
}

int bnxt_register_async_events(struct bnxt_en_dev *edev,
			       unsigned long *events_bmap,
			       u16 max_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ulp = edev->ulp_tbl;
	ulp->async_events_bmap = events_bmap;
	/* Make sure bnxt_ulp_async_events() sees this order */
	smp_wmb();
	ulp->max_async_event_id = max_id;
	bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
	return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);

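/* Life-cycle helpers for the "rdma" auxiliary device.  init/uninit manage
 * the device object itself; add/del make it visible to the auxiliary bus.
 * All driver-private memory is freed from the .release callback.
 */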
void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
{
	struct bnxt_aux_priv *aux_priv;
	struct auxiliary_device *adev;

	/* Skip if no auxiliary device init was done. */
	if (!bp->aux_priv)
		return;

	aux_priv = bp->aux_priv;
	adev = &aux_priv->aux_dev;
	auxiliary_device_uninit(adev);
}

static void bnxt_aux_dev_release(struct device *dev)
{
	struct bnxt_aux_priv *aux_priv =
		container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
	struct bnxt *bp = netdev_priv(aux_priv->edev->net);

	ida_free(&bnxt_aux_dev_ids, aux_priv->id);
	kfree(aux_priv->edev->ulp_tbl);
	bp->edev = NULL;
	kfree(aux_priv->edev);
	kfree(aux_priv);
	bp->aux_priv = NULL;
}

void bnxt_rdma_aux_device_del(struct bnxt *bp)
{
	if (!bp->edev)
		return;

	auxiliary_device_delete(&bp->aux_priv->aux_dev);
}

static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
	edev->net = bp->dev;
	edev->pdev = bp->pdev;
	edev->l2_db_size = bp->db_size;
	edev->l2_db_size_nc = bp->db_size;
	edev->l2_db_offset = bp->db_offset;
	mutex_init(&edev->en_dev_lock);

	if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
		edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
	if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
		edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
	if (bp->flags & BNXT_FLAG_VF)
		edev->flags |= BNXT_EN_FLAG_VF;
	if (BNXT_ROCE_VF_RESC_CAP(bp))
		edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;

	edev->chip_num = bp->chip_num;
	edev->hw_ring_stats_size = bp->hw_ring_stats_size;
	edev->pf_port_id = bp->pf.port_id;
	edev->en_state = bp->state;
	edev->bar0 = bp->bar0;
}

void bnxt_rdma_aux_device_add(struct bnxt *bp)
{
	struct auxiliary_device *aux_dev;
	int rc;

	if (!bp->edev)
		return;

	aux_dev = &bp->aux_priv->aux_dev;
	rc = auxiliary_device_add(aux_dev);
	if (rc) {
		netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
		auxiliary_device_uninit(aux_dev);
		bp->flags &= ~BNXT_FLAG_ROCE_CAP;
	}
}

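/* Create and initialize the "rdma" auxiliary device for a RoCE-capable
 * function.  On any failure the RoCE capability flag is cleared so that
 * the L2 driver continues without RDMA support.
 */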
void bnxt_rdma_aux_device_init(struct bnxt *bp)
{
	struct auxiliary_device *aux_dev;
	struct bnxt_aux_priv *aux_priv;
	struct bnxt_en_dev *edev;
	struct bnxt_ulp *ulp;
	int rc;

	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
		return;

	aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
	if (!aux_priv)
		goto exit;

	aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
	if (aux_priv->id < 0) {
		netdev_warn(bp->dev,
			    "ida alloc failed for ROCE auxiliary device\n");
		kfree(aux_priv);
		goto exit;
	}

	aux_dev = &aux_priv->aux_dev;
	aux_dev->id = aux_priv->id;
	aux_dev->name = "rdma";
	aux_dev->dev.parent = &bp->pdev->dev;
	aux_dev->dev.release = bnxt_aux_dev_release;

	rc = auxiliary_device_init(aux_dev);
	if (rc) {
		ida_free(&bnxt_aux_dev_ids, aux_priv->id);
		kfree(aux_priv);
		goto exit;
	}
	bp->aux_priv = aux_priv;

	/* From this point, all cleanup will happen via the .release callback &
	 * any error unwinding will need to include a call to
	 * auxiliary_device_uninit.
	 */
	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		goto aux_dev_uninit;

	aux_priv->edev = edev;

	ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
	if (!ulp)
		goto aux_dev_uninit;

	edev->ulp_tbl = ulp;
	bp->edev = edev;
	bnxt_set_edev_info(edev, bp);
	bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);

	return;

aux_dev_uninit:
	auxiliary_device_uninit(aux_dev);
exit:
	bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}