// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set default value for ITR setting associated
 * with this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;

	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_calc_q_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	/* The idea here is to subtract the queue offset of the TC that the
	 * ring belongs to from the ring's absolute queue index; the result is
	 * the queue's index within that TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}
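
/*
 * Worked example (hypothetical values, for illustration only): if TC 2 is
 * configured with tc_cfg.tc_info[2].qoffset = 16 and a ring in that TC has
 * q_index = 21, ice_calc_q_handle() returns 21 - 16 = 5, i.e. the ring is
 * the sixth queue of TC 2.
 */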

/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 */
static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Restrict Tx timestamps to the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_ring *ring)
{
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is the Rx queue number in the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* For AF_XDP ZC, we disallow packets that span multiple buffers,
	 * which lets us skip that handling in the fast path.
	 */
	if (ring->xsk_pool)
		chain_len = 1;
	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context, which
	 * allows this driver to select a specific receive descriptor format,
	 * increasing context priority to pick up the profile ID; default is
	 * 0x01; setting it to 0x03 ensures the profile is programmed even if
	 * the previous context is of the same priority
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
					false);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	return 0;
}
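
/*
 * Worked example (hypothetical values, for illustration only): with a
 * 2048-byte Rx buffer, rlan_ctx.dbuf = 2048 / 128 = 16, since the field is
 * expressed in 128-byte units per the comment above, and rxmax ends up as
 * min(vsi->max_frame, chain_len * 2048). With an XSK pool attached,
 * chain_len is 1, so rxmax can never exceed a single buffer.
 */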

/**
 * ice_vsi_cfg_rxq - Configure an Rx queue
 * @ring: the ring being configured
 *
 * Return 0 on success and a negative value on error.
 */
int ice_vsi_cfg_rxq(struct ice_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	int err;

	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			/* coverity[check_return] */
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index, ring->q_vector->napi.napi_id);

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index, ring->q_vector->napi.napi_id);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}

	err = ice_setup_rx_ctx(ring);
	if (err) {
		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
			ring->q_index, err);
		return err;
	}

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (!ok) {
			u16 pf_q = ring->vsi->rxq_map[ring->q_index];

			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		}

		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}
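
/*
 * Illustrative sketch (not part of this file): how a caller such as
 * ice_vsi_cfg_rxqs() in ice_lib.c is expected to drive ice_vsi_cfg_rxq(),
 * one call per Rx ring of the VSI. The helper name and the loop below are
 * assumptions written for illustration; only ice_vsi_cfg_rxq() above is the
 * real API.
 */
static int __maybe_unused ice_example_cfg_all_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	/* configure every allocated Rx ring; stop on the first failure */
	for (i = 0; i < vsi->num_rxq; i++) {
		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);

		if (err)
			return err;
	}

	return 0;
}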

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap.
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}
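
/*
 * Illustrative sketch (assumption, not part of this file): a caller is
 * expected to fill struct ice_qs_cfg with the PF-wide queue bitmap and the
 * VSI's queue map before calling __ice_vsi_get_qs(). The PF/VSI members used
 * below (avail_q_mutex, avail_txqs, max_pf_txqs, alloc_txq, txq_map) and
 * ICE_MAX_SCATTER_TXQS mirror how ice_lib.c typically builds this structure
 * and are assumptions here; the helper name is hypothetical.
 */
static int __maybe_unused ice_example_get_tx_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG,
	};

	/* falls back to the scattered assignment internally if needed */
	return __ice_vsi_get_qs(&tx_qs_cfg);
}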

/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
 * the requested state after multiple retries; else will return 0 in case of
 * success.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}
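
/*
 * Illustrative sketch (assumption, for illustration only): enabling one Rx
 * ring without busy-waiting inside the control call, then confirming the
 * state separately. This mirrors how callers combine
 * ice_vsi_ctrl_one_rx_ring() and ice_vsi_wait_one_rx_ring(); the helper name
 * is hypothetical.
 */
static int __maybe_unused ice_example_start_rx_ring(struct ice_vsi *vsi,
						    u16 rxq_idx)
{
	int err;

	/* request enable, but do not poll for completion yet */
	err = ice_vsi_ctrl_one_rx_ring(vsi, true, rxq_idx, false);
	if (err)
		return err;

	ice_flush(&vsi->back->hw);

	/* now wait for the hardware to report the queue as enabled */
	return ice_vsi_wait_one_rx_ring(vsi, true, rxq_idx);
}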

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign the remaining ring counts to the VSI's queue counts */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}
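
/*
 * Worked example (hypothetical counts, for illustration only): with
 * num_txq = 8 and num_q_vectors = 3, the DIV_ROUND_UP() above spreads the
 * Tx rings as 3, 3 and 2 across the three vectors (DIV_ROUND_UP(8, 3) = 3,
 * then DIV_ROUND_UP(5, 2) = 3, then DIV_ROUND_UP(2, 1) = 2); Rx rings are
 * distributed the same way.
 */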

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u16 pf_q;
	u8 tc;

	/* Configure XPS */
	ice_cfg_xps_tx_ring(ring);

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	ring->q_handle = ice_calc_q_handle(vsi, ring, tc);

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
				 1, qg_buf, buf_len, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
			ice_stat_str(status));
		return -ENODEV;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}
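
/*
 * Illustrative sketch (assumption, not part of this file): a caller such as
 * ice_vsi_cfg_txqs() in ice_lib.c allocates one queue-group buffer and then
 * configures each Tx ring of the VSI with ice_vsi_cfg_txq(). The helper name
 * and the allocate-once/loop pattern below are assumptions written for
 * illustration.
 */
static int __maybe_unused ice_example_cfg_all_txqs(struct ice_vsi *vsi)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	int err = 0;
	u16 i;

	/* room to describe a single queue per admin-queue request */
	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	for (i = 0; i < vsi->num_txq; i++) {
		err = ice_vsi_cfg_txq(vsi, vsi->tx_rings[i], qg_buf);
		if (err)
			break;
	}

	kfree(qg_buf);
	return err;
}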

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}
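
/*
 * Illustrative sketch (assumption, not part of this file): how a caller like
 * ice_vsi_cfg_msix() in ice_lib.c is expected to associate every Tx and Rx
 * queue with its MSI-X vector using the two helpers above. The helper name
 * is hypothetical and the loop is simplified to assume exactly one Tx and
 * one Rx ring per vector.
 */
static void __maybe_unused ice_example_map_qs_to_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];

		/* reg_idx is the vector's index within the function space */
		ice_cfg_txq_interrupt(vsi, v_idx, q_vector->reg_idx,
				      q_vector->tx.itr_idx);
		ice_cfg_rxq_interrupt(vsi, v_idx, q_vector->reg_idx,
				      q_vector->rx.itr_idx);
	}
}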

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated with the queue to schedule the NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == ICE_ERR_RESET_ONGOING) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
			ice_stat_str(status));
		return -ENODEV;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields
 * needed for stopping the Tx queue.
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	txq_meta->vsi_idx = vsi->idx;
	txq_meta->tc = tc;
}
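
/*
 * Illustrative sketch (assumption, not part of this file): stopping a single
 * Tx ring by first snapshotting its metadata with ice_fill_txq_meta() and
 * then issuing the disable through ice_vsi_stop_tx_ring(), as callers in
 * ice_lib.c do. The helper name and the ICE_NO_RESET/0 arguments are
 * assumptions written for illustration.
 */
static int __maybe_unused ice_example_stop_one_tx_ring(struct ice_vsi *vsi,
							struct ice_ring *ring)
{
	struct ice_txq_meta txq_meta = { };

	/* capture q_id, TEID, handle, VSI index and TC before disabling */
	ice_fill_txq_meta(vsi, ring, &txq_meta);

	return ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, ring, &txq_meta);
}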