Loading...
Note: File does not exist in v6.9.4.
1// SPDX-License-Identifier: GPL-2.0
2/*******************************************************************************
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "i40evf.h"
29#include "i40e_prototype.h"
30#include "i40evf_client.h"
31
32/* busy wait delay in msec */
33#define I40EVF_BUSY_WAIT_DELAY 10
34#define I40EVF_BUSY_WAIT_COUNT 50
35
36/**
37 * i40evf_send_pf_msg
38 * @adapter: adapter structure
39 * @op: virtual channel opcode
40 * @msg: pointer to message buffer
41 * @len: message length
42 *
43 * Send message to PF and print status if failure.
44 **/
45static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
46 enum virtchnl_ops op, u8 *msg, u16 len)
47{
48 struct i40e_hw *hw = &adapter->hw;
49 i40e_status err;
50
51 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
52 return 0; /* nothing to see here, move along */
53
54 err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
55 if (err)
56 dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
57 op, i40evf_stat_str(hw, err),
58 i40evf_aq_str(hw, hw->aq.asq_last_status));
59 return err;
60}
61
62/**
63 * i40evf_send_api_ver
64 * @adapter: adapter structure
65 *
66 * Send API version admin queue message to the PF. The reply is not checked
67 * in this function. Returns 0 if the message was successfully
68 * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
69 **/
70int i40evf_send_api_ver(struct i40evf_adapter *adapter)
71{
72 struct virtchnl_version_info vvi;
73
74 vvi.major = VIRTCHNL_VERSION_MAJOR;
75 vvi.minor = VIRTCHNL_VERSION_MINOR;
76
77 return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
78 sizeof(vvi));
79}
80
81/**
82 * i40evf_verify_api_ver
83 * @adapter: adapter structure
84 *
85 * Compare API versions with the PF. Must be called after admin queue is
86 * initialized. Returns 0 if API versions match, -EIO if they do not,
87 * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
88 * from the firmware are propagated.
89 **/
90int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
91{
92 struct virtchnl_version_info *pf_vvi;
93 struct i40e_hw *hw = &adapter->hw;
94 struct i40e_arq_event_info event;
95 enum virtchnl_ops op;
96 i40e_status err;
97
98 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
99 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
100 if (!event.msg_buf) {
101 err = -ENOMEM;
102 goto out;
103 }
104
105 while (1) {
106 err = i40evf_clean_arq_element(hw, &event, NULL);
107 /* When the AQ is empty, i40evf_clean_arq_element will return
108 * nonzero and this loop will terminate.
109 */
110 if (err)
111 goto out_alloc;
112 op =
113 (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
114 if (op == VIRTCHNL_OP_VERSION)
115 break;
116 }
117
118
119 err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
120 if (err)
121 goto out_alloc;
122
123 if (op != VIRTCHNL_OP_VERSION) {
124 dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
125 op);
126 err = -EIO;
127 goto out_alloc;
128 }
129
130 pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
131 adapter->pf_version = *pf_vvi;
132
133 if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
134 ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
135 (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
136 err = -EIO;
137
138out_alloc:
139 kfree(event.msg_buf);
140out:
141 return err;
142}
143
144/**
145 * i40evf_send_vf_config_msg
146 * @adapter: adapter structure
147 *
148 * Send VF configuration request admin queue message to the PF. The reply
149 * is not checked in this function. Returns 0 if the message was
150 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
151 **/
152int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
153{
154 u32 caps;
155
156 caps = VIRTCHNL_VF_OFFLOAD_L2 |
157 VIRTCHNL_VF_OFFLOAD_RSS_PF |
158 VIRTCHNL_VF_OFFLOAD_RSS_AQ |
159 VIRTCHNL_VF_OFFLOAD_RSS_REG |
160 VIRTCHNL_VF_OFFLOAD_VLAN |
161 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
162 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
163 VIRTCHNL_VF_OFFLOAD_ENCAP |
164 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
165 VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
166 VIRTCHNL_VF_OFFLOAD_ADQ;
167
168 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
169 adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
170 if (PF_IS_V11(adapter))
171 return i40evf_send_pf_msg(adapter,
172 VIRTCHNL_OP_GET_VF_RESOURCES,
173 (u8 *)&caps, sizeof(caps));
174 else
175 return i40evf_send_pf_msg(adapter,
176 VIRTCHNL_OP_GET_VF_RESOURCES,
177 NULL, 0);
178}
179
/**
 * i40evf_get_vf_config
 * @adapter: private adapter structure
 *
 * Receive the GET_VF_RESOURCES reply from the PF and populate the hw
 * structure. Must be called after the admin queue is initialized and the
 * request has been sent (see i40evf_send_vf_config_msg()). Polls the ARQ
 * until the reply arrives, discarding any other messages seen first; if the
 * queue empties before the reply shows up, the nonzero status from
 * i40evf_clean_arq_element() is returned (there is no explicit timeout in
 * this function itself). The reply is copied into adapter->vf_res for
 * further processing by the caller.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops op;
	i40e_status err;
	u16 len;

	/* worst-case reply: resource header plus one entry per VSI */
	len = sizeof(struct virtchnl_vf_resource) +
		I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	while (1) {
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = i40evf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op =
		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}

	/* the PF reports its status for the request in the low cookie word */
	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	/* never copy more than we allocated, whatever the PF claims it sent */
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
229
/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues. Builds one
 * virtchnl_queue_pair_info entry per active queue pair, describing ring
 * lengths, DMA ring addresses, and RX buffer sizing, and sends the whole
 * table to the PF in a single admin queue message.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	struct virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->num_active_queues;
	int i, len, max_frame = I40E_MAX_RXBUFFER;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	/* variable-length message: header plus one entry per queue pair */
	len = sizeof(struct virtchnl_vsi_queue_config_info) +
		       (sizeof(struct virtchnl_queue_pair_info) * pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	/* Limit maximum frame size when jumbo frames is not enabled */
	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
	    (adapter->netdev->mtu <= ETH_DATA_LEN))
		max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		/* round the buffer size up to the RX queue context
		 * granularity (BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT) bytes)
		 */
		vqpi->rxq.databuffer_size =
			ALIGN(adapter->rx_rings[i].rx_buf_len,
			      BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
		vqpi++;
	}

	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	kfree(vqci);
}
288
289/**
290 * i40evf_enable_queues
291 * @adapter: adapter structure
292 *
293 * Request that the PF enable all of our queues.
294 **/
295void i40evf_enable_queues(struct i40evf_adapter *adapter)
296{
297 struct virtchnl_queue_select vqs;
298
299 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
300 /* bail because we already have a command pending */
301 dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
302 adapter->current_op);
303 return;
304 }
305 adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
306 vqs.vsi_id = adapter->vsi_res->vsi_id;
307 vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
308 vqs.rx_queues = vqs.tx_queues;
309 adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
310 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
311 (u8 *)&vqs, sizeof(vqs));
312}
313
314/**
315 * i40evf_disable_queues
316 * @adapter: adapter structure
317 *
318 * Request that the PF disable all of our queues.
319 **/
320void i40evf_disable_queues(struct i40evf_adapter *adapter)
321{
322 struct virtchnl_queue_select vqs;
323
324 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
325 /* bail because we already have a command pending */
326 dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
327 adapter->current_op);
328 return;
329 }
330 adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
331 vqs.vsi_id = adapter->vsi_res->vsi_id;
332 vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
333 vqs.rx_queues = vqs.tx_queues;
334 adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
335 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
336 (u8 *)&vqs, sizeof(vqs));
337}
338
/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	int v_idx, q_vectors, len;
	struct i40e_q_vector *q_vector;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* vectors available for queue work, i.e. all but the misc vector */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	/* NOTE(review): virtchnl_irq_map_info presumably already contains
	 * one vecmap element, so this likely over-allocates by one entry -
	 * harmless, but confirm against the virtchnl header.
	 */
	len = sizeof(struct virtchnl_irq_map_info) +
	      (adapter->num_msix_vectors *
	       sizeof(struct virtchnl_vector_map));
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		/* queue vectors start after the non-queue (misc) vectors */
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = I40E_RX_ITR;
		vecmap->txitr_idx = I40E_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			   (u8 *)vimi, len);
	kfree(vimi);
}
395
396/**
397 * i40evf_request_queues
398 * @adapter: adapter structure
399 * @num: number of requested queues
400 *
401 * We get a default number of queues from the PF. This enables us to request a
402 * different number. Returns 0 on success, negative on failure
403 **/
404int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
405{
406 struct virtchnl_vf_res_request vfres;
407
408 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
409 /* bail because we already have a command pending */
410 dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
411 adapter->current_op);
412 return -EBUSY;
413 }
414
415 vfres.num_queue_pairs = num;
416
417 adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
418 adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
419 return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
420 (u8 *)&vfres, sizeof(vfres));
421}
422
/**
 * i40evf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add the MAC address filters marked for addition on
 * adapter->mac_filter_list. If the full set does not fit in one admin queue
 * buffer, only as many as fit are sent and the AQ_ADD_MAC_FILTER flag is
 * left set so the remainder are sent on a later pass.
 **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	int len, i = 0, count = 0;
	struct i40evf_mac_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending additions to size the message */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		/* truncate to what fits in one AQ buffer; 'more' keeps the
		 * aq_required flag set so the rest go out on a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}

	/* GFP_ATOMIC: we are holding the mac_vlan_list_lock spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: copy addresses and clear their pending flag */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
			   (u8 *)veal, len);
	kfree(veal);
}
496
/**
 * i40evf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove the MAC address filters marked for removal on
 * adapter->mac_filter_list; the corresponding list entries are freed here.
 * If the full set does not fit in one admin queue buffer, only as many as
 * fit are sent and the AQ_DEL_MAC_FILTER flag is left set so the remainder
 * are sent on a later pass.
 **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct i40evf_mac_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending removals to size the message */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		/* truncate to what fits in one AQ buffer; 'more' keeps the
		 * aq_required flag set so the rest go out on a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}
	/* GFP_ATOMIC: we are holding the mac_vlan_list_lock spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: copy addresses and free the list entries; the _safe
	 * iterator is required because entries are deleted while walking
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
			   (u8 *)veal, len);
	kfree(veal);
}
570
/**
 * i40evf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add the VLAN filters marked for addition on
 * adapter->vlan_filter_list to our VSI. If the full set does not fit in one
 * admin queue buffer, only as many as fit are sent and the
 * AQ_ADD_VLAN_FILTER flag is left set so the remainder are sent later.
 **/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct i40evf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending additions to size the message */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		/* truncate to what fits in one AQ buffer; 'more' keeps the
		 * aq_required flag set so the rest go out on a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}
	/* GFP_ATOMIC: we are holding the mac_vlan_list_lock spinlock */
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	/* second pass: copy VLAN ids and clear their pending flag */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}
642
/**
 * i40evf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove the VLAN filters marked for removal on
 * adapter->vlan_filter_list from our VSI; the corresponding list entries
 * are freed here. If the full set does not fit in one admin queue buffer,
 * only as many as fit are sent and the AQ_DEL_VLAN_FILTER flag is left set
 * so the remainder are sent later.
 **/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	struct i40evf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending removals to size the message */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		/* truncate to what fits in one AQ buffer; 'more' keeps the
		 * aq_required flag set so the rest go out on a later call
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}
	/* GFP_ATOMIC: we are holding the mac_vlan_list_lock spinlock */
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	/* second pass: copy VLAN ids and free the list entries; the _safe
	 * iterator is required because entries are deleted while walking
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}
715
716/**
717 * i40evf_set_promiscuous
718 * @adapter: adapter structure
719 * @flags: bitmask to control unicast/multicast promiscuous.
720 *
721 * Request that the PF enable promiscuous mode for our VSI.
722 **/
723void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
724{
725 struct virtchnl_promisc_info vpi;
726 int promisc_all;
727
728 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
729 /* bail because we already have a command pending */
730 dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
731 adapter->current_op);
732 return;
733 }
734
735 promisc_all = FLAG_VF_UNICAST_PROMISC |
736 FLAG_VF_MULTICAST_PROMISC;
737 if ((flags & promisc_all) == promisc_all) {
738 adapter->flags |= I40EVF_FLAG_PROMISC_ON;
739 adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
740 dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
741 }
742
743 if (flags & FLAG_VF_MULTICAST_PROMISC) {
744 adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
745 adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
746 dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
747 }
748
749 if (!flags) {
750 adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
751 I40EVF_FLAG_ALLMULTI_ON);
752 adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
753 I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
754 dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
755 }
756
757 adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
758 vpi.vsi_id = adapter->vsi_res->vsi_id;
759 vpi.flags = flags;
760 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
761 (u8 *)&vpi, sizeof(vpi));
762}
763
764/**
765 * i40evf_request_stats
766 * @adapter: adapter structure
767 *
768 * Request VSI statistics from PF.
769 **/
770void i40evf_request_stats(struct i40evf_adapter *adapter)
771{
772 struct virtchnl_queue_select vqs;
773
774 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
775 /* no error message, this isn't crucial */
776 return;
777 }
778 adapter->current_op = VIRTCHNL_OP_GET_STATS;
779 vqs.vsi_id = adapter->vsi_res->vsi_id;
780 /* queue maps are ignored for this message - only the vsi is used */
781 if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
782 (u8 *)&vqs, sizeof(vqs)))
783 /* if the request failed, don't lock out others */
784 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
785}
786
787/**
788 * i40evf_get_hena
789 * @adapter: adapter structure
790 *
791 * Request hash enable capabilities from PF
792 **/
793void i40evf_get_hena(struct i40evf_adapter *adapter)
794{
795 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
796 /* bail because we already have a command pending */
797 dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
798 adapter->current_op);
799 return;
800 }
801 adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
802 adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
803 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
804 NULL, 0);
805}
806
807/**
808 * i40evf_set_hena
809 * @adapter: adapter structure
810 *
811 * Request the PF to set our RSS hash capabilities
812 **/
813void i40evf_set_hena(struct i40evf_adapter *adapter)
814{
815 struct virtchnl_rss_hena vrh;
816
817 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
818 /* bail because we already have a command pending */
819 dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
820 adapter->current_op);
821 return;
822 }
823 vrh.hena = adapter->hena;
824 adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
825 adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
826 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
827 (u8 *)&vrh, sizeof(vrh));
828}
829
830/**
831 * i40evf_set_rss_key
832 * @adapter: adapter structure
833 *
834 * Request the PF to set our RSS hash key
835 **/
836void i40evf_set_rss_key(struct i40evf_adapter *adapter)
837{
838 struct virtchnl_rss_key *vrk;
839 int len;
840
841 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
842 /* bail because we already have a command pending */
843 dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
844 adapter->current_op);
845 return;
846 }
847 len = sizeof(struct virtchnl_rss_key) +
848 (adapter->rss_key_size * sizeof(u8)) - 1;
849 vrk = kzalloc(len, GFP_KERNEL);
850 if (!vrk)
851 return;
852 vrk->vsi_id = adapter->vsi.id;
853 vrk->key_len = adapter->rss_key_size;
854 memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
855
856 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
857 adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
858 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
859 (u8 *)vrk, len);
860 kfree(vrk);
861}
862
863/**
864 * i40evf_set_rss_lut
865 * @adapter: adapter structure
866 *
867 * Request the PF to set our RSS lookup table
868 **/
869void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
870{
871 struct virtchnl_rss_lut *vrl;
872 int len;
873
874 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
875 /* bail because we already have a command pending */
876 dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
877 adapter->current_op);
878 return;
879 }
880 len = sizeof(struct virtchnl_rss_lut) +
881 (adapter->rss_lut_size * sizeof(u8)) - 1;
882 vrl = kzalloc(len, GFP_KERNEL);
883 if (!vrl)
884 return;
885 vrl->vsi_id = adapter->vsi.id;
886 vrl->lut_entries = adapter->rss_lut_size;
887 memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
888 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
889 adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
890 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
891 (u8 *)vrl, len);
892 kfree(vrl);
893}
894
895/**
896 * i40evf_enable_vlan_stripping
897 * @adapter: adapter structure
898 *
899 * Request VLAN header stripping to be enabled
900 **/
901void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
902{
903 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
904 /* bail because we already have a command pending */
905 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
906 adapter->current_op);
907 return;
908 }
909 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
910 adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
911 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
912 NULL, 0);
913}
914
915/**
916 * i40evf_disable_vlan_stripping
917 * @adapter: adapter structure
918 *
919 * Request VLAN header stripping to be disabled
920 **/
921void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
922{
923 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
924 /* bail because we already have a command pending */
925 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
926 adapter->current_op);
927 return;
928 }
929 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
930 adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
931 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
932 NULL, 0);
933}
934
935/**
936 * i40evf_print_link_message - print link up or down
937 * @adapter: adapter structure
938 *
939 * Log a message telling the world of our wonderous link status
940 */
941static void i40evf_print_link_message(struct i40evf_adapter *adapter)
942{
943 struct net_device *netdev = adapter->netdev;
944 char *speed = "Unknown ";
945
946 if (!adapter->link_up) {
947 netdev_info(netdev, "NIC Link is Down\n");
948 return;
949 }
950
951 switch (adapter->link_speed) {
952 case I40E_LINK_SPEED_40GB:
953 speed = "40 G";
954 break;
955 case I40E_LINK_SPEED_25GB:
956 speed = "25 G";
957 break;
958 case I40E_LINK_SPEED_20GB:
959 speed = "20 G";
960 break;
961 case I40E_LINK_SPEED_10GB:
962 speed = "10 G";
963 break;
964 case I40E_LINK_SPEED_1GB:
965 speed = "1000 M";
966 break;
967 case I40E_LINK_SPEED_100MB:
968 speed = "100 M";
969 break;
970 default:
971 break;
972 }
973
974 netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
975}
976
/**
 * i40evf_enable_channels
 * @adapter: adapter structure
 *
 * Request that the PF enable channels as specified by
 * the user via tc tool. One virtchnl_channel_info entry is sent per
 * configured traffic class.
 **/
void i40evf_enable_channels(struct i40evf_adapter *adapter)
{
	struct virtchnl_tc_info *vti = NULL;
	u16 len;
	int i;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* variable-length message: header plus one entry per TC */
	len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
	       sizeof(struct virtchnl_tc_info);

	vti = kzalloc(len, GFP_KERNEL);
	if (!vti)
		return;
	vti->num_tc = adapter->num_tc;
	for (i = 0; i < vti->num_tc; i++) {
		vti->list[i].count = adapter->ch_config.ch_info[i].count;
		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
		vti->list[i].pad = 0;
		vti->list[i].max_tx_rate =
				adapter->ch_config.ch_info[i].max_tx_rate;
	}

	/* mark TC state running before the reply; queue/vector layout
	 * changes, so interrupt moderation must be reinitialized
	 */
	adapter->ch_config.state = __I40EVF_TC_RUNNING;
	adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
			   (u8 *)vti, len);
	kfree(vti);
}
1020
1021/**
1022 * i40evf_disable_channel
1023 * @adapter: adapter structure
1024 *
1025 * Request that the PF disable channels that are configured
1026 **/
1027void i40evf_disable_channels(struct i40evf_adapter *adapter)
1028{
1029 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1030 /* bail because we already have a command pending */
1031 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1032 adapter->current_op);
1033 return;
1034 }
1035
1036 adapter->ch_config.state = __I40EVF_TC_INVALID;
1037 adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
1038 adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1039 adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
1040 i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
1041 NULL, 0);
1042}
1043
1044/**
1045 * i40evf_print_cloud_filter
1046 * @adapter: adapter structure
1047 * @f: cloud filter to print
1048 *
1049 * Print the cloud filter
1050 **/
1051static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
1052 struct virtchnl_filter *f)
1053{
1054 switch (f->flow_type) {
1055 case VIRTCHNL_TCP_V4_FLOW:
1056 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
1057 &f->data.tcp_spec.dst_mac,
1058 &f->data.tcp_spec.src_mac,
1059 ntohs(f->data.tcp_spec.vlan_id),
1060 &f->data.tcp_spec.dst_ip[0],
1061 &f->data.tcp_spec.src_ip[0],
1062 ntohs(f->data.tcp_spec.dst_port),
1063 ntohs(f->data.tcp_spec.src_port));
1064 break;
1065 case VIRTCHNL_TCP_V6_FLOW:
1066 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
1067 &f->data.tcp_spec.dst_mac,
1068 &f->data.tcp_spec.src_mac,
1069 ntohs(f->data.tcp_spec.vlan_id),
1070 &f->data.tcp_spec.dst_ip,
1071 &f->data.tcp_spec.src_ip,
1072 ntohs(f->data.tcp_spec.dst_port),
1073 ntohs(f->data.tcp_spec.src_port));
1074 break;
1075 }
1076}
1077
1078/**
1079 * i40evf_add_cloud_filter
1080 * @adapter: adapter structure
1081 *
1082 * Request that the PF add cloud filters as specified
1083 * by the user via tc tool.
1084 **/
1085void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
1086{
1087 struct i40evf_cloud_filter *cf;
1088 struct virtchnl_filter *f;
1089 int len = 0, count = 0;
1090
1091 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1092 /* bail because we already have a command pending */
1093 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1094 adapter->current_op);
1095 return;
1096 }
1097 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1098 if (cf->add) {
1099 count++;
1100 break;
1101 }
1102 }
1103 if (!count) {
1104 adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
1105 return;
1106 }
1107 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1108
1109 len = sizeof(struct virtchnl_filter);
1110 f = kzalloc(len, GFP_KERNEL);
1111 if (!f)
1112 return;
1113
1114 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1115 if (cf->add) {
1116 memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1117 cf->add = false;
1118 cf->state = __I40EVF_CF_ADD_PENDING;
1119 i40evf_send_pf_msg(adapter,
1120 VIRTCHNL_OP_ADD_CLOUD_FILTER,
1121 (u8 *)f, len);
1122 }
1123 }
1124 kfree(f);
1125}
1126
1127/**
1128 * i40evf_del_cloud_filter
1129 * @adapter: adapter structure
1130 *
1131 * Request that the PF delete cloud filters as specified
1132 * by the user via tc tool.
1133 **/
1134void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
1135{
1136 struct i40evf_cloud_filter *cf, *cftmp;
1137 struct virtchnl_filter *f;
1138 int len = 0, count = 0;
1139
1140 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1141 /* bail because we already have a command pending */
1142 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1143 adapter->current_op);
1144 return;
1145 }
1146 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1147 if (cf->del) {
1148 count++;
1149 break;
1150 }
1151 }
1152 if (!count) {
1153 adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
1154 return;
1155 }
1156 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1157
1158 len = sizeof(struct virtchnl_filter);
1159 f = kzalloc(len, GFP_KERNEL);
1160 if (!f)
1161 return;
1162
1163 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1164 if (cf->del) {
1165 memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1166 cf->del = false;
1167 cf->state = __I40EVF_CF_DEL_PENDING;
1168 i40evf_send_pf_msg(adapter,
1169 VIRTCHNL_OP_DEL_CLOUD_FILTER,
1170 (u8 *)f, len);
1171 }
1172 }
1173 kfree(f);
1174}
1175
1176/**
1177 * i40evf_request_reset
1178 * @adapter: adapter structure
1179 *
1180 * Request that the PF reset this VF. No response is expected.
1181 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	/* No reply is expected for a reset request; clear current_op so
	 * the next command is not blocked by whatever was in flight.
	 */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
1188
1189/**
1190 * i40evf_virtchnl_completion
1191 * @adapter: adapter structure
1192 * @v_opcode: opcode sent by PF
1193 * @v_retval: retval sent by PF
1194 * @msg: message sent by PF
1195 * @msglen: message length
1196 *
1197 * Asynchronous completion function for admin queue messages. Rather than busy
1198 * wait, we fire off our requests and assume that no errors will be returned.
1199 * This function handles the reply messages.
1200 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	/* PF events are unsolicited (not replies to a request we sent);
	 * handle them here and return early.
	 */
	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;
		bool link_up = vpe->event_data.link_event.link_status;
		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_speed =
				vpe->event_data.link_event.link_speed;

			/* we've already got the right link status, bail */
			if (adapter->link_up == link_up)
				break;

			if (link_up) {
				/* If we get link up message and start queues
				 * before our queues are configured it will
				 * trigger a TX hang. In that case, just ignore
				 * the link status message,we'll get another one
				 * after we enable queues and actually prepared
				 * to send traffic.
				 */
				if (adapter->state != __I40EVF_RUNNING)
					break;

				/* For ADq enabled VF, we reconfigure VSIs and
				 * re-allocate queues. Hence wait till all
				 * queues are enabled.
				 */
				if (adapter->flags &
				    I40EVF_FLAG_QUEUES_DISABLED)
					break;
			}

			/* Propagate the new link state to the stack. */
			adapter->link_up = link_up;
			if (link_up) {
				netif_tx_start_all_queues(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_tx_stop_all_queues(netdev);
				netif_carrier_off(netdev);
			}
			i40evf_print_link_message(adapter);
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
			/* Schedule the reset task only once per reset cycle. */
			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
				vpe->event);
			break;
		}
		return;
	}
	/* Non-zero retval means the PF rejected our previous request.
	 * Most failures are just logged, but channel and cloud-filter
	 * failures also roll back the corresponding local state.
	 */
	if (v_retval) {
		switch (v_opcode) {
		case VIRTCHNL_OP_ADD_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ADD_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ENABLE_CHANNELS:
			/* ADq setup failed: undo the TC config set when the
			 * request was sent and restart the queues.
			 */
			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __I40EVF_TC_INVALID;
			netdev_reset_tc(netdev);
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_DISABLE_CHANNELS:
			/* Teardown failed: channels are still active, so
			 * restore the RUNNING state.
			 */
			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __I40EVF_TC_RUNNING;
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
			struct i40evf_cloud_filter *cf, *cftmp;

			/* Drop every filter whose add was pending; the PF
			 * refused it, so it must not linger in our list.
			 */
			list_for_each_entry_safe(cf, cftmp,
						 &adapter->cloud_filter_list,
						 list) {
				if (cf->state == __I40EVF_CF_ADD_PENDING) {
					cf->state = __I40EVF_CF_INVALID;
					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
						 i40evf_stat_str(&adapter->hw,
								 v_retval));
					i40evf_print_cloud_filter(adapter,
								  &cf->f);
					list_del(&cf->list);
					kfree(cf);
					adapter->num_cloud_filters--;
				}
			}
			}
			break;
		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
			struct i40evf_cloud_filter *cf;

			/* Deletion failed: the filter is still programmed in
			 * hardware, so mark it active again.
			 */
			list_for_each_entry(cf, &adapter->cloud_filter_list,
					    list) {
				if (cf->state == __I40EVF_CF_DEL_PENDING) {
					cf->state = __I40EVF_CF_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
						 i40evf_stat_str(&adapter->hw,
								 v_retval));
					i40evf_print_cloud_filter(adapter,
								  &cf->f);
				}
			}
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
				v_retval,
				i40evf_stat_str(&adapter->hw, v_retval),
				v_opcode);
		}
	}
	/* Process the reply payload for the command that completed. */
	switch (v_opcode) {
	case VIRTCHNL_OP_GET_STATS: {
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;
		/* Fold the per-VSI HW counters into the netdev stats. */
		netdev->stats.rx_packets = stats->rx_unicast +
					   stats->rx_multicast +
					   stats->rx_broadcast;
		netdev->stats.tx_packets = stats->tx_unicast +
					   stats->tx_multicast +
					   stats->tx_broadcast;
		netdev->stats.rx_bytes = stats->rx_bytes;
		netdev->stats.tx_bytes = stats->tx_bytes;
		netdev->stats.tx_errors = stats->tx_errors;
		netdev->stats.rx_dropped = stats->rx_discards;
		netdev->stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES: {
		/* Cap the copy at the preallocated vf_res size in case the
		 * PF sends more VSIs than we reserved room for.
		 */
		u16 len = sizeof(struct virtchnl_vf_resource) +
			  I40E_MAX_VF_VSI *
			  sizeof(struct virtchnl_vsi_resource);
		memcpy(adapter->vf_res, msg, min(msglen, len));
		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		/* restore current mac address */
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		i40evf_process_config(adapter);
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED;
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
		/* Wake anyone waiting in i40evf_close() for the DOWN state. */
		if (adapter->state == __I40EVF_DOWN_PENDING) {
			adapter->state = __I40EVF_DOWN;
			wake_up(&adapter->down_waitqueue);
		}
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	case VIRTCHNL_OP_IWARP:
		/* Gobble zero-length replies from the PF. They indicate that
		 * a previous message was received OK, and the client doesn't
		 * care about that.
		 */
		if (msglen && CLIENT_ENABLED(adapter))
			i40evf_notify_client_message(&adapter->vsi,
						     msg, msglen);
		break;

	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		adapter->client_pending &=
				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
		/* Only trust the payload if it is exactly the expected size. */
		if (msglen == sizeof(*vrh))
			adapter->hena = vrh->hena;
		else
			dev_warn(&adapter->pdev->dev,
				 "Invalid message %d from PF\n", v_opcode);
		}
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES: {
		struct virtchnl_vf_res_request *vfres =
			(struct virtchnl_vf_res_request *)msg;
		/* If the PF granted fewer queues than requested, cancel the
		 * pending reinit; the reset path will size to what we have.
		 */
		if (vfres->num_queue_pairs != adapter->num_req_queues) {
			dev_info(&adapter->pdev->dev,
				 "Requested %d queues, PF can support %d\n",
				 adapter->num_req_queues,
				 vfres->num_queue_pairs);
			adapter->num_req_queues = 0;
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
		}
		}
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
		struct i40evf_cloud_filter *cf;

		/* PF accepted the filter(s); promote pending adds to active. */
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			if (cf->state == __I40EVF_CF_ADD_PENDING)
				cf->state = __I40EVF_CF_ACTIVE;
		}
		}
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
		struct i40evf_cloud_filter *cf, *cftmp;

		/* PF removed the filter(s); free the pending-delete entries. */
		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
					 list) {
			if (cf->state == __I40EVF_CF_DEL_PENDING) {
				cf->state = __I40EVF_CF_INVALID;
				list_del(&cf->list);
				kfree(cf);
				adapter->num_cloud_filters--;
			}
		}
		}
		break;
	default:
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	/* The outstanding request (if any) is now complete; allow the
	 * watchdog task to issue the next command.
	 */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}