// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/avf/virtchnl.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "i40e_adminq_cmd.h"
#include "i40e_devids.h"
#include "i40e_prototype.h"
#include "i40e_register.h"

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
int i40e_set_mac_type(struct i40e_hw *hw)
{
	int status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_1G_BASE_T_BC:
		case I40E_DEV_ID_5G_BASE_T_BC:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_BASE_T_BC:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
		case I40E_DEV_ID_SFP_X722_A:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = -ENODEV;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}

/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	/* Check if the queue is initialized */
	if (!hw->aq.asq.count)
		return false;

	return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
int i40e_aq_queue_shutdown(struct i40e_hw *hw,
			   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
				   u16 vsi_id, bool pf_lut,
				   u8 *lut, u16 lut_size,
				   bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
	int status;
	u16 flags;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
		 FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
	cmd_resp->vsi_id = cpu_to_le16(vsi_id);

	if (pf_lut)
		flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
				   I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
	else
		flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
				   I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);

	cmd_resp->flags = cpu_to_le16(flags);
	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
			bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
			bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Internal function to get or set the RSS key per VSI
 **/
static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				   u16 vsi_id,
				   struct i40e_aqc_get_set_rss_key_data *key,
				   bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
	int status;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
		 FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
	cmd_resp->vsi_id = cpu_to_le16(vsi_id);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 **/
int i40e_aq_get_rss_key(struct i40e_hw *hw,
			u16 vsi_id,
			struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
int i40e_aq_set_rss_key(struct i40e_hw *hw,
			u16 vsi_id,
			struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
int i40e_init_shared_code(struct i40e_hw *hw)
{
	u32 port, ari, func_rid;
	int status = 0;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return -ENODEV;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = FIELD_GET(I40E_PFGEN_PORTNUM_PORT_NUM_MASK,
			 rd32(hw, I40E_PFGEN_PORTNUM));
	hw->port = (u8)port;
	ari = FIELD_GET(I40E_GLPCI_CAPSUP_ARI_EN_MASK,
			rd32(hw, I40E_GLPCI_CAPSUP));
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	status = i40e_init_nvm(hw);
	return status;
}

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static int
i40e_aq_mac_address_read(struct i40e_hw *hw,
			 u16 *flags,
			 struct i40e_aqc_mac_address_read_data *addrs,
			 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_mac_address_write(struct i40e_hw *hw,
			      u16 flags, u8 *mac_addr,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	u16 flags = 0;
	int status;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	u16 flags = 0;
	int status;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = -EINVAL;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_get_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 *
 * Reads the part number string from the EEPROM and stores it
 * into newly allocated buffer and saves resulting pointer
 * to i40e_hw->pba_id field.
 **/
void i40e_get_pba_string(struct i40e_hw *hw)
{
#define I40E_NVM_PBA_FLAGS_BLK_PRESENT	0xFAFA
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	int status;
	char *ptr;
	u16 i;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status) {
		hw_dbg(hw, "Failed to read PBA flags.\n");
		return;
	}
	if (pba_word != I40E_NVM_PBA_FLAGS_BLK_PRESENT) {
		hw_dbg(hw, "PBA block is not present.\n");
		return;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size) and advance pointer to first PBA word.
	 */
	pba_size--;
	pba_ptr++;
	if (!pba_size) {
		hw_dbg(hw, "PBA ID is empty.\n");
		return;
	}

	ptr = devm_kzalloc(i40e_hw_to_dev(hw), pba_size * 2 + 1, GFP_KERNEL);
	if (!ptr)
		return;
	hw->pba_id = ptr;

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, pba_ptr + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			devm_kfree(i40e_hw_to_dev(hw), hw->pba_id);
			hw->pba_id = NULL;
			return;
		}

		*ptr++ = (pba_word >> 8) & 0xFF;
		*ptr++ = pba_word & 0xFF;
	}
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static int i40e_poll_globr(struct i40e_hw *hw,
			   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return -EIO;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
int i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = FIELD_GET(I40E_GLGEN_RSTCTL_GRSTDEL_MASK,
			     rd32(hw, I40E_GLGEN_RSTCTL));

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return -EIO;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return -EIO;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return -EIO;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return -EIO;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_PF_N_MASK, val);
	num_vf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_VF_N_MASK, val);

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = FIELD_GET(I40E_PFLAN_QALLOC_FIRSTQ_MASK, val);
	j = FIELD_GET(I40E_PFLAN_QALLOC_LASTQ_MASK, val);
	if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = FIELD_GET(I40E_PF_VT_PFALLOC_FIRSTVF_MASK, val);
	j = FIELD_GET(I40E_PF_VT_PFALLOC_LASTVF_MASK, val);
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
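	/* let the Tx pre-disable notifications settle before stopping queues */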
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
	    !hw->func_caps.led[idx])
		return 0;
	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = FIELD_GET(I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK, gpio_val);

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_FW_LED BIT(4)
#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
			     I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)

#define I40E_LED0 22

#define I40E_PIN_FUNC_SDP 0x0
#define I40E_PIN_FUNC_LED 0x1

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = FIELD_GET(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK, gpio_val);
		break;
	}

	return mode;
}

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * if this function is used to turn on the blink it should
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & ~I40E_LED_MODE_VALID) {
		hw_dbg(hw, "invalid mode passed in %X\n", mode);
		return;
	}

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
			u32 pin_func = 0;

			if (mode & I40E_FW_LED)
				pin_func = I40E_PIN_FUNC_SDP;
			else
				pin_func = I40E_PIN_FUNC_LED;

			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
			gpio_val |=
				FIELD_PREP(I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK,
					   pin_func);
		}
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= FIELD_PREP(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK,
				       mode);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
int
i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
			     bool qualified_modules, bool report_init,
			     struct i40e_aq_get_phy_abilities_resp *abilities,
			     struct i40e_asq_cmd_details *cmd_details)
{
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
	struct i40e_aq_desc desc;
	int status;

	if (!abilities)
		return -EINVAL;

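	/* retry while firmware keeps returning EAGAIN, up to the PHY timeout */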
	do {
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = -EIO;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = -EIO;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
					  I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
int i40e_aq_set_phy_config(struct i40e_hw *hw,
			   struct i40e_aq_set_phy_config *config,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	int status;

	if (!config)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
		   struct i40e_aq_get_phy_abilities_resp *abilities,
		   bool atomic_restart)
{
	struct i40e_aq_set_phy_config config;
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	u8 pause_mask = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities == abilities->abilities)
		return 0;

	/* Auto restart link so settings take effect */
	if (atomic_restart)
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	/* Copy over all the old settings */
	config.phy_type = abilities->phy_type;
	config.phy_type_ext = abilities->phy_type_ext;
	config.link_speed = abilities->link_speed;
	config.eee_capability = abilities->eee_capability;
	config.eeer = abilities->eeer_val;
	config.low_power_ctrl = abilities->d3_lpan;
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;

	return i40e_aq_set_phy_config(hw, &config, NULL);
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
		bool atomic_restart)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	int status;

	*aq_failures = 0x0;

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;

	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;

	return status;
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware that the driver is taking over from PXE
 **/
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_clear_pxe *cmd =
		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_clear_pxe_mode);

	cmd->rx_cnt = 0x2;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);

	return status;
}

/**
 * i40e_aq_set_link_restart_an
 * @hw: pointer to the hw struct
 * @enable_link: if true: enable link, if false: disable link
 * @cmd_details: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 **/
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
				bool enable_link,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_link_restart_an *cmd =
		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_link_restart_an);

	cmd->command = I40E_AQ_PHY_RESTART_AN;
	if (enable_link)
		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
	else
		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_get_link_info
 * @hw: pointer to the hw struct
 * @enable_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the link status of the adapter.
 **/
int i40e_aq_get_link_info(struct i40e_hw *hw,
			  bool enable_lse, struct i40e_link_status *link,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_link_status *resp =
		(struct i40e_aqc_get_link_status *)&desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool tx_pause, rx_pause;
	u16 command_flags;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);

	if (enable_lse)
		command_flags = I40E_AQ_LSE_ENABLE;
	else
		command_flags = I40E_AQ_LSE_DISABLE;
	resp->command_flags = cpu_to_le16(command_flags);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status)
		goto aq_get_link_info_exit;

	/* save off old link status information */
	hw->phy.link_info_old = *hw_link_info;

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
	hw->phy.media_type = i40e_get_media_type(hw);
	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
	hw_link_info->link_info = resp->link_info;
	hw_link_info->an_info = resp->an_info;
	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
						 I40E_AQ_CONFIG_FEC_RS_ENA);
	hw_link_info->ext_info = resp->ext_info;
	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;

	/* update fc info */
	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
	if (tx_pause & rx_pause)
		hw->fc.current_mode = I40E_FC_FULL;
	else if (tx_pause)
		hw->fc.current_mode = I40E_FC_TX_PAUSE;
	else if (rx_pause)
		hw->fc.current_mode = I40E_FC_RX_PAUSE;
	else
		hw->fc.current_mode = I40E_FC_NONE;

	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
		hw_link_info->crc_enable = true;
	else
		hw_link_info->crc_enable = false;

	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
		hw_link_info->lse_enable = true;
	else
		hw_link_info->lse_enable = false;

	if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) &&
	    hw_link_info->phy_type == 0xE)
		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;

	if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) &&
	    hw->mac.type != I40E_MAC_X722) {
		__le32 tmp;

		memcpy(&tmp, resp->link_type, sizeof(tmp));
		hw->phy.phy_types = le32_to_cpu(tmp);
		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
	}

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so helper functions don't call AQ again */
	hw->phy.get_link_info = false;

aq_get_link_info_exit:
	return status;
}

/**
 * i40e_aq_set_phy_int_mask
 * @hw: pointer to the hw struct
 * @mask: interrupt mask to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set link interrupt mask.
 **/
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
			     u16 mask,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_int_mask *cmd =
		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_int_mask);

	cmd->event_mask = cpu_to_le16(mask);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cmd_details: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
			     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_lb_mode *cmd =
		(struct i40e_aqc_set_lb_mode *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
	if (ena_lpbk) {
		if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
		else
			cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
	}

	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}

/**
 * i40e_aq_set_phy_debug
 * @hw: pointer to the hw struct
 * @cmd_flags: debug command flags
 * @cmd_details: pointer to command details structure or NULL
 *
 * Reset the external PHY.
 **/
int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
			  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_phy_debug *cmd =
		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_debug);

	cmd->command_flags = cmd_flags;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_add_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a vsi context struct
 * @cmd_details: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware.
 **/
int i40e_aq_add_vsi(struct i40e_hw *hw,
		    struct i40e_vsi_context *vsi_ctx,
		    struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_get_update_vsi *cmd =
		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
	struct i40e_aqc_add_get_update_vsi_completion *resp =
		(struct i40e_aqc_add_get_update_vsi_completion *)
		&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_vsi);

	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
	cmd->connection_type = vsi_ctx->connection_type;
	cmd->vf_id = vsi_ctx->vf_num;
	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

	status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
					      sizeof(vsi_ctx->info),
					      cmd_details, true);

	if (status)
		goto aq_add_vsi_exit;

	vsi_ctx->seid = le16_to_cpu(resp->seid);
	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);

aq_add_vsi_exit:
	return status;
}

/**
 * i40e_aq_set_default_vsi
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_default_vsi(struct i40e_hw *hw,
			    u16 seid,
			    struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)
		&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_clear_default_vsi
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
			      u16 seid,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)
		&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	cmd->promiscuous_flags = cpu_to_le16(0);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
	cmd->seid = cpu_to_le16(seid);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_unicast_promiscuous
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set: set unicast promiscuous enable/disable
 * @cmd_details: pointer to command details structure or NULL
 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
 **/
int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
					u16 seid, bool set,
					struct i40e_asq_cmd_details *cmd_details,
					bool rx_only_promisc)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set) {
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
		if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
	}

	cmd->promiscuous_flags = cpu_to_le16(flags);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
		cmd->valid_flags |=
			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);

	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_multicast_promiscuous
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set: set multicast promiscuous enable/disable
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
					  u16 seid, bool set,
					  struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);

	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_mc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
				       u16 seid, bool enable,
				       u16 vid,
				       struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
					      cmd_details, true);

	return status;
}

/**
 * i40e_aq_set_vsi_uc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
				       u16 seid, bool enable,
				       u16 vid,
				       struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable) {
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
		if (i40e_is_aq_api_ver_ge(hw, 1, 5))
			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
	}

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	if (i40e_is_aq_api_ver_ge(hw, 1, 5))
		cmd->valid_flags |=
			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
					      cmd_details, true);

	return status;
}

/**
 * i40e_aq_set_vsi_bc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set broadcast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
				       u16 seid, bool enable, u16 vid,
				       struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	u16 flags = 0;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_broadcast
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @set_filter: true to set filter, false to clear filter
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
 **/
int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
			      u16 seid, bool set_filter,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	int status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (set_filter)
		cmd->promiscuous_flags
			|= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	else
		cmd->promiscuous_flags
			&= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);

	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: enable or disable VLAN promiscuous mode
1812 * @cmd_details: pointer to command details structure or NULL
1813 **/
1814int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
1815 u16 seid, bool enable,
1816 struct i40e_asq_cmd_details *cmd_details)
1817{
1818 struct i40e_aq_desc desc;
1819 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1820 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1821 u16 flags = 0;
1822 int status;
1823
1824 i40e_fill_default_direct_cmd_desc(&desc,
1825 i40e_aqc_opc_set_vsi_promiscuous_modes);
1826 if (enable)
1827 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
1828
1829 cmd->promiscuous_flags = cpu_to_le16(flags);
1830 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
1831 cmd->seid = cpu_to_le16(seid);
1832
1833 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1834
1835 return status;
1836}
1837
1838/**
1839 * i40e_aq_get_vsi_params - get VSI configuration info
1840 * @hw: pointer to the hw struct
1841 * @vsi_ctx: pointer to a vsi context struct
1842 * @cmd_details: pointer to command details structure or NULL
1843 **/
1844int i40e_aq_get_vsi_params(struct i40e_hw *hw,
1845 struct i40e_vsi_context *vsi_ctx,
1846 struct i40e_asq_cmd_details *cmd_details)
1847{
1848 struct i40e_aq_desc desc;
1849 struct i40e_aqc_add_get_update_vsi *cmd =
1850 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1851 struct i40e_aqc_add_get_update_vsi_completion *resp =
1852 (struct i40e_aqc_add_get_update_vsi_completion *)
1853 &desc.params.raw;
1854 int status;
1855
1856 i40e_fill_default_direct_cmd_desc(&desc,
1857 i40e_aqc_opc_get_vsi_parameters);
1858
1859 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
1860
1861 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1862
1863 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1864 sizeof(vsi_ctx->info), NULL);
1865
1866 if (status)
1867 goto aq_get_vsi_params_exit;
1868
1869 vsi_ctx->seid = le16_to_cpu(resp->seid);
1870 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1871 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1872 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1873
1874aq_get_vsi_params_exit:
1875 return status;
1876}
1877
1878/**
1879 * i40e_aq_update_vsi_params
1880 * @hw: pointer to the hw struct
1881 * @vsi_ctx: pointer to a vsi context struct
1882 * @cmd_details: pointer to command details structure or NULL
1883 *
1884 * Update a VSI context.
1885 **/
1886int i40e_aq_update_vsi_params(struct i40e_hw *hw,
1887 struct i40e_vsi_context *vsi_ctx,
1888 struct i40e_asq_cmd_details *cmd_details)
1889{
1890 struct i40e_aq_desc desc;
1891 struct i40e_aqc_add_get_update_vsi *cmd =
1892 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1893 struct i40e_aqc_add_get_update_vsi_completion *resp =
1894 (struct i40e_aqc_add_get_update_vsi_completion *)
1895 &desc.params.raw;
1896 int status;
1897
1898 i40e_fill_default_direct_cmd_desc(&desc,
1899 i40e_aqc_opc_update_vsi_parameters);
1900 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
1901
1902 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1903
1904 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
1905 sizeof(vsi_ctx->info),
1906 cmd_details, true);
1907
1908 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1909 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1910
1911 return status;
1912}
1913
1914/**
1915 * i40e_aq_get_switch_config
1916 * @hw: pointer to the hardware structure
1917 * @buf: pointer to the result buffer
1918 * @buf_size: length of input buffer
1919 * @start_seid: seid to start for the report, 0 == beginning
1920 * @cmd_details: pointer to command details structure or NULL
1921 *
1922 * Fill the buf with switch configuration returned from AdminQ command
1923 **/
1924int i40e_aq_get_switch_config(struct i40e_hw *hw,
1925 struct i40e_aqc_get_switch_config_resp *buf,
1926 u16 buf_size, u16 *start_seid,
1927 struct i40e_asq_cmd_details *cmd_details)
1928{
1929 struct i40e_aq_desc desc;
1930 struct i40e_aqc_switch_seid *scfg =
1931 (struct i40e_aqc_switch_seid *)&desc.params.raw;
1932 int status;
1933
1934 i40e_fill_default_direct_cmd_desc(&desc,
1935 i40e_aqc_opc_get_switch_config);
1936 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1937 if (buf_size > I40E_AQ_LARGE_BUF)
1938 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1939 scfg->seid = cpu_to_le16(*start_seid);
1940
1941 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
1942 *start_seid = le16_to_cpu(scfg->seid);
1943
1944 return status;
1945}
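
/*
 * Usage sketch (illustrative only, not part of the driver): the AQ reports
 * the switch configuration one buffer at a time, and a non-zero start_seid
 * on return means more elements remain. A hypothetical caller could page
 * through the report like this ("buf" and "next_seid" are assumed locals):
 *
 *	struct i40e_aqc_get_switch_config_resp *buf;
 *	u16 next_seid = 0;
 *	int ret;
 *
 *	buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	do {
 *		ret = i40e_aq_get_switch_config(hw, buf, I40E_AQ_LARGE_BUF,
 *						&next_seid, NULL);
 *		if (ret)
 *			break;
 *		(walk the returned switch elements here)
 *	} while (next_seid != 0);
 *	kfree(buf);
 */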
1946
1947/**
1948 * i40e_aq_set_switch_config
1949 * @hw: pointer to the hardware structure
1950 * @flags: bit flag values to set
1952 * @valid_flags: which bit flags to set
1953 * @mode: cloud filter mode
1954 * @cmd_details: pointer to command details structure or NULL
1955 *
1956 * Set switch configuration bits
1957 **/
1958int i40e_aq_set_switch_config(struct i40e_hw *hw,
1959 u16 flags,
1960 u16 valid_flags, u8 mode,
1961 struct i40e_asq_cmd_details *cmd_details)
1962{
1963 struct i40e_aq_desc desc;
1964 struct i40e_aqc_set_switch_config *scfg =
1965 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
1966 int status;
1967
1968 i40e_fill_default_direct_cmd_desc(&desc,
1969 i40e_aqc_opc_set_switch_config);
1970 scfg->flags = cpu_to_le16(flags);
1971 scfg->valid_flags = cpu_to_le16(valid_flags);
1972 scfg->mode = mode;
1973 if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) {
1974 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
1975 scfg->first_tag = cpu_to_le16(hw->first_tag);
1976 scfg->second_tag = cpu_to_le16(hw->second_tag);
1977 }
1978 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1979
1980 return status;
1981}
1982
1983/**
1984 * i40e_aq_get_firmware_version
1985 * @hw: pointer to the hw struct
1986 * @fw_major_version: firmware major version
1987 * @fw_minor_version: firmware minor version
1988 * @fw_build: firmware build number
1989 * @api_major_version: admin queue API major version
1990 * @api_minor_version: admin queue API minor version
1991 * @cmd_details: pointer to command details structure or NULL
1992 *
1993 * Get the firmware version from the admin queue commands
1994 **/
1995int i40e_aq_get_firmware_version(struct i40e_hw *hw,
1996 u16 *fw_major_version, u16 *fw_minor_version,
1997 u32 *fw_build,
1998 u16 *api_major_version, u16 *api_minor_version,
1999 struct i40e_asq_cmd_details *cmd_details)
2000{
2001 struct i40e_aq_desc desc;
2002 struct i40e_aqc_get_version *resp =
2003 (struct i40e_aqc_get_version *)&desc.params.raw;
2004 int status;
2005
2006 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2007
2008 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2009
2010 if (!status) {
2011 if (fw_major_version)
2012 *fw_major_version = le16_to_cpu(resp->fw_major);
2013 if (fw_minor_version)
2014 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2015 if (fw_build)
2016 *fw_build = le32_to_cpu(resp->fw_build);
2017 if (api_major_version)
2018 *api_major_version = le16_to_cpu(resp->api_major);
2019 if (api_minor_version)
2020 *api_minor_version = le16_to_cpu(resp->api_minor);
2021 }
2022
2023 return status;
2024}
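
/*
 * Usage sketch (illustrative only, not part of the driver): any of the
 * output pointers may be NULL if the caller does not care about that field.
 * Assuming an initialized "hw", a caller might log the versions like this:
 *
 *	u16 fw_major, fw_minor, api_major, api_minor;
 *	u32 fw_build;
 *
 *	if (!i40e_aq_get_firmware_version(hw, &fw_major, &fw_minor, &fw_build,
 *					  &api_major, &api_minor, NULL))
 *		hw_dbg(hw, "fw %u.%u build %u, AQ API %u.%u\n",
 *		       fw_major, fw_minor, fw_build,
 *		       api_major, api_minor);
 */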
2025
2026/**
2027 * i40e_aq_send_driver_version
2028 * @hw: pointer to the hw struct
2029 * @dv: driver version info (major, minor, build, subbuild and driver string)
2030 * @cmd_details: pointer to command details structure or NULL
2031 *
2032 * Send the driver version to the firmware
2033 **/
2034int i40e_aq_send_driver_version(struct i40e_hw *hw,
2035 struct i40e_driver_version *dv,
2036 struct i40e_asq_cmd_details *cmd_details)
2037{
2038 struct i40e_aq_desc desc;
2039 struct i40e_aqc_driver_version *cmd =
2040 (struct i40e_aqc_driver_version *)&desc.params.raw;
2041 int status;
2042 u16 len;
2043
2044 if (dv == NULL)
2045 return -EINVAL;
2046
2047 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2048
2049 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2050 cmd->driver_major_ver = dv->major_version;
2051 cmd->driver_minor_ver = dv->minor_version;
2052 cmd->driver_build_ver = dv->build_version;
2053 cmd->driver_subbuild_ver = dv->subbuild_version;
2054
2055 len = 0;
2056 while (len < sizeof(dv->driver_string) &&
2057 (dv->driver_string[len] < 0x80) &&
2058 dv->driver_string[len])
2059 len++;
2060 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2061 len, cmd_details);
2062
2063 return status;
2064}
2065
2066/**
2067 * i40e_get_link_status - get status of the HW network link
2068 * @hw: pointer to the hw struct
2069 * @link_up: pointer to bool (true/false = linkup/linkdown)
2070 *
2071 * Sets *link_up to true if the link is up, false if it is down.
2072 * The value of *link_up is invalid if the returned status is non-zero.
2073 *
2074 * Side effect: LinkStatusEvent reporting becomes enabled
2075 **/
2076int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2077{
2078 int status = 0;
2079
2080 if (hw->phy.get_link_info) {
2081 status = i40e_update_link_info(hw);
2082
2083 if (status)
2084 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2085 status);
2086 }
2087
2088 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2089
2090 return status;
2091}
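
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * only needs a boolean link state could do the following; link_up must be
 * ignored when the function returns a non-zero status:
 *
 *	bool link_up = false;
 *
 *	if (!i40e_get_link_status(hw, &link_up) && link_up)
 *		(link is up, proceed)
 */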
2092
2093/**
2094 * i40e_update_link_info - update status of the HW network link
2095 * @hw: pointer to the hw struct
2096 **/
2097noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
2098{
2099 struct i40e_aq_get_phy_abilities_resp abilities;
2100 int status = 0;
2101
2102 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2103 if (status)
2104 return status;
2105
2106	/* extra checking needed to ensure link info reported to user is current */
2107 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2108 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2109 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2110 status = i40e_aq_get_phy_capabilities(hw, false, false,
2111 &abilities, NULL);
2112 if (status)
2113 return status;
2114
2115 if (abilities.fec_cfg_curr_mod_ext_info &
2116 I40E_AQ_ENABLE_FEC_AUTO)
2117 hw->phy.link_info.req_fec_info =
2118 (I40E_AQ_REQUEST_FEC_KR |
2119 I40E_AQ_REQUEST_FEC_RS);
2120 else
2121 hw->phy.link_info.req_fec_info =
2122 abilities.fec_cfg_curr_mod_ext_info &
2123 (I40E_AQ_REQUEST_FEC_KR |
2124 I40E_AQ_REQUEST_FEC_RS);
2125
2126 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2127 sizeof(hw->phy.link_info.module_type));
2128 }
2129
2130 return status;
2131}
2132
2133/**
2134 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2135 * @hw: pointer to the hw struct
2136 * @uplink_seid: the MAC or other gizmo SEID
2137 * @downlink_seid: the VSI SEID
2138 * @enabled_tc: bitmap of TCs to be enabled
2139 * @default_port: true for default port VSI, false for control port
2140 * @veb_seid: pointer to where to put the resulting VEB SEID
2141 * @enable_stats: true to turn on VEB stats
2142 * @cmd_details: pointer to command details structure or NULL
2143 *
2144 * This asks the FW to add a VEB between the uplink and downlink
2145 * elements. If the uplink SEID is 0, this will be a floating VEB.
2146 **/
2147int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2148 u16 downlink_seid, u8 enabled_tc,
2149 bool default_port, u16 *veb_seid,
2150 bool enable_stats,
2151 struct i40e_asq_cmd_details *cmd_details)
2152{
2153 struct i40e_aq_desc desc;
2154 struct i40e_aqc_add_veb *cmd =
2155 (struct i40e_aqc_add_veb *)&desc.params.raw;
2156 struct i40e_aqc_add_veb_completion *resp =
2157 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2158 u16 veb_flags = 0;
2159 int status;
2160
2161 /* SEIDs need to either both be set or both be 0 for floating VEB */
2162 if (!!uplink_seid != !!downlink_seid)
2163 return -EINVAL;
2164
2165 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2166
2167 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2168 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2169 cmd->enable_tcs = enabled_tc;
2170 if (!uplink_seid)
2171 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2172 if (default_port)
2173 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2174 else
2175 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2176
2177 /* reverse logic here: set the bitflag to disable the stats */
2178 if (!enable_stats)
2179 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2180
2181 cmd->veb_flags = cpu_to_le16(veb_flags);
2182
2183 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2184
2185 if (!status && veb_seid)
2186 *veb_seid = le16_to_cpu(resp->veb_seid);
2187
2188 return status;
2189}
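
/*
 * Usage sketch (illustrative only, not part of the driver): creating a
 * regular (non-floating) VEB between a MAC uplink and a VSI downlink and
 * retrieving the new SEID. "mac_seid", "vsi_seid" and "enabled_tc" are
 * assumed caller values:
 *
 *	u16 veb_seid = 0;
 *	int ret;
 *
 *	ret = i40e_aq_add_veb(hw, mac_seid, vsi_seid, enabled_tc,
 *			      false, &veb_seid, true, NULL);
 *	if (!ret)
 *		(veb_seid now identifies the new VEB)
 *
 * Passing 0 for both uplink and downlink SEIDs creates a floating VEB
 * instead; mixing a zero and a non-zero SEID is rejected with -EINVAL.
 */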
2190
2191/**
2192 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2193 * @hw: pointer to the hw struct
2194 * @veb_seid: the SEID of the VEB to query
2195 * @switch_id: the uplink switch id
2196 * @floating: set to true if the VEB is floating
2197 * @statistic_index: index of the stats counter block for this VEB
2198 * @vebs_used: number of VEBs used by the function
2199 * @vebs_free: total VEBs not reserved by any function
2200 * @cmd_details: pointer to command details structure or NULL
2201 *
2202 * This retrieves the parameters for a particular VEB, specified by
2203 * veb_seid, and returns them to the caller.
2204 **/
2205int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2206 u16 veb_seid, u16 *switch_id,
2207 bool *floating, u16 *statistic_index,
2208 u16 *vebs_used, u16 *vebs_free,
2209 struct i40e_asq_cmd_details *cmd_details)
2210{
2211 struct i40e_aq_desc desc;
2212 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2213 (struct i40e_aqc_get_veb_parameters_completion *)
2214 &desc.params.raw;
2215 int status;
2216
2217 if (veb_seid == 0)
2218 return -EINVAL;
2219
2220 i40e_fill_default_direct_cmd_desc(&desc,
2221 i40e_aqc_opc_get_veb_parameters);
2222 cmd_resp->seid = cpu_to_le16(veb_seid);
2223
2224 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2225 if (status)
2226 goto get_veb_exit;
2227
2228 if (switch_id)
2229 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2230 if (statistic_index)
2231 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2232 if (vebs_used)
2233 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2234 if (vebs_free)
2235 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2236 if (floating) {
2237 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2238
2239 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2240 *floating = true;
2241 else
2242 *floating = false;
2243 }
2244
2245get_veb_exit:
2246 return status;
2247}
2248
2249/**
2250 * i40e_prepare_add_macvlan
2251 * @mv_list: list of macvlans to be added
2252 * @desc: pointer to AQ descriptor structure
2253 * @count: length of the list
2254 * @seid: VSI for the mac address
2255 *
2256 * Internal helper function that prepares the add macvlan request
2257 * and returns the buffer size.
2258 **/
2259static u16
2260i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
2261 struct i40e_aq_desc *desc, u16 count, u16 seid)
2262{
2263 struct i40e_aqc_macvlan *cmd =
2264 (struct i40e_aqc_macvlan *)&desc->params.raw;
2265 u16 buf_size;
2266 int i;
2267
2268 buf_size = count * sizeof(*mv_list);
2269
2270 /* prep the rest of the request */
2271 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
2272 cmd->num_addresses = cpu_to_le16(count);
2273 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2274 cmd->seid[1] = 0;
2275 cmd->seid[2] = 0;
2276
2277 for (i = 0; i < count; i++)
2278 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2279 mv_list[i].flags |=
2280 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2281
2282 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2283 if (buf_size > I40E_AQ_LARGE_BUF)
2284 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2285
2286 return buf_size;
2287}
2288
2289/**
2290 * i40e_aq_add_macvlan
2291 * @hw: pointer to the hw struct
2292 * @seid: VSI for the mac address
2293 * @mv_list: list of macvlans to be added
2294 * @count: length of the list
2295 * @cmd_details: pointer to command details structure or NULL
2296 *
2297 * Add MAC/VLAN addresses to the HW filtering
2298 **/
2299int
2300i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2301 struct i40e_aqc_add_macvlan_element_data *mv_list,
2302 u16 count, struct i40e_asq_cmd_details *cmd_details)
2303{
2304 struct i40e_aq_desc desc;
2305 u16 buf_size;
2306
2307 if (count == 0 || !mv_list || !hw)
2308 return -EINVAL;
2309
2310 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2311
2312 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2313 cmd_details, true);
2314}
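
/*
 * Usage sketch (illustrative only, not part of the driver): adding a single
 * perfect-match MAC filter on a VSI. "vsi_seid" and "addr" are assumed
 * caller values; multicast entries get the shared-MAC flag added
 * automatically by i40e_prepare_add_macvlan():
 *
 *	struct i40e_aqc_add_macvlan_element_data elem = {};
 *	int ret;
 *
 *	ether_addr_copy(elem.mac_addr, addr);
 *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
 *	ret = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
 */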
2315
2316/**
2317 * i40e_aq_add_macvlan_v2
2318 * @hw: pointer to the hw struct
2319 * @seid: VSI for the mac address
2320 * @mv_list: list of macvlans to be added
2321 * @count: length of the list
2322 * @cmd_details: pointer to command details structure or NULL
2323 * @aq_status: pointer to Admin Queue status return value
2324 *
2325 * Add MAC/VLAN addresses to the HW filtering.
2326 * The _v2 version returns the last Admin Queue status in aq_status
2327 * to avoid race conditions in access to hw->aq.asq_last_status.
2328 * It also calls _v2 versions of asq_send_command functions to
2329 * get the aq_status on the stack.
2330 **/
2331int
2332i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
2333 struct i40e_aqc_add_macvlan_element_data *mv_list,
2334 u16 count, struct i40e_asq_cmd_details *cmd_details,
2335 enum i40e_admin_queue_err *aq_status)
2336{
2337 struct i40e_aq_desc desc;
2338 u16 buf_size;
2339
2340 if (count == 0 || !mv_list || !hw)
2341 return -EINVAL;
2342
2343 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2344
2345 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2346 cmd_details, true, aq_status);
2347}
2348
2349/**
2350 * i40e_aq_remove_macvlan
2351 * @hw: pointer to the hw struct
2352 * @seid: VSI for the mac address
2353 * @mv_list: list of macvlans to be removed
2354 * @count: length of the list
2355 * @cmd_details: pointer to command details structure or NULL
2356 *
2357 * Remove MAC/VLAN addresses from the HW filtering
2358 **/
2359int
2360i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2361 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2362 u16 count, struct i40e_asq_cmd_details *cmd_details)
2363{
2364 struct i40e_aq_desc desc;
2365 struct i40e_aqc_macvlan *cmd =
2366 (struct i40e_aqc_macvlan *)&desc.params.raw;
2367 u16 buf_size;
2368 int status;
2369
2370 if (count == 0 || !mv_list || !hw)
2371 return -EINVAL;
2372
2373 buf_size = count * sizeof(*mv_list);
2374
2375 /* prep the rest of the request */
2376 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2377 cmd->num_addresses = cpu_to_le16(count);
2378 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2379 cmd->seid[1] = 0;
2380 cmd->seid[2] = 0;
2381
2382 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2383 if (buf_size > I40E_AQ_LARGE_BUF)
2384 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2385
2386 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2387 cmd_details, true);
2388
2389 return status;
2390}
2391
2392/**
2393 * i40e_aq_remove_macvlan_v2
2394 * @hw: pointer to the hw struct
2395 * @seid: VSI for the mac address
2396 * @mv_list: list of macvlans to be removed
2397 * @count: length of the list
2398 * @cmd_details: pointer to command details structure or NULL
2399 * @aq_status: pointer to Admin Queue status return value
2400 *
2401 * Remove MAC/VLAN addresses from the HW filtering.
2402 * The _v2 version returns the last Admin Queue status in aq_status
2403 * to avoid race conditions in access to hw->aq.asq_last_status.
2404 * It also calls _v2 versions of asq_send_command functions to
2405 * get the aq_status on the stack.
2406 **/
2407int
2408i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
2409 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2410 u16 count, struct i40e_asq_cmd_details *cmd_details,
2411 enum i40e_admin_queue_err *aq_status)
2412{
2413 struct i40e_aqc_macvlan *cmd;
2414 struct i40e_aq_desc desc;
2415 u16 buf_size;
2416
2417 if (count == 0 || !mv_list || !hw)
2418 return -EINVAL;
2419
2420 buf_size = count * sizeof(*mv_list);
2421
2422 /* prep the rest of the request */
2423 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2424 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
2425 cmd->num_addresses = cpu_to_le16(count);
2426 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2427 cmd->seid[1] = 0;
2428 cmd->seid[2] = 0;
2429
2430 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2431 if (buf_size > I40E_AQ_LARGE_BUF)
2432 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2433
2434 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2435 cmd_details, true, aq_status);
2436}
2437
2438/**
2439 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2440 * @hw: pointer to the hw struct
2441 * @opcode: AQ opcode for add or delete mirror rule
2442 * @sw_seid: Switch SEID (to which rule refers)
2443 * @rule_type: Rule Type (ingress/egress/VLAN)
2444 * @id: Destination VSI SEID or Rule ID
2445 * @count: length of the list
2446 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2447 * @cmd_details: pointer to command details structure or NULL
2448 * @rule_id: Rule ID returned from FW
2449 * @rules_used: Number of rules used in internal switch
2450 * @rules_free: Number of rules free in internal switch
2451 *
2452 * Add or delete a mirror rule on a specific switch. Mirror rules are
2453 * supported for VEB/VEPA elements only.
2454 **/
2455static int i40e_mirrorrule_op(struct i40e_hw *hw,
2456 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2457 u16 count, __le16 *mr_list,
2458 struct i40e_asq_cmd_details *cmd_details,
2459 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2460{
2461 struct i40e_aq_desc desc;
2462 struct i40e_aqc_add_delete_mirror_rule *cmd =
2463 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2464 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2465 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2466 u16 buf_size;
2467 int status;
2468
2469 buf_size = count * sizeof(*mr_list);
2470
2471 /* prep the rest of the request */
2472 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2473 cmd->seid = cpu_to_le16(sw_seid);
2474 cmd->rule_type = cpu_to_le16(rule_type &
2475 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2476 cmd->num_entries = cpu_to_le16(count);
2477 /* Dest VSI for add, rule_id for delete */
2478 cmd->destination = cpu_to_le16(id);
2479 if (mr_list) {
2480 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2481 I40E_AQ_FLAG_RD));
2482 if (buf_size > I40E_AQ_LARGE_BUF)
2483 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2484 }
2485
2486 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2487 cmd_details);
2488 if (!status ||
2489 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2490 if (rule_id)
2491 *rule_id = le16_to_cpu(resp->rule_id);
2492 if (rules_used)
2493 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2494 if (rules_free)
2495 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2496 }
2497 return status;
2498}
2499
2500/**
2501 * i40e_aq_add_mirrorrule - add a mirror rule
2502 * @hw: pointer to the hw struct
2503 * @sw_seid: Switch SEID (to which rule refers)
2504 * @rule_type: Rule Type (ingress/egress/VLAN)
2505 * @dest_vsi: SEID of VSI to which packets will be mirrored
2506 * @count: length of the list
2507 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2508 * @cmd_details: pointer to command details structure or NULL
2509 * @rule_id: Rule ID returned from FW
2510 * @rules_used: Number of rules used in internal switch
2511 * @rules_free: Number of rules free in internal switch
2512 *
2513 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2514 **/
2515int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2516 u16 rule_type, u16 dest_vsi, u16 count,
2517 __le16 *mr_list,
2518 struct i40e_asq_cmd_details *cmd_details,
2519 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2520{
2521 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2522 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2523 if (count == 0 || !mr_list)
2524 return -EINVAL;
2525 }
2526
2527 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2528 rule_type, dest_vsi, count, mr_list,
2529 cmd_details, rule_id, rules_used, rules_free);
2530}
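
/*
 * Usage sketch (illustrative only, not part of the driver): mirroring all
 * ingress traffic of a switch to a destination VSI. For the ALL_INGRESS and
 * ALL_EGRESS rule types no mr_list is needed, so count may be 0. "sw_seid"
 * and "dest_vsi_seid" are assumed caller values:
 *
 *	u16 rule_id = 0, used = 0, free = 0;
 *	int ret;
 *
 *	ret = i40e_aq_add_mirrorrule(hw, sw_seid,
 *				     I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
 *				     dest_vsi_seid, 0, NULL, NULL,
 *				     &rule_id, &used, &free);
 *
 * The returned rule_id is what i40e_aq_delete_mirrorrule() later expects.
 */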
2531
2532/**
2533 * i40e_aq_delete_mirrorrule - delete a mirror rule
2534 * @hw: pointer to the hw struct
2535 * @sw_seid: Switch SEID (to which rule refers)
2536 * @rule_type: Rule Type (ingress/egress/VLAN)
2537 * @count: length of the list
2538 * @rule_id: Rule ID that is returned in the receive desc as part of
2539 * add_mirrorrule.
2540 * @mr_list: list of mirrored VLAN IDs to be removed
2541 * @cmd_details: pointer to command details structure or NULL
2542 * @rules_used: Number of rules used in internal switch
2543 * @rules_free: Number of rules free in internal switch
2544 *
2545 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2546 **/
2547int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2548 u16 rule_type, u16 rule_id, u16 count,
2549 __le16 *mr_list,
2550 struct i40e_asq_cmd_details *cmd_details,
2551 u16 *rules_used, u16 *rules_free)
2552{
2553	/* Rule ID has to be valid except for rule_type INGRESS VLAN mirroring */
2554 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2555 /* count and mr_list shall be valid for rule_type INGRESS VLAN
2556		 * mirroring. For other rule_type values, count and mr_list should
2557		 * not matter.
2558 */
2559 if (count == 0 || !mr_list)
2560 return -EINVAL;
2561 }
2562
2563 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2564 rule_type, rule_id, count, mr_list,
2565 cmd_details, NULL, rules_used, rules_free);
2566}
2567
2568/**
2569 * i40e_aq_send_msg_to_vf
2570 * @hw: pointer to the hardware structure
2571 * @vfid: VF id to send msg
2572 * @v_opcode: opcodes for VF-PF communication
2573 * @v_retval: return error code
2574 * @msg: pointer to the msg buffer
2575 * @msglen: msg length
2576 * @cmd_details: pointer to command details
2577 *
2578 * send msg to vf
2579 **/
2580int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2581 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2582 struct i40e_asq_cmd_details *cmd_details)
2583{
2584 struct i40e_aq_desc desc;
2585 struct i40e_aqc_pf_vf_message *cmd =
2586 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2587 int status;
2588
2589 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2590 cmd->id = cpu_to_le32(vfid);
2591 desc.cookie_high = cpu_to_le32(v_opcode);
2592 desc.cookie_low = cpu_to_le32(v_retval);
2593 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2594 if (msglen) {
2595 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2596 I40E_AQ_FLAG_RD));
2597 if (msglen > I40E_AQ_LARGE_BUF)
2598 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2599 desc.datalen = cpu_to_le16(msglen);
2600 }
2601 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2602
2603 return status;
2604}
2605
2606/**
2607 * i40e_aq_debug_read_register
2608 * @hw: pointer to the hw struct
2609 * @reg_addr: register address
2610 * @reg_val: register value
2611 * @cmd_details: pointer to command details structure or NULL
2612 *
2613 * Read the register using the admin queue commands
2614 **/
2615int i40e_aq_debug_read_register(struct i40e_hw *hw,
2616 u32 reg_addr, u64 *reg_val,
2617 struct i40e_asq_cmd_details *cmd_details)
2618{
2619 struct i40e_aq_desc desc;
2620 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2621 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2622 int status;
2623
2624 if (reg_val == NULL)
2625 return -EINVAL;
2626
2627 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2628
2629 cmd_resp->address = cpu_to_le32(reg_addr);
2630
2631 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2632
2633 if (!status) {
2634 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2635 (u64)le32_to_cpu(cmd_resp->value_low);
2636 }
2637
2638 return status;
2639}
2640
2641/**
2642 * i40e_aq_debug_write_register
2643 * @hw: pointer to the hw struct
2644 * @reg_addr: register address
2645 * @reg_val: register value
2646 * @cmd_details: pointer to command details structure or NULL
2647 *
2648 * Write to a register using the admin queue commands
2649 **/
2650int i40e_aq_debug_write_register(struct i40e_hw *hw,
2651 u32 reg_addr, u64 reg_val,
2652 struct i40e_asq_cmd_details *cmd_details)
2653{
2654 struct i40e_aq_desc desc;
2655 struct i40e_aqc_debug_reg_read_write *cmd =
2656 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2657 int status;
2658
2659 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2660
2661 cmd->address = cpu_to_le32(reg_addr);
2662 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2663 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2664
2665 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2666
2667 return status;
2668}
2669
2670/**
2671 * i40e_aq_request_resource
2672 * @hw: pointer to the hw struct
2673 * @resource: resource id
2674 * @access: access type
2675 * @sdp_number: resource number
2676 * @timeout: the maximum time in ms that the driver may hold the resource
2677 * @cmd_details: pointer to command details structure or NULL
2678 *
2679 * requests common resource using the admin queue commands
2680 **/
2681int i40e_aq_request_resource(struct i40e_hw *hw,
2682 enum i40e_aq_resources_ids resource,
2683 enum i40e_aq_resource_access_type access,
2684 u8 sdp_number, u64 *timeout,
2685 struct i40e_asq_cmd_details *cmd_details)
2686{
2687 struct i40e_aq_desc desc;
2688 struct i40e_aqc_request_resource *cmd_resp =
2689 (struct i40e_aqc_request_resource *)&desc.params.raw;
2690 int status;
2691
2692 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
2693
2694 cmd_resp->resource_id = cpu_to_le16(resource);
2695 cmd_resp->access_type = cpu_to_le16(access);
2696 cmd_resp->resource_number = cpu_to_le32(sdp_number);
2697
2698 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2699 /* The completion specifies the maximum time in ms that the driver
2700 * may hold the resource in the Timeout field.
2701 * If the resource is held by someone else, the command completes with
2702 * busy return value and the timeout field indicates the maximum time
2703 * the current owner of the resource has to free it.
2704 */
2705 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
2706 *timeout = le32_to_cpu(cmd_resp->timeout);
2707
2708 return status;
2709}
2710
2711/**
2712 * i40e_aq_release_resource
2713 * @hw: pointer to the hw struct
2714 * @resource: resource id
2715 * @sdp_number: resource number
2716 * @cmd_details: pointer to command details structure or NULL
2717 *
2718 * release common resource using the admin queue commands
2719 **/
2720int i40e_aq_release_resource(struct i40e_hw *hw,
2721 enum i40e_aq_resources_ids resource,
2722 u8 sdp_number,
2723 struct i40e_asq_cmd_details *cmd_details)
2724{
2725 struct i40e_aq_desc desc;
2726 struct i40e_aqc_request_resource *cmd =
2727 (struct i40e_aqc_request_resource *)&desc.params.raw;
2728 int status;
2729
2730 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
2731
2732 cmd->resource_id = cpu_to_le16(resource);
2733 cmd->resource_number = cpu_to_le32(sdp_number);
2734
2735 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2736
2737 return status;
2738}
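
/*
 * Usage sketch (illustrative only, not part of the driver): request/release
 * must always be paired. The NVM helpers i40e_acquire_nvm()/i40e_release_nvm()
 * wrap this pattern; a raw read-lock acquisition could look roughly like:
 *
 *	u64 timeout = 0;
 *	int ret;
 *
 *	ret = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *				       I40E_RESOURCE_READ, 0, &timeout, NULL);
 *	if (ret)
 *		return ret;   (on EBUSY, timeout says how long to wait)
 *	(access the NVM here)
 *	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
 */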
2739
2740/**
2741 * i40e_aq_read_nvm
2742 * @hw: pointer to the hw struct
2743 * @module_pointer: module pointer location in words from the NVM beginning
2744 * @offset: byte offset from the module beginning
2745 * @length: length of the section to be read (in bytes from the offset)
2746 * @data: command buffer (size [bytes] = length)
2747 * @last_command: tells if this is the last command in a series
2748 * @cmd_details: pointer to command details structure or NULL
2749 *
2750 * Read the NVM using the admin queue commands
2751 **/
2752int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
2753 u32 offset, u16 length, void *data,
2754 bool last_command,
2755 struct i40e_asq_cmd_details *cmd_details)
2756{
2757 struct i40e_aq_desc desc;
2758 struct i40e_aqc_nvm_update *cmd =
2759 (struct i40e_aqc_nvm_update *)&desc.params.raw;
2760 int status;
2761
2762	/* The highest byte of the offset must be zero. */
2763 if (offset & 0xFF000000) {
2764 status = -EINVAL;
2765 goto i40e_aq_read_nvm_exit;
2766 }
2767
2768 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
2769
2770 /* If this is the last command in a series, set the proper flag. */
2771 if (last_command)
2772 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
2773 cmd->module_pointer = module_pointer;
2774 cmd->offset = cpu_to_le32(offset);
2775 cmd->length = cpu_to_le16(length);
2776
2777 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2778 if (length > I40E_AQ_LARGE_BUF)
2779 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2780
2781 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
2782
2783i40e_aq_read_nvm_exit:
2784 return status;
2785}
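
/*
 * Usage sketch (illustrative only, not part of the driver): reading a region
 * larger than one AQ buffer is done in chunks, with last_command set only on
 * the final piece. The NVM resource should be held around the sequence (see
 * i40e_acquire_nvm()/i40e_release_nvm()). "dest", "start" and "len" are
 * assumed caller values:
 *
 *	u32 off = start;
 *	u16 chunk;
 *	int ret = 0;
 *
 *	while (len) {
 *		chunk = min_t(u32, len, I40E_AQ_LARGE_BUF);
 *		ret = i40e_aq_read_nvm(hw, 0, off, chunk, dest,
 *				       (len == chunk), NULL);
 *		if (ret)
 *			break;
 *		dest += chunk;
 *		off += chunk;
 *		len -= chunk;
 *	}
 */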
2786
2787/**
2788 * i40e_aq_erase_nvm
2789 * @hw: pointer to the hw struct
2790 * @module_pointer: module pointer location in words from the NVM beginning
2791 * @offset: offset in the module (expressed in 4 KB from module's beginning)
2792 * @length: length of the section to be erased (expressed in 4 KB)
2793 * @last_command: tells if this is the last command in a series
2794 * @cmd_details: pointer to command details structure or NULL
2795 *
2796 * Erase the NVM sector using the admin queue commands
2797 **/
2798int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
2799 u32 offset, u16 length, bool last_command,
2800 struct i40e_asq_cmd_details *cmd_details)
2801{
2802 struct i40e_aq_desc desc;
2803 struct i40e_aqc_nvm_update *cmd =
2804 (struct i40e_aqc_nvm_update *)&desc.params.raw;
2805 int status;
2806
2807	/* The highest byte of the offset must be zero. */
2808 if (offset & 0xFF000000) {
2809 status = -EINVAL;
2810 goto i40e_aq_erase_nvm_exit;
2811 }
2812
2813 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
2814
2815 /* If this is the last command in a series, set the proper flag. */
2816 if (last_command)
2817 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
2818 cmd->module_pointer = module_pointer;
2819 cmd->offset = cpu_to_le32(offset);
2820 cmd->length = cpu_to_le16(length);
2821
2822 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2823
2824i40e_aq_erase_nvm_exit:
2825 return status;
2826}
2827
2828/**
2829 * i40e_parse_discover_capabilities
2830 * @hw: pointer to the hw struct
2831 * @buff: pointer to a buffer containing device/function capability records
2832 * @cap_count: number of capability records in the list
2833 * @list_type_opc: type of capabilities list to parse
2834 *
2835 * Parse the device/function capabilities list.
2836 **/
2837static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
2838 u32 cap_count,
2839 enum i40e_admin_queue_opc list_type_opc)
2840{
2841 struct i40e_aqc_list_capabilities_element_resp *cap;
2842 u32 valid_functions, num_functions;
2843 u32 number, logical_id, phys_id;
2844 struct i40e_hw_capabilities *p;
2845 u16 id, ocp_cfg_word0;
2846 u8 major_rev;
2847 int status;
2848 u32 i = 0;
2849
2850 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
2851
2852 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
2853 p = &hw->dev_caps;
2854 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
2855 p = &hw->func_caps;
2856 else
2857 return;
2858
2859 for (i = 0; i < cap_count; i++, cap++) {
2860 id = le16_to_cpu(cap->id);
2861 number = le32_to_cpu(cap->number);
2862 logical_id = le32_to_cpu(cap->logical_id);
2863 phys_id = le32_to_cpu(cap->phys_id);
2864 major_rev = cap->major_rev;
2865
2866 switch (id) {
2867 case I40E_AQ_CAP_ID_SWITCH_MODE:
2868 p->switch_mode = number;
2869 break;
2870 case I40E_AQ_CAP_ID_MNG_MODE:
2871 p->management_mode = number;
2872 if (major_rev > 1) {
2873 p->mng_protocols_over_mctp = logical_id;
2874 i40e_debug(hw, I40E_DEBUG_INIT,
2875 "HW Capability: Protocols over MCTP = %d\n",
2876 p->mng_protocols_over_mctp);
2877 } else {
2878 p->mng_protocols_over_mctp = 0;
2879 }
2880 break;
2881 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
2882 p->npar_enable = number;
2883 break;
2884 case I40E_AQ_CAP_ID_OS2BMC_CAP:
2885 p->os2bmc = number;
2886 break;
2887 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
2888 p->valid_functions = number;
2889 break;
2890 case I40E_AQ_CAP_ID_SRIOV:
2891 if (number == 1)
2892 p->sr_iov_1_1 = true;
2893 break;
2894 case I40E_AQ_CAP_ID_VF:
2895 p->num_vfs = number;
2896 p->vf_base_id = logical_id;
2897 break;
2898 case I40E_AQ_CAP_ID_VMDQ:
2899 if (number == 1)
2900 p->vmdq = true;
2901 break;
2902 case I40E_AQ_CAP_ID_8021QBG:
2903 if (number == 1)
2904 p->evb_802_1_qbg = true;
2905 break;
2906 case I40E_AQ_CAP_ID_8021QBR:
2907 if (number == 1)
2908 p->evb_802_1_qbh = true;
2909 break;
2910 case I40E_AQ_CAP_ID_VSI:
2911 p->num_vsis = number;
2912 break;
2913 case I40E_AQ_CAP_ID_DCB:
2914 if (number == 1) {
2915 p->dcb = true;
2916 p->enabled_tcmap = logical_id;
2917 p->maxtc = phys_id;
2918 }
2919 break;
2920 case I40E_AQ_CAP_ID_FCOE:
2921 if (number == 1)
2922 p->fcoe = true;
2923 break;
2924 case I40E_AQ_CAP_ID_ISCSI:
2925 if (number == 1)
2926 p->iscsi = true;
2927 break;
2928 case I40E_AQ_CAP_ID_RSS:
2929 p->rss = true;
2930 p->rss_table_size = number;
2931 p->rss_table_entry_width = logical_id;
2932 break;
2933 case I40E_AQ_CAP_ID_RXQ:
2934 p->num_rx_qp = number;
2935 p->base_queue = phys_id;
2936 break;
2937 case I40E_AQ_CAP_ID_TXQ:
2938 p->num_tx_qp = number;
2939 p->base_queue = phys_id;
2940 break;
2941 case I40E_AQ_CAP_ID_MSIX:
2942 p->num_msix_vectors = number;
2943 i40e_debug(hw, I40E_DEBUG_INIT,
2944 "HW Capability: MSIX vector count = %d\n",
2945 p->num_msix_vectors);
2946 break;
2947 case I40E_AQ_CAP_ID_VF_MSIX:
2948 p->num_msix_vectors_vf = number;
2949 break;
2950 case I40E_AQ_CAP_ID_FLEX10:
2951 if (major_rev == 1) {
2952 if (number == 1) {
2953 p->flex10_enable = true;
2954 p->flex10_capable = true;
2955 }
2956 } else {
2957 /* Capability revision >= 2 */
2958 if (number & 1)
2959 p->flex10_enable = true;
2960 if (number & 2)
2961 p->flex10_capable = true;
2962 }
2963 p->flex10_mode = logical_id;
2964 p->flex10_status = phys_id;
2965 break;
2966 case I40E_AQ_CAP_ID_CEM:
2967 if (number == 1)
2968 p->mgmt_cem = true;
2969 break;
2970 case I40E_AQ_CAP_ID_IWARP:
2971 if (number == 1)
2972 p->iwarp = true;
2973 break;
2974 case I40E_AQ_CAP_ID_LED:
2975 if (phys_id < I40E_HW_CAP_MAX_GPIO)
2976 p->led[phys_id] = true;
2977 break;
2978 case I40E_AQ_CAP_ID_SDP:
2979 if (phys_id < I40E_HW_CAP_MAX_GPIO)
2980 p->sdp[phys_id] = true;
2981 break;
2982 case I40E_AQ_CAP_ID_MDIO:
2983 if (number == 1) {
2984 p->mdio_port_num = phys_id;
2985 p->mdio_port_mode = logical_id;
2986 }
2987 break;
2988 case I40E_AQ_CAP_ID_1588:
2989 if (number == 1)
2990 p->ieee_1588 = true;
2991 break;
2992 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
2993 p->fd = true;
2994 p->fd_filters_guaranteed = number;
2995 p->fd_filters_best_effort = logical_id;
2996 break;
2997 case I40E_AQ_CAP_ID_WSR_PROT:
2998 p->wr_csr_prot = (u64)number;
2999 p->wr_csr_prot |= (u64)logical_id << 32;
3000 break;
3001 case I40E_AQ_CAP_ID_NVM_MGMT:
3002 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3003 p->sec_rev_disabled = true;
3004 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3005 p->update_disabled = true;
3006 break;
3007 default:
3008 break;
3009 }
3010 }
3011
3012 if (p->fcoe)
3013 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3014
3015	/* Software override ensuring FCoE is disabled in NPAR or MFP
3016	 * mode, because it is not supported in these modes.
3017 */
3018 if (p->npar_enable || p->flex10_enable)
3019 p->fcoe = false;
3020
3021 /* count the enabled ports (aka the "not disabled" ports) */
3022 hw->num_ports = 0;
3023 for (i = 0; i < 4; i++) {
3024 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3025 u64 port_cfg = 0;
3026
3027 /* use AQ read to get the physical register offset instead
3028 * of the port relative offset
3029 */
3030 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3031 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3032 hw->num_ports++;
3033 }
3034
3035	/* OCP cards case: if a mezz is removed, the Ethernet port is left in
3036	 * a disabled state in the PRTGEN_CNF register. An additional NVM read
3037	 * is needed to check whether we are dealing with an OCP card.
3038	 * Those cards have at least 4 PFs, so using PRTGEN_CNF to count
3039	 * physical ports results in a wrong partition id calculation and thus
3040	 * breaks WoL support.
3041 */
3042 if (hw->mac.type == I40E_MAC_X722) {
3043 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3044 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3045 2 * I40E_SR_OCP_CFG_WORD0,
3046 sizeof(ocp_cfg_word0),
3047 &ocp_cfg_word0, true, NULL);
3048 if (!status &&
3049 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3050 hw->num_ports = 4;
3051 i40e_release_nvm(hw);
3052 }
3053 }
3054
3055 valid_functions = p->valid_functions;
3056 num_functions = 0;
3057 while (valid_functions) {
3058 if (valid_functions & 1)
3059 num_functions++;
3060 valid_functions >>= 1;
3061 }
3062
3063 /* partition id is 1-based, and functions are evenly spread
3064 * across the ports as partitions
3065 */
3066 if (hw->num_ports != 0) {
3067 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3068 hw->num_partitions = num_functions / hw->num_ports;
3069 }
3070
3071 /* additional HW specific goodies that might
3072 * someday be HW version specific
3073 */
3074 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3075}
3076
3077/**
3078 * i40e_aq_discover_capabilities
3079 * @hw: pointer to the hw struct
3080 * @buff: a virtual buffer to hold the capabilities
3081 * @buff_size: Size of the virtual buffer
3082 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3083 * @list_type_opc: capabilities type to discover - pass in the command opcode
3084 * @cmd_details: pointer to command details structure or NULL
3085 *
3086 * Get the device capabilities descriptions from the firmware
3087 **/
3088int i40e_aq_discover_capabilities(struct i40e_hw *hw,
3089 void *buff, u16 buff_size, u16 *data_size,
3090 enum i40e_admin_queue_opc list_type_opc,
3091 struct i40e_asq_cmd_details *cmd_details)
3092{
3093 struct i40e_aqc_list_capabilites *cmd;
3094 struct i40e_aq_desc desc;
3095 int status = 0;
3096
3097 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3098
3099 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3100 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3101 status = -EINVAL;
3102 goto exit;
3103 }
3104
3105 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3106
3107 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3108 if (buff_size > I40E_AQ_LARGE_BUF)
3109 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3110
3111 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3112 *data_size = le16_to_cpu(desc.datalen);
3113
3114 if (status)
3115 goto exit;
3116
3117 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3118 list_type_opc);
3119
3120exit:
3121 return status;
3122}
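
/*
 * Usage sketch (illustrative only, not part of the driver): firmware reports
 * ENOMEM and the required size in *data_size when the supplied buffer is too
 * small, so callers typically retry with a larger allocation, roughly:
 *
 *	u16 buf_size = sizeof(struct i40e_aqc_list_capabilities_element_resp);
 *	u16 needed = 0;
 *	void *buf;
 *	int ret;
 *
 *	do {
 *		buf = kzalloc(buf_size, GFP_KERNEL);
 *		if (!buf)
 *			return -ENOMEM;
 *		ret = i40e_aq_discover_capabilities(hw, buf, buf_size, &needed,
 *				i40e_aqc_opc_list_func_capabilities, NULL);
 *		kfree(buf);
 *		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
 *			buf_size = needed;
 *		else
 *			break;
 *	} while (ret);
 *
 * The parsed capabilities land in hw->func_caps (or hw->dev_caps), so the
 * raw buffer can be freed once the command returns.
 */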
3123
3124/**
3125 * i40e_aq_update_nvm
3126 * @hw: pointer to the hw struct
3127 * @module_pointer: module pointer location in words from the NVM beginning
3128 * @offset: byte offset from the module beginning
3129 * @length: length of the section to be written (in bytes from the offset)
3130 * @data: command buffer (size [bytes] = length)
3131 * @last_command: tells if this is the last command in a series
3132 * @preservation_flags: Preservation mode flags
3133 * @cmd_details: pointer to command details structure or NULL
3134 *
3135 * Update the NVM using the admin queue commands
3136 **/
3137int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3138 u32 offset, u16 length, void *data,
3139 bool last_command, u8 preservation_flags,
3140 struct i40e_asq_cmd_details *cmd_details)
3141{
3142 struct i40e_aq_desc desc;
3143 struct i40e_aqc_nvm_update *cmd =
3144 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3145 int status;
3146
3147	/* The highest byte of the offset must be zero. */
3148 if (offset & 0xFF000000) {
3149 status = -EINVAL;
3150 goto i40e_aq_update_nvm_exit;
3151 }
3152
3153 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3154
3155 /* If this is the last command in a series, set the proper flag. */
3156 if (last_command)
3157 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3158 if (hw->mac.type == I40E_MAC_X722) {
3159 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3160 cmd->command_flags |=
3161 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3162 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3163 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3164 cmd->command_flags |=
3165 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3166 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3167 }
3168 cmd->module_pointer = module_pointer;
3169 cmd->offset = cpu_to_le32(offset);
3170 cmd->length = cpu_to_le16(length);
3171
3172 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3173 if (length > I40E_AQ_LARGE_BUF)
3174 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3175
3176 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3177
3178i40e_aq_update_nvm_exit:
3179 return status;
3180}
3181
3182/**
3183 * i40e_aq_rearrange_nvm
3184 * @hw: pointer to the hw struct
3185 * @rearrange_nvm: defines direction of rearrangement
3186 * @cmd_details: pointer to command details structure or NULL
3187 *
3188 * Rearrange NVM structure, available only for transition FW
3189 **/
3190int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3191 u8 rearrange_nvm,
3192 struct i40e_asq_cmd_details *cmd_details)
3193{
3194 struct i40e_aqc_nvm_update *cmd;
3195 struct i40e_aq_desc desc;
3196 int status;
3197
3198 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3199
3200 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3201
3202 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3203 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3204
3205 if (!rearrange_nvm) {
3206 status = -EINVAL;
3207 goto i40e_aq_rearrange_nvm_exit;
3208 }
3209
3210 cmd->command_flags |= rearrange_nvm;
3211 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3212
3213i40e_aq_rearrange_nvm_exit:
3214 return status;
3215}
3216
3217/**
3218 * i40e_aq_get_lldp_mib
3219 * @hw: pointer to the hw struct
3220 * @bridge_type: type of bridge requested
3221 * @mib_type: Local, Remote or both Local and Remote MIBs
3222 * @buff: pointer to a user supplied buffer to store the MIB block
3223 * @buff_size: size of the buffer (in bytes)
3224 * @local_len: length of the returned Local LLDP MIB
3225 * @remote_len: length of the returned Remote LLDP MIB
3226 * @cmd_details: pointer to command details structure or NULL
3227 *
3228 * Requests the complete LLDP MIB (entire packet).
3229 **/
3230int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3231 u8 mib_type, void *buff, u16 buff_size,
3232 u16 *local_len, u16 *remote_len,
3233 struct i40e_asq_cmd_details *cmd_details)
3234{
3235 struct i40e_aq_desc desc;
3236 struct i40e_aqc_lldp_get_mib *cmd =
3237 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3238 struct i40e_aqc_lldp_get_mib *resp =
3239 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3240 int status;
3241
3242 if (buff_size == 0 || !buff)
3243 return -EINVAL;
3244
3245 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3246 /* Indirect Command */
3247 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3248
3249 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3250 cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
3251
3252 desc.datalen = cpu_to_le16(buff_size);
3253
3254 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3255 if (buff_size > I40E_AQ_LARGE_BUF)
3256 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3257
3258 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3259 if (!status) {
3260 if (local_len != NULL)
3261 *local_len = le16_to_cpu(resp->local_len);
3262 if (remote_len != NULL)
3263 *remote_len = le16_to_cpu(resp->remote_len);
3264 }
3265
3266 return status;
3267}
3268
3269/**
3270 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3271 * @hw: pointer to the hw struct
3272 * @mib_type: Local, Remote or both Local and Remote MIBs
3273 * @buff: pointer to a user supplied buffer to store the MIB block
3274 * @buff_size: size of the buffer (in bytes)
3275 * @cmd_details: pointer to command details structure or NULL
3276 *
3277 * Set the LLDP MIB.
3278 **/
3279int
3280i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3281 u8 mib_type, void *buff, u16 buff_size,
3282 struct i40e_asq_cmd_details *cmd_details)
3283{
3284 struct i40e_aqc_lldp_set_local_mib *cmd;
3285 struct i40e_aq_desc desc;
3286 int status;
3287
3288 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3289 if (buff_size == 0 || !buff)
3290 return -EINVAL;
3291
3292 i40e_fill_default_direct_cmd_desc(&desc,
3293 i40e_aqc_opc_lldp_set_local_mib);
3294 /* Indirect Command */
3295 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3296 if (buff_size > I40E_AQ_LARGE_BUF)
3297 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3298 desc.datalen = cpu_to_le16(buff_size);
3299
3300 cmd->type = mib_type;
3301 cmd->length = cpu_to_le16(buff_size);
3302 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3303 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3304
3305 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3306 return status;
3307}
3308
3309/**
3310 * i40e_aq_cfg_lldp_mib_change_event
3311 * @hw: pointer to the hw struct
3312 * @enable_update: Enable or Disable event posting
3313 * @cmd_details: pointer to command details structure or NULL
3314 *
3315 * Enable or Disable posting of an event on ARQ when LLDP MIB
3316 * associated with the interface changes
3317 **/
3318int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3319 bool enable_update,
3320 struct i40e_asq_cmd_details *cmd_details)
3321{
3322 struct i40e_aq_desc desc;
3323 struct i40e_aqc_lldp_update_mib *cmd =
3324 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3325 int status;
3326
3327 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3328
3329 if (!enable_update)
3330 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3331
3332 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3333
3334 return status;
3335}
3336
3337/**
3338 * i40e_aq_restore_lldp
3339 * @hw: pointer to the hw struct
3340 * @setting: pointer to factory setting variable or NULL
3341 * @restore: True if factory settings should be restored
3342 * @cmd_details: pointer to command details structure or NULL
3343 *
3344 * Restore LLDP Agent factory settings if @restore is set to true. Otherwise,
3345 * only return the factory setting in the AQ response.
3346 **/
3347int
3348i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3349 struct i40e_asq_cmd_details *cmd_details)
3350{
3351 struct i40e_aq_desc desc;
3352 struct i40e_aqc_lldp_restore *cmd =
3353 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3354 int status;
3355
3356 if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
3357 i40e_debug(hw, I40E_DEBUG_ALL,
3358 "Restore LLDP not supported by current FW version.\n");
3359 return -ENODEV;
3360 }
3361
3362 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3363
3364 if (restore)
3365 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3366
3367 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3368
3369 if (setting)
3370 *setting = cmd->command & 1;
3371
3372 return status;
3373}
3374
3375/**
3376 * i40e_aq_stop_lldp
3377 * @hw: pointer to the hw struct
3378 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3379 * @persist: True if stop of LLDP should be persistent across power cycles
3380 * @cmd_details: pointer to command details structure or NULL
3381 *
3382 * Stop or Shutdown the embedded LLDP Agent
3383 **/
3384int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3385 bool persist,
3386 struct i40e_asq_cmd_details *cmd_details)
3387{
3388 struct i40e_aq_desc desc;
3389 struct i40e_aqc_lldp_stop *cmd =
3390 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3391 int status;
3392
3393 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3394
3395 if (shutdown_agent)
3396 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3397
3398 if (persist) {
3399 if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
3400 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3401 else
3402 i40e_debug(hw, I40E_DEBUG_ALL,
3403 "Persistent Stop LLDP not supported by current FW version.\n");
3404 }
3405
3406 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3407
3408 return status;
3409}
3410
3411/**
3412 * i40e_aq_start_lldp
3413 * @hw: pointer to the hw struct
3414 * @persist: True if start of LLDP should be persistent across power cycles
3415 * @cmd_details: pointer to command details structure or NULL
3416 *
3417 * Start the embedded LLDP Agent on all ports.
3418 **/
3419int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3420 struct i40e_asq_cmd_details *cmd_details)
3421{
3422 struct i40e_aq_desc desc;
3423 struct i40e_aqc_lldp_start *cmd =
3424 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3425 int status;
3426
3427 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3428
3429 cmd->command = I40E_AQ_LLDP_AGENT_START;
3430
3431 if (persist) {
3432 if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
3433 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3434 else
3435 i40e_debug(hw, I40E_DEBUG_ALL,
3436 "Persistent Start LLDP not supported by current FW version.\n");
3437 }
3438
3439 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3440
3441 return status;
3442}
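
/*
 * Usage sketch (illustrative only, not part of the driver): handing DCBX
 * control from the firmware LLDP agent to software typically stops the
 * agent, and a symmetric restart hands it back. The non-persistent forms
 * shown here take effect only until the next power cycle:
 *
 *	(disable the FW LLDP agent)
 *	i40e_aq_stop_lldp(hw, true, false, NULL);
 *
 *	(re-enable it later)
 *	i40e_aq_start_lldp(hw, false, NULL);
 */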
3443
3444/**
3445 * i40e_aq_set_dcb_parameters
3446 * @hw: pointer to the hw struct
3447 * @dcb_enable: True if DCB configuration needs to be applied
3448 * @cmd_details: pointer to command details structure or NULL
3449 *
3450 **/
3451int
3452i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3453 struct i40e_asq_cmd_details *cmd_details)
3454{
3455 struct i40e_aq_desc desc;
3456 struct i40e_aqc_set_dcb_parameters *cmd =
3457 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3458 int status;
3459
3460 if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
3461 return -ENODEV;
3462
3463 i40e_fill_default_direct_cmd_desc(&desc,
3464 i40e_aqc_opc_set_dcb_parameters);
3465
3466 if (dcb_enable) {
3467 cmd->valid_flags = I40E_DCB_VALID;
3468 cmd->command = I40E_AQ_DCB_SET_AGENT;
3469 }
3470 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3471
3472 return status;
3473}
3474
3475/**
3476 * i40e_aq_get_cee_dcb_config
3477 * @hw: pointer to the hw struct
3478 * @buff: response buffer that stores CEE operational configuration
3479 * @buff_size: size of the buffer passed
3480 * @cmd_details: pointer to command details structure or NULL
3481 *
3482 * Get CEE DCBX mode operational configuration from firmware
3483 **/
3484int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3485 void *buff, u16 buff_size,
3486 struct i40e_asq_cmd_details *cmd_details)
3487{
3488 struct i40e_aq_desc desc;
3489 int status;
3490
3491 if (buff_size == 0 || !buff)
3492 return -EINVAL;
3493
3494 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3495
3496 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3497 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3498 cmd_details);
3499
3500 return status;
3501}
3502
3503/**
3504 * i40e_aq_add_udp_tunnel
3505 * @hw: pointer to the hw struct
3506 * @udp_port: the UDP port to add in Host byte order
3507 * @protocol_index: protocol index type
3508 * @filter_index: pointer to filter index
3509 * @cmd_details: pointer to command details structure or NULL
3510 *
3511 * Note: Firmware expects the udp_port value to be in Little Endian format,
3512 * and this function will call cpu_to_le16 to convert from Host byte order to
3513 * Little Endian order.
3514 **/
3515int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3516 u16 udp_port, u8 protocol_index,
3517 u8 *filter_index,
3518 struct i40e_asq_cmd_details *cmd_details)
3519{
3520 struct i40e_aq_desc desc;
3521 struct i40e_aqc_add_udp_tunnel *cmd =
3522 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3523 struct i40e_aqc_del_udp_tunnel_completion *resp =
3524 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3525 int status;
3526
3527 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3528
3529 cmd->udp_port = cpu_to_le16(udp_port);
3530 cmd->protocol_type = protocol_index;
3531
3532 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3533
3534 if (!status && filter_index)
3535 *filter_index = resp->index;
3536
3537 return status;
3538}
3539
3540/**
3541 * i40e_aq_del_udp_tunnel
3542 * @hw: pointer to the hw struct
3543 * @index: filter index
3544 * @cmd_details: pointer to command details structure or NULL
3545 **/
3546int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3547 struct i40e_asq_cmd_details *cmd_details)
3548{
3549 struct i40e_aq_desc desc;
3550 struct i40e_aqc_remove_udp_tunnel *cmd =
3551 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3552 int status;
3553
3554 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3555
3556 cmd->index = index;
3557
3558 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3559
3560 return status;
3561}
3562
3563/**
3564 * i40e_aq_delete_element - Delete switch element
3565 * @hw: pointer to the hw struct
3566 * @seid: the SEID to delete from the switch
3567 * @cmd_details: pointer to command details structure or NULL
3568 *
3569 * This deletes a switch element from the switch.
3570 **/
3571int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3572 struct i40e_asq_cmd_details *cmd_details)
3573{
3574 struct i40e_aq_desc desc;
3575 struct i40e_aqc_switch_seid *cmd =
3576 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3577 int status;
3578
3579 if (seid == 0)
3580 return -EINVAL;
3581
3582 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3583
3584 cmd->seid = cpu_to_le16(seid);
3585
3586 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
3587 cmd_details, true);
3588
3589 return status;
3590}
3591
3592/**
3593 * i40e_aq_dcb_updated - DCB Updated Command
3594 * @hw: pointer to the hw struct
3595 * @cmd_details: pointer to command details structure or NULL
3596 *
3597 * EMP will return when the shared RPB settings have been
3598 * recomputed and modified. The retval field in the descriptor
3599 * will be set to 0 when RPB is modified.
3600 **/
3601int i40e_aq_dcb_updated(struct i40e_hw *hw,
3602 struct i40e_asq_cmd_details *cmd_details)
3603{
3604 struct i40e_aq_desc desc;
3605 int status;
3606
3607 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3608
3609 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3610
3611 return status;
3612}
3613
3614/**
3615 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3616 * @hw: pointer to the hw struct
3617 * @seid: seid for the physical port/switching component/vsi
3618 * @buff: Indirect buffer to hold data parameters and response
3619 * @buff_size: Indirect buffer size
3620 * @opcode: Tx scheduler AQ command opcode
3621 * @cmd_details: pointer to command details structure or NULL
3622 *
3623 * Generic command handler for Tx scheduler AQ commands
3624 **/
3625static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3626 void *buff, u16 buff_size,
3627 enum i40e_admin_queue_opc opcode,
3628 struct i40e_asq_cmd_details *cmd_details)
3629{
3630 struct i40e_aq_desc desc;
3631 struct i40e_aqc_tx_sched_ind *cmd =
3632 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3633 int status;
3634 bool cmd_param_flag = false;
3635
3636 switch (opcode) {
3637 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3638 case i40e_aqc_opc_configure_vsi_tc_bw:
3639 case i40e_aqc_opc_enable_switching_comp_ets:
3640 case i40e_aqc_opc_modify_switching_comp_ets:
3641 case i40e_aqc_opc_disable_switching_comp_ets:
3642 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3643 case i40e_aqc_opc_configure_switching_comp_bw_config:
3644 cmd_param_flag = true;
3645 break;
3646 case i40e_aqc_opc_query_vsi_bw_config:
3647 case i40e_aqc_opc_query_vsi_ets_sla_config:
3648 case i40e_aqc_opc_query_switching_comp_ets_config:
3649 case i40e_aqc_opc_query_port_ets_config:
3650 case i40e_aqc_opc_query_switching_comp_bw_config:
3651 cmd_param_flag = false;
3652 break;
3653 default:
3654 return -EINVAL;
3655 }
3656
3657 i40e_fill_default_direct_cmd_desc(&desc, opcode);
3658
3659 /* Indirect command */
3660 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
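	/* Commands that configure state pass their parameters in the buffer,
	 * so set the RD flag so firmware reads it; query commands use the
	 * buffer only for the response.
	 */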
3661 if (cmd_param_flag)
3662 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3663 if (buff_size > I40E_AQ_LARGE_BUF)
3664 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3665
3666 desc.datalen = cpu_to_le16(buff_size);
3667
3668 cmd->vsi_seid = cpu_to_le16(seid);
3669
3670 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3671
3672 return status;
3673}
3674
3675/**
3676 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3677 * @hw: pointer to the hw struct
3678 * @seid: VSI seid
3679 * @credit: BW limit credits (0 = disabled)
3680 * @max_credit: Max BW limit credits
3681 * @cmd_details: pointer to command details structure or NULL
3682 **/
3683int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3684 u16 seid, u16 credit, u8 max_credit,
3685 struct i40e_asq_cmd_details *cmd_details)
3686{
3687 struct i40e_aq_desc desc;
3688 struct i40e_aqc_configure_vsi_bw_limit *cmd =
3689 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
3690 int status;
3691
3692 i40e_fill_default_direct_cmd_desc(&desc,
3693 i40e_aqc_opc_configure_vsi_bw_limit);
3694
3695 cmd->vsi_seid = cpu_to_le16(seid);
3696 cmd->credit = cpu_to_le16(credit);
3697 cmd->max_credit = max_credit;
3698
3699 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3700
3701 return status;
3702}
3703
3704/**
3705 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
3706 * @hw: pointer to the hw struct
3707 * @seid: VSI seid
3708 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
3709 * @cmd_details: pointer to command details structure or NULL
3710 **/
3711int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
3712 u16 seid,
3713 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
3714 struct i40e_asq_cmd_details *cmd_details)
3715{
3716 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3717 i40e_aqc_opc_configure_vsi_tc_bw,
3718 cmd_details);
3719}
3720
3721/**
3722 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
3723 * @hw: pointer to the hw struct
3724 * @seid: seid of the switching component connected to Physical Port
3725 * @ets_data: Buffer holding ETS parameters
3726 * @opcode: Tx scheduler AQ command opcode
3727 * @cmd_details: pointer to command details structure or NULL
3728 **/
3729int
3730i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
3731 u16 seid,
3732 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
3733 enum i40e_admin_queue_opc opcode,
3734 struct i40e_asq_cmd_details *cmd_details)
3735{
3736 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
3737 sizeof(*ets_data), opcode, cmd_details);
3738}
3739
3740/**
3741 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
3742 * @hw: pointer to the hw struct
3743 * @seid: seid of the switching component
3744 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
3745 * @cmd_details: pointer to command details structure or NULL
3746 **/
3747int
3748i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
3749 u16 seid,
3750 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
3751 struct i40e_asq_cmd_details *cmd_details)
3752{
3753 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3754 i40e_aqc_opc_configure_switching_comp_bw_config,
3755 cmd_details);
3756}
3757
3758/**
3759 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
3760 * @hw: pointer to the hw struct
3761 * @seid: seid of the VSI
3762 * @bw_data: Buffer to hold VSI BW configuration
3763 * @cmd_details: pointer to command details structure or NULL
3764 **/
3765int
3766i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
3767 u16 seid,
3768 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
3769 struct i40e_asq_cmd_details *cmd_details)
3770{
3771 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3772 i40e_aqc_opc_query_vsi_bw_config,
3773 cmd_details);
3774}
3775
3776/**
3777 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
3778 * @hw: pointer to the hw struct
3779 * @seid: seid of the VSI
3780 * @bw_data: Buffer to hold VSI BW configuration per TC
3781 * @cmd_details: pointer to command details structure or NULL
3782 **/
3783int
3784i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
3785 u16 seid,
3786 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
3787 struct i40e_asq_cmd_details *cmd_details)
3788{
3789 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3790 i40e_aqc_opc_query_vsi_ets_sla_config,
3791 cmd_details);
3792}
3793
3794/**
3795 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
3796 * @hw: pointer to the hw struct
3797 * @seid: seid of the switching component
3798 * @bw_data: Buffer to hold switching component's per TC BW config
3799 * @cmd_details: pointer to command details structure or NULL
3800 **/
3801int
3802i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
3803 u16 seid,
3804 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
3805 struct i40e_asq_cmd_details *cmd_details)
3806{
3807 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3808 i40e_aqc_opc_query_switching_comp_ets_config,
3809 cmd_details);
3810}
3811
3812/**
3813 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
3814 * @hw: pointer to the hw struct
3815 * @seid: seid of the VSI or switching component connected to Physical Port
3816 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
3817 * @cmd_details: pointer to command details structure or NULL
3818 **/
3819int
3820i40e_aq_query_port_ets_config(struct i40e_hw *hw,
3821 u16 seid,
3822 struct i40e_aqc_query_port_ets_config_resp *bw_data,
3823 struct i40e_asq_cmd_details *cmd_details)
3824{
3825 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3826 i40e_aqc_opc_query_port_ets_config,
3827 cmd_details);
3828}
3829
3830/**
3831 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
3832 * @hw: pointer to the hw struct
3833 * @seid: seid of the switching component
3834 * @bw_data: Buffer to hold switching component's BW configuration
3835 * @cmd_details: pointer to command details structure or NULL
3836 **/
3837int
3838i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
3839 u16 seid,
3840 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
3841 struct i40e_asq_cmd_details *cmd_details)
3842{
3843 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3844 i40e_aqc_opc_query_switching_comp_bw_config,
3845 cmd_details);
3846}
3847
3848/**
3849 * i40e_validate_filter_settings
3850 * @hw: pointer to the hardware structure
3851 * @settings: Filter control settings
3852 *
3853 * Check and validate the filter control settings passed.
3854 * The function checks that valid filter/context sizes are
3855 * passed for FCoE and PE.
3856 *
3857 * Returns 0 if the values passed are valid and within
3858 * range; otherwise it returns an error.
3859 **/
3860static int
3861i40e_validate_filter_settings(struct i40e_hw *hw,
3862 struct i40e_filter_control_settings *settings)
3863{
3864 u32 fcoe_cntx_size, fcoe_filt_size;
3865 u32 fcoe_fmax;
3866 u32 val;
3867
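	/* The fcoe_filt_num and fcoe_cntx_num enums encode power-of-two
	 * multipliers over the corresponding base sizes, hence the shifts
	 * below.
	 */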
3868 /* Validate FCoE settings passed */
3869 switch (settings->fcoe_filt_num) {
3870 case I40E_HASH_FILTER_SIZE_1K:
3871 case I40E_HASH_FILTER_SIZE_2K:
3872 case I40E_HASH_FILTER_SIZE_4K:
3873 case I40E_HASH_FILTER_SIZE_8K:
3874 case I40E_HASH_FILTER_SIZE_16K:
3875 case I40E_HASH_FILTER_SIZE_32K:
3876 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
3877 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
3878 break;
3879 default:
3880 return -EINVAL;
3881 }
3882
3883 switch (settings->fcoe_cntx_num) {
3884 case I40E_DMA_CNTX_SIZE_512:
3885 case I40E_DMA_CNTX_SIZE_1K:
3886 case I40E_DMA_CNTX_SIZE_2K:
3887 case I40E_DMA_CNTX_SIZE_4K:
3888 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
3889 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
3890 break;
3891 default:
3892 return -EINVAL;
3893 }
3894
3895 /* Validate PE settings passed */
3896 switch (settings->pe_filt_num) {
3897 case I40E_HASH_FILTER_SIZE_1K:
3898 case I40E_HASH_FILTER_SIZE_2K:
3899 case I40E_HASH_FILTER_SIZE_4K:
3900 case I40E_HASH_FILTER_SIZE_8K:
3901 case I40E_HASH_FILTER_SIZE_16K:
3902 case I40E_HASH_FILTER_SIZE_32K:
3903 case I40E_HASH_FILTER_SIZE_64K:
3904 case I40E_HASH_FILTER_SIZE_128K:
3905 case I40E_HASH_FILTER_SIZE_256K:
3906 case I40E_HASH_FILTER_SIZE_512K:
3907 case I40E_HASH_FILTER_SIZE_1M:
3908 break;
3909 default:
3910 return -EINVAL;
3911 }
3912
3913 switch (settings->pe_cntx_num) {
3914 case I40E_DMA_CNTX_SIZE_512:
3915 case I40E_DMA_CNTX_SIZE_1K:
3916 case I40E_DMA_CNTX_SIZE_2K:
3917 case I40E_DMA_CNTX_SIZE_4K:
3918 case I40E_DMA_CNTX_SIZE_8K:
3919 case I40E_DMA_CNTX_SIZE_16K:
3920 case I40E_DMA_CNTX_SIZE_32K:
3921 case I40E_DMA_CNTX_SIZE_64K:
3922 case I40E_DMA_CNTX_SIZE_128K:
3923 case I40E_DMA_CNTX_SIZE_256K:
3924 break;
3925 default:
3926 return -EINVAL;
3927 }
3928
3929 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
3930 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
3931 fcoe_fmax = FIELD_GET(I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK, val);
3932 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
3933 return -EINVAL;
3934
3935 return 0;
3936}
3937
3938/**
3939 * i40e_set_filter_control
3940 * @hw: pointer to the hardware structure
3941 * @settings: Filter control settings
3942 *
3943 * Set the Queue Filters for PE/FCoE and enable filters required
3944 * for a single PF. It is expected that these settings are programmed
3945 * at the driver initialization time.
3946 **/
3947int i40e_set_filter_control(struct i40e_hw *hw,
3948 struct i40e_filter_control_settings *settings)
3949{
3950 u32 hash_lut_size = 0;
3951 int ret = 0;
3952 u32 val;
3953
3954 if (!settings)
3955 return -EINVAL;
3956
3957 /* Validate the input settings */
3958 ret = i40e_validate_filter_settings(hw, settings);
3959 if (ret)
3960 return ret;
3961
3962 /* Read the PF Queue Filter control register */
3963 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
3964
3965 /* Program required PE hash buckets for the PF */
3966 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
3967 val |= FIELD_PREP(I40E_PFQF_CTL_0_PEHSIZE_MASK, settings->pe_filt_num);
3968 /* Program required PE contexts for the PF */
3969 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
3970 val |= FIELD_PREP(I40E_PFQF_CTL_0_PEDSIZE_MASK, settings->pe_cntx_num);
3971
3972 /* Program required FCoE hash buckets for the PF */
3973 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
3974 val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCHSIZE_MASK,
3975 settings->fcoe_filt_num);
3976 /* Program required FCoE DDP contexts for the PF */
3977 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
3978 val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCDSIZE_MASK,
3979 settings->fcoe_cntx_num);
3980
3981 /* Program Hash LUT size for the PF */
3982 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
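	/* Hash LUT size field: 1 selects the 512-entry table, 0 the default (128) */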
3983 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
3984 hash_lut_size = 1;
3985 val |= FIELD_PREP(I40E_PFQF_CTL_0_HASHLUTSIZE_MASK, hash_lut_size);
3986
3987 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
3988 if (settings->enable_fdir)
3989 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
3990 if (settings->enable_ethtype)
3991 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
3992 if (settings->enable_macvlan)
3993 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
3994
3995 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
3996
3997 return 0;
3998}
3999
4000/**
4001 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4002 * @hw: pointer to the hw struct
4003 * @mac_addr: MAC address to use in the filter
4004 * @ethtype: Ethertype to use in the filter
4005 * @flags: Flags that need to be applied to the filter
4006 * @vsi_seid: seid of the control VSI
4007 * @queue: VSI queue number to send the packet to
4008 * @is_add: add the control packet filter if true, otherwise remove it
4009 * @stats: Structure to hold information on control filter counts
4010 * @cmd_details: pointer to command details structure or NULL
4011 *
4012 * This command adds or removes a control packet filter for a control VSI.
4013 * On success it updates the perfect filter counts reported in
4014 * the stats member.
4015 **/
4016int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4017 u8 *mac_addr, u16 ethtype, u16 flags,
4018 u16 vsi_seid, u16 queue, bool is_add,
4019 struct i40e_control_filter_stats *stats,
4020 struct i40e_asq_cmd_details *cmd_details)
4021{
4022 struct i40e_aq_desc desc;
4023 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4024 (struct i40e_aqc_add_remove_control_packet_filter *)
4025 &desc.params.raw;
4026 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4027 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4028 &desc.params.raw;
4029 int status;
4030
4031 if (vsi_seid == 0)
4032 return -EINVAL;
4033
4034 if (is_add) {
4035 i40e_fill_default_direct_cmd_desc(&desc,
4036 i40e_aqc_opc_add_control_packet_filter);
4037 cmd->queue = cpu_to_le16(queue);
4038 } else {
4039 i40e_fill_default_direct_cmd_desc(&desc,
4040 i40e_aqc_opc_remove_control_packet_filter);
4041 }
4042
4043 if (mac_addr)
4044 ether_addr_copy(cmd->mac, mac_addr);
4045
4046 cmd->etype = cpu_to_le16(ethtype);
4047 cmd->flags = cpu_to_le16(flags);
4048 cmd->seid = cpu_to_le16(vsi_seid);
4049
4050 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4051
4052 if (!status && stats) {
4053 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4054 stats->etype_used = le16_to_cpu(resp->etype_used);
4055 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4056 stats->etype_free = le16_to_cpu(resp->etype_free);
4057 }
4058
4059 return status;
4060}
4061
4062/**
4063 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4064 * @hw: pointer to the hw struct
4065 * @seid: VSI seid on which to add the ethertype filter
4066 **/
4067void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4068 u16 seid)
4069{
4070#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4071 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4072 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4073 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4074 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4075 int status;
4076
4077 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4078 seid, 0, true, NULL,
4079 NULL);
4080 if (status)
4081 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4082}
4083
4084/**
4085 * i40e_aq_alternate_read
4086 * @hw: pointer to the hardware structure
4087 * @reg_addr0: address of first dword to be read
4088 * @reg_val0: pointer for data read from 'reg_addr0'
4089 * @reg_addr1: address of second dword to be read
4090 * @reg_val1: pointer for data read from 'reg_addr1'
4091 *
4092 * Read one or two dwords from the alternate structure. Fields are indicated
4093 * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
4094 * pointer is not passed, only the register at 'reg_addr0' is read.
4095 *
4096 **/
4097static int i40e_aq_alternate_read(struct i40e_hw *hw,
4098 u32 reg_addr0, u32 *reg_val0,
4099 u32 reg_addr1, u32 *reg_val1)
4100{
4101 struct i40e_aq_desc desc;
4102 struct i40e_aqc_alternate_write *cmd_resp =
4103 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4104 int status;
4105
4106 if (!reg_val0)
4107 return -EINVAL;
4108
4109 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4110 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4111 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4112
4113 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4114
4115 if (!status) {
4116 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4117
4118 if (reg_val1)
4119 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4120 }
4121
4122 return status;
4123}
4124
4125/**
4126 * i40e_aq_suspend_port_tx
4127 * @hw: pointer to the hardware structure
4128 * @seid: port seid
4129 * @cmd_details: pointer to command details structure or NULL
4130 *
4131 * Suspend port's Tx traffic
4132 **/
4133int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4134 struct i40e_asq_cmd_details *cmd_details)
4135{
4136 struct i40e_aqc_tx_sched_ind *cmd;
4137 struct i40e_aq_desc desc;
4138 int status;
4139
4140 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4141 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4142 cmd->vsi_seid = cpu_to_le16(seid);
4143 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4144
4145 return status;
4146}
4147
4148/**
4149 * i40e_aq_resume_port_tx
4150 * @hw: pointer to the hardware structure
4151 * @cmd_details: pointer to command details structure or NULL
4152 *
4153 * Resume port's Tx traffic
4154 **/
4155int i40e_aq_resume_port_tx(struct i40e_hw *hw,
4156 struct i40e_asq_cmd_details *cmd_details)
4157{
4158 struct i40e_aq_desc desc;
4159 int status;
4160
4161 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4162
4163 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4164
4165 return status;
4166}
4167
4168/**
4169 * i40e_set_pci_config_data - store PCI bus info
4170 * @hw: pointer to hardware structure
4171 * @link_status: the link status word from PCI config space
4172 *
4173 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4174 **/
4175void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4176{
4177 hw->bus.type = i40e_bus_type_pci_express;
4178
4179 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4180 case PCI_EXP_LNKSTA_NLW_X1:
4181 hw->bus.width = i40e_bus_width_pcie_x1;
4182 break;
4183 case PCI_EXP_LNKSTA_NLW_X2:
4184 hw->bus.width = i40e_bus_width_pcie_x2;
4185 break;
4186 case PCI_EXP_LNKSTA_NLW_X4:
4187 hw->bus.width = i40e_bus_width_pcie_x4;
4188 break;
4189 case PCI_EXP_LNKSTA_NLW_X8:
4190 hw->bus.width = i40e_bus_width_pcie_x8;
4191 break;
4192 default:
4193 hw->bus.width = i40e_bus_width_unknown;
4194 break;
4195 }
4196
4197 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4198 case PCI_EXP_LNKSTA_CLS_2_5GB:
4199 hw->bus.speed = i40e_bus_speed_2500;
4200 break;
4201 case PCI_EXP_LNKSTA_CLS_5_0GB:
4202 hw->bus.speed = i40e_bus_speed_5000;
4203 break;
4204 case PCI_EXP_LNKSTA_CLS_8_0GB:
4205 hw->bus.speed = i40e_bus_speed_8000;
4206 break;
4207 default:
4208 hw->bus.speed = i40e_bus_speed_unknown;
4209 break;
4210 }
4211}
4212
4213/**
4214 * i40e_aq_debug_dump
4215 * @hw: pointer to the hardware structure
4216 * @cluster_id: specific cluster to dump
4217 * @table_id: table id within cluster
4218 * @start_index: index of line in the block to read
4219 * @buff_size: dump buffer size
4220 * @buff: dump buffer
4221 * @ret_buff_size: actual buffer size returned
4222 * @ret_next_table: next block to read
4223 * @ret_next_index: next index to read
4224 * @cmd_details: pointer to command details structure or NULL
4225 *
4226 * Dump internal FW/HW data for debug purposes.
4227 *
4228 **/
4229int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4230 u8 table_id, u32 start_index, u16 buff_size,
4231 void *buff, u16 *ret_buff_size,
4232 u8 *ret_next_table, u32 *ret_next_index,
4233 struct i40e_asq_cmd_details *cmd_details)
4234{
4235 struct i40e_aq_desc desc;
4236 struct i40e_aqc_debug_dump_internals *cmd =
4237 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4238 struct i40e_aqc_debug_dump_internals *resp =
4239 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4240 int status;
4241
4242 if (buff_size == 0 || !buff)
4243 return -EINVAL;
4244
4245 i40e_fill_default_direct_cmd_desc(&desc,
4246 i40e_aqc_opc_debug_dump_internals);
4247 /* Indirect Command */
4248 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4249 if (buff_size > I40E_AQ_LARGE_BUF)
4250 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4251
4252 cmd->cluster_id = cluster_id;
4253 cmd->table_id = table_id;
4254 cmd->idx = cpu_to_le32(start_index);
4255
4256 desc.datalen = cpu_to_le16(buff_size);
4257
4258 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4259 if (!status) {
4260 if (ret_buff_size)
4261 *ret_buff_size = le16_to_cpu(desc.datalen);
4262 if (ret_next_table)
4263 *ret_next_table = resp->table_id;
4264 if (ret_next_index)
4265 *ret_next_index = le32_to_cpu(resp->idx);
4266 }
4267
4268 return status;
4269}
4270
4271/**
4272 * i40e_read_bw_from_alt_ram
4273 * @hw: pointer to the hardware structure
4274 * @max_bw: pointer for max_bw read
4275 * @min_bw: pointer for min_bw read
4276 * @min_valid: pointer for bool that is true if min_bw is a valid value
4277 * @max_valid: pointer for bool that is true if max_bw is a valid value
4278 *
4279 * Read bw from the alternate ram for the given pf
4280 **/
4281int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4282 u32 *max_bw, u32 *min_bw,
4283 bool *min_valid, bool *max_valid)
4284{
4285 u32 max_bw_addr, min_bw_addr;
4286 int status;
4287
4288 /* Calculate the address of the min/max bw registers */
4289 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4290 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4291 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4292 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4293 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4294 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4295
4296 /* Read the bandwidths from alt ram */
4297 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4298 min_bw_addr, min_bw);
4299
4300 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4301 *min_valid = true;
4302 else
4303 *min_valid = false;
4304
4305 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4306 *max_valid = true;
4307 else
4308 *max_valid = false;
4309
4310 return status;
4311}
4312
4313/**
4314 * i40e_aq_configure_partition_bw
4315 * @hw: pointer to the hardware structure
4316 * @bw_data: Buffer holding valid pfs and bw limits
4317 * @cmd_details: pointer to command details
4318 *
4319 * Configure partitions guaranteed/max bw
4320 **/
4321int
4322i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4323 struct i40e_aqc_configure_partition_bw_data *bw_data,
4324 struct i40e_asq_cmd_details *cmd_details)
4325{
4326 u16 bwd_size = sizeof(*bw_data);
4327 struct i40e_aq_desc desc;
4328 int status;
4329
4330 i40e_fill_default_direct_cmd_desc(&desc,
4331 i40e_aqc_opc_configure_partition_bw);
4332
4333 /* Indirect command */
4334 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4335 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4336
4337 if (bwd_size > I40E_AQ_LARGE_BUF)
4338 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4339
4340 desc.datalen = cpu_to_le16(bwd_size);
4341
4342 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4343 cmd_details);
4344
4345 return status;
4346}
4347
4348/**
4349 * i40e_read_phy_register_clause22
4350 * @hw: pointer to the HW structure
4351 * @reg: register address in the page
4352 * @phy_addr: PHY address on MDIO interface
4353 * @value: PHY register value
4354 *
4355 * Reads specified PHY register value
4356 **/
4357int i40e_read_phy_register_clause22(struct i40e_hw *hw,
4358 u16 reg, u8 phy_addr, u16 *value)
4359{
4360 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4361 int status = -EIO;
4362 u32 command = 0;
4363 u16 retry = 1000;
4364
4365 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4366 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4367 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4368 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4369 (I40E_GLGEN_MSCA_MDICMD_MASK);
4370 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4371 do {
4372 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4373 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4374 status = 0;
4375 break;
4376 }
4377 udelay(10);
4378 retry--;
4379 } while (retry);
4380
4381 if (status) {
4382 i40e_debug(hw, I40E_DEBUG_PHY,
4383 "PHY: Can't write command to external PHY.\n");
4384 } else {
4385 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4386 *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
4387 }
4388
4389 return status;
4390}
4391
4392/**
4393 * i40e_write_phy_register_clause22
4394 * @hw: pointer to the HW structure
4395 * @reg: register address in the page
4396 * @phy_addr: PHY address on MDIO interface
4397 * @value: PHY register value
4398 *
4399 * Writes specified PHY register value
4400 **/
4401int i40e_write_phy_register_clause22(struct i40e_hw *hw,
4402 u16 reg, u8 phy_addr, u16 value)
4403{
4404 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4405 int status = -EIO;
4406 u32 command = 0;
4407 u16 retry = 1000;
4408
4409 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4410 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4411
4412 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4413 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4414 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4415 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4416 (I40E_GLGEN_MSCA_MDICMD_MASK);
4417
4418 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4419 do {
4420 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4421 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4422 status = 0;
4423 break;
4424 }
4425 udelay(10);
4426 retry--;
4427 } while (retry);
4428
4429 return status;
4430}
4431
4432/**
4433 * i40e_read_phy_register_clause45
4434 * @hw: pointer to the HW structure
4435 * @page: registers page number
4436 * @reg: register address in the page
4437 * @phy_addr: PHY address on MDIO interface
4438 * @value: PHY register value
4439 *
4440 * Reads specified PHY register value
4441 **/
4442int i40e_read_phy_register_clause45(struct i40e_hw *hw,
4443 u8 page, u16 reg, u8 phy_addr, u16 *value)
4444{
4445 u8 port_num = hw->func_caps.mdio_port_num;
4446 int status = -EIO;
4447 u32 command = 0;
4448 u16 retry = 1000;
4449
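	/* Clause 45 access is a two-step sequence: write the register address
	 * in the first MDIO cycle, then issue the read in a second cycle.
	 */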
4450 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4451 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4452 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4453 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4454 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4455 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4456 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4457 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4458 do {
4459 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4460 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4461 status = 0;
4462 break;
4463 }
4464 usleep_range(10, 20);
4465 retry--;
4466 } while (retry);
4467
4468 if (status) {
4469 i40e_debug(hw, I40E_DEBUG_PHY,
4470 "PHY: Can't write command to external PHY.\n");
4471 goto phy_read_end;
4472 }
4473
4474 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4475 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4476 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4477 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4478 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4479 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4480 status = -EIO;
4481 retry = 1000;
4482 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4483 do {
4484 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4485 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4486 status = 0;
4487 break;
4488 }
4489 usleep_range(10, 20);
4490 retry--;
4491 } while (retry);
4492
4493 if (!status) {
4494 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4495 *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
4496 } else {
4497 i40e_debug(hw, I40E_DEBUG_PHY,
4498 "PHY: Can't read register value from external PHY.\n");
4499 }
4500
4501phy_read_end:
4502 return status;
4503}
4504
4505/**
4506 * i40e_write_phy_register_clause45
4507 * @hw: pointer to the HW structure
4508 * @page: registers page number
4509 * @reg: register address in the page
4510 * @phy_addr: PHY address on MDIO interface
4511 * @value: PHY register value
4512 *
4513 * Writes value to specified PHY register
4514 **/
4515int i40e_write_phy_register_clause45(struct i40e_hw *hw,
4516 u8 page, u16 reg, u8 phy_addr, u16 value)
4517{
4518 u8 port_num = hw->func_caps.mdio_port_num;
4519 int status = -EIO;
4520 u16 retry = 1000;
4521 u32 command = 0;
4522
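	/* As with reads, clause 45 writes take two cycles: an address cycle
	 * followed by the data write cycle.
	 */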
4523 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4524 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4525 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4526 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4527 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4528 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4529 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4530 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4531 do {
4532 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4533 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4534 status = 0;
4535 break;
4536 }
4537 usleep_range(10, 20);
4538 retry--;
4539 } while (retry);
4540 if (status) {
4541 i40e_debug(hw, I40E_DEBUG_PHY,
4542 "PHY: Can't write command to external PHY.\n");
4543 goto phy_write_end;
4544 }
4545
4546 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4547 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4548
4549 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4550 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4551 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4552 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4553 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4554 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4555 status = -EIO;
4556 retry = 1000;
4557 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4558 do {
4559 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4560 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4561 status = 0;
4562 break;
4563 }
4564 usleep_range(10, 20);
4565 retry--;
4566 } while (retry);
4567
4568phy_write_end:
4569 return status;
4570}
4571
4572/**
4573 * i40e_write_phy_register
4574 * @hw: pointer to the HW structure
4575 * @page: registers page number
4576 * @reg: register address in the page
4577 * @phy_addr: PHY address on MDIO interface
4578 * @value: PHY register value
4579 *
4580 * Writes value to specified PHY register
4581 **/
4582int i40e_write_phy_register(struct i40e_hw *hw,
4583 u8 page, u16 reg, u8 phy_addr, u16 value)
4584{
4585 int status;
4586
4587 switch (hw->device_id) {
4588 case I40E_DEV_ID_1G_BASE_T_X722:
4589 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4590 value);
4591 break;
4592 case I40E_DEV_ID_1G_BASE_T_BC:
4593 case I40E_DEV_ID_5G_BASE_T_BC:
4594 case I40E_DEV_ID_10G_BASE_T:
4595 case I40E_DEV_ID_10G_BASE_T4:
4596 case I40E_DEV_ID_10G_BASE_T_BC:
4597 case I40E_DEV_ID_10G_BASE_T_X722:
4598 case I40E_DEV_ID_25G_B:
4599 case I40E_DEV_ID_25G_SFP28:
4600 status = i40e_write_phy_register_clause45(hw, page, reg,
4601 phy_addr, value);
4602 break;
4603 default:
4604 status = -EIO;
4605 break;
4606 }
4607
4608 return status;
4609}
4610
4611/**
4612 * i40e_read_phy_register
4613 * @hw: pointer to the HW structure
4614 * @page: registers page number
4615 * @reg: register address in the page
4616 * @phy_addr: PHY address on MDIO interface
4617 * @value: PHY register value
4618 *
4619 * Reads specified PHY register value
4620 **/
4621int i40e_read_phy_register(struct i40e_hw *hw,
4622 u8 page, u16 reg, u8 phy_addr, u16 *value)
4623{
4624 int status;
4625
4626 switch (hw->device_id) {
4627 case I40E_DEV_ID_1G_BASE_T_X722:
4628 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4629 value);
4630 break;
4631 case I40E_DEV_ID_1G_BASE_T_BC:
4632 case I40E_DEV_ID_5G_BASE_T_BC:
4633 case I40E_DEV_ID_10G_BASE_T:
4634 case I40E_DEV_ID_10G_BASE_T4:
4635 case I40E_DEV_ID_10G_BASE_T_BC:
4636 case I40E_DEV_ID_10G_BASE_T_X722:
4637 case I40E_DEV_ID_25G_B:
4638 case I40E_DEV_ID_25G_SFP28:
4639 status = i40e_read_phy_register_clause45(hw, page, reg,
4640 phy_addr, value);
4641 break;
4642 default:
4643 status = -EIO;
4644 break;
4645 }
4646
4647 return status;
4648}
4649
4650/**
4651 * i40e_get_phy_address
4652 * @hw: pointer to the HW structure
4653 * @dev_num: PHY port number whose address we want
4654 *
4655 * Gets PHY address for current port
4656 **/
4657u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4658{
4659 u8 port_num = hw->func_caps.mdio_port_num;
4660 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4661
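	/* PHY addresses are packed as 5-bit fields; extract the one for dev_num */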
4662 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4663}
4664
4665/**
4666 * i40e_blink_phy_link_led
4667 * @hw: pointer to the HW structure
4668 * @time: duration in seconds for which the LED will blink
4669 * @interval: gap between LED on and off in msecs
4670 *
4671 * Blinks PHY link LED
4672 **/
4673int i40e_blink_phy_link_led(struct i40e_hw *hw,
4674 u32 time, u32 interval)
4675{
4676 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4677 u16 gpio_led_port;
4678 u8 phy_addr = 0;
4679 int status = 0;
4680 u16 led_ctl;
4681 u8 port_num;
4682 u16 led_reg;
4683 u32 i;
4684
4685 i = rd32(hw, I40E_PFGEN_PORTNUM);
4686 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4687 phy_addr = i40e_get_phy_address(hw, port_num);
4688
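	/* Scan the three LED provisioning registers for the one in link mode,
	 * saving its original value so it can be restored when done.
	 */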
4689 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4690 led_addr++) {
4691 status = i40e_read_phy_register_clause45(hw,
4692 I40E_PHY_COM_REG_PAGE,
4693 led_addr, phy_addr,
4694 &led_reg);
4695 if (status)
4696 goto phy_blinking_end;
4697 led_ctl = led_reg;
4698 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4699 led_reg = 0;
4700 status = i40e_write_phy_register_clause45(hw,
4701 I40E_PHY_COM_REG_PAGE,
4702 led_addr, phy_addr,
4703 led_reg);
4704 if (status)
4705 goto phy_blinking_end;
4706 break;
4707 }
4708 }
4709
4710 if (time > 0 && interval > 0) {
4711 for (i = 0; i < time * 1000; i += interval) {
4712 status = i40e_read_phy_register_clause45(hw,
4713 I40E_PHY_COM_REG_PAGE,
4714 led_addr, phy_addr, &led_reg);
4715 if (status)
4716 goto restore_config;
4717 if (led_reg & I40E_PHY_LED_MANUAL_ON)
4718 led_reg = 0;
4719 else
4720 led_reg = I40E_PHY_LED_MANUAL_ON;
4721 status = i40e_write_phy_register_clause45(hw,
4722 I40E_PHY_COM_REG_PAGE,
4723 led_addr, phy_addr, led_reg);
4724 if (status)
4725 goto restore_config;
4726 msleep(interval);
4727 }
4728 }
4729
4730restore_config:
4731 status = i40e_write_phy_register_clause45(hw,
4732 I40E_PHY_COM_REG_PAGE,
4733 led_addr, phy_addr, led_ctl);
4734
4735phy_blinking_end:
4736 return status;
4737}
4738
4739/**
4740 * i40e_led_get_reg - read LED register
4741 * @hw: pointer to the HW structure
4742 * @led_addr: LED register address
4743 * @reg_val: read register value
4744 **/
4745static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
4746 u32 *reg_val)
4747{
4748 u8 phy_addr = 0;
4749 u8 port_num;
4750 int status;
4751 u32 i;
4752
4753 *reg_val = 0;
4754 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
4755 status =
4756 i40e_aq_get_phy_register(hw,
4757 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
4758 I40E_PHY_COM_REG_PAGE, true,
4759 I40E_PHY_LED_PROV_REG_1,
4760 reg_val, NULL);
4761 } else {
4762 i = rd32(hw, I40E_PFGEN_PORTNUM);
4763 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4764 phy_addr = i40e_get_phy_address(hw, port_num);
4765 status = i40e_read_phy_register_clause45(hw,
4766 I40E_PHY_COM_REG_PAGE,
4767 led_addr, phy_addr,
4768 (u16 *)reg_val);
4769 }
4770 return status;
4771}
4772
4773/**
4774 * i40e_led_set_reg - write LED register
4775 * @hw: pointer to the HW structure
4776 * @led_addr: LED register address
4777 * @reg_val: register value to write
4778 **/
4779static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
4780 u32 reg_val)
4781{
4782 u8 phy_addr = 0;
4783 u8 port_num;
4784 int status;
4785 u32 i;
4786
4787 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
4788 status =
4789 i40e_aq_set_phy_register(hw,
4790 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
4791 I40E_PHY_COM_REG_PAGE, true,
4792 I40E_PHY_LED_PROV_REG_1,
4793 reg_val, NULL);
4794 } else {
4795 i = rd32(hw, I40E_PFGEN_PORTNUM);
4796 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4797 phy_addr = i40e_get_phy_address(hw, port_num);
4798 status = i40e_write_phy_register_clause45(hw,
4799 I40E_PHY_COM_REG_PAGE,
4800 led_addr, phy_addr,
4801 (u16)reg_val);
4802 }
4803
4804 return status;
4805}
4806
4807/**
4808 * i40e_led_get_phy - return current on/off mode
4809 * @hw: pointer to the hw struct
4810 * @led_addr: address of led register to use
4811 * @val: original value of register to use
4812 *
4813 **/
4814int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
4815 u16 *val)
4816{
4817 u16 gpio_led_port;
4818 u8 phy_addr = 0;
4819 u32 reg_val_aq;
4820 int status = 0;
4821 u16 temp_addr;
4822 u16 reg_val;
4823 u8 port_num;
4824 u32 i;
4825
4826 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
4827 status =
4828 i40e_aq_get_phy_register(hw,
4829 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
4830 I40E_PHY_COM_REG_PAGE, true,
4831 I40E_PHY_LED_PROV_REG_1,
4832 &reg_val_aq, NULL);
4833 if (status == 0)
4834 *val = (u16)reg_val_aq;
4835 return status;
4836 }
4837 temp_addr = I40E_PHY_LED_PROV_REG_1;
4838 i = rd32(hw, I40E_PFGEN_PORTNUM);
4839 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4840 phy_addr = i40e_get_phy_address(hw, port_num);
4841
4842 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4843 temp_addr++) {
4844 status = i40e_read_phy_register_clause45(hw,
4845 I40E_PHY_COM_REG_PAGE,
4846 temp_addr, phy_addr,
4847 &reg_val);
4848 if (status)
4849 return status;
4850 *val = reg_val;
4851 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
4852 *led_addr = temp_addr;
4853 break;
4854 }
4855 }
4856 return status;
4857}
4858
4859/**
4860 * i40e_led_set_phy
4861 * @hw: pointer to the HW structure
4862 * @on: true to turn the LED on, false to turn it off
4863 * @led_addr: address of the LED register to use
4864 * @mode: original register value, plus a flag selecting whether to restore it
4865 *
4866 * Set LEDs on or off when controlled by the PHY
4867 *
4868 **/
4869int i40e_led_set_phy(struct i40e_hw *hw, bool on,
4870 u16 led_addr, u32 mode)
4871{
4872 u32 led_ctl = 0;
4873 u32 led_reg = 0;
4874 int status = 0;
4875
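	/* Read the current LED control, drop out of link-activity mode if it
	 * is set, force the LED on or off, and optionally restore the original
	 * mode via the I40E_PHY_LED_MODE_ORIG flag in 'mode'.
	 */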
4876 status = i40e_led_get_reg(hw, led_addr, &led_reg);
4877 if (status)
4878 return status;
4879 led_ctl = led_reg;
4880 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4881 led_reg = 0;
4882 status = i40e_led_set_reg(hw, led_addr, led_reg);
4883 if (status)
4884 return status;
4885 }
4886 status = i40e_led_get_reg(hw, led_addr, &led_reg);
4887 if (status)
4888 goto restore_config;
4889 if (on)
4890 led_reg = I40E_PHY_LED_MANUAL_ON;
4891 else
4892 led_reg = 0;
4893
4894 status = i40e_led_set_reg(hw, led_addr, led_reg);
4895 if (status)
4896 goto restore_config;
4897 if (mode & I40E_PHY_LED_MODE_ORIG) {
4898 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
4899 status = i40e_led_set_reg(hw, led_addr, led_ctl);
4900 }
4901 return status;
4902
4903restore_config:
4904 status = i40e_led_set_reg(hw, led_addr, led_ctl);
4905 return status;
4906}
4907
4908/**
4909 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
4910 * @hw: pointer to the hw struct
4911 * @reg_addr: register address
4912 * @reg_val: ptr to register value
4913 * @cmd_details: pointer to command details structure or NULL
4914 *
4915 * Use the firmware to read the Rx control register,
4916 * especially useful if the Rx unit is under heavy pressure
4917 **/
4918int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
4919 u32 reg_addr, u32 *reg_val,
4920 struct i40e_asq_cmd_details *cmd_details)
4921{
4922 struct i40e_aq_desc desc;
4923 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
4924 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
4925 int status;
4926
4927 if (!reg_val)
4928 return -EINVAL;
4929
4930 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
4931
4932 cmd_resp->address = cpu_to_le32(reg_addr);
4933
4934 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4935
4936 if (status == 0)
4937 *reg_val = le32_to_cpu(cmd_resp->value);
4938
4939 return status;
4940}
4941
4942/**
4943 * i40e_read_rx_ctl - read from an Rx control register
4944 * @hw: pointer to the hw struct
4945 * @reg_addr: register address
4946 **/
4947u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
4948{
4949 bool use_register = false;
4950 int status = 0;
4951 int retry = 5;
4952 u32 val = 0;
4953
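	/* On firmware older than AQ API 1.5, or on X722, skip the AQ method
	 * and read the register directly.
	 */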
4954 if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
4955 use_register = true;
4956
4957 if (!use_register) {
4958do_retry:
4959 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
4960 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
4961 usleep_range(1000, 2000);
4962 retry--;
4963 goto do_retry;
4964 }
4965 }
4966
4967 /* if the AQ access failed, try the old-fashioned way */
4968 if (status || use_register)
4969 val = rd32(hw, reg_addr);
4970
4971 return val;
4972}
4973
4974/**
4975 * i40e_aq_rx_ctl_write_register
4976 * @hw: pointer to the hw struct
4977 * @reg_addr: register address
4978 * @reg_val: register value
4979 * @cmd_details: pointer to command details structure or NULL
4980 *
4981 * Use the firmware to write to an Rx control register,
4982 * especially useful if the Rx unit is under heavy pressure
4983 **/
4984int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
4985 u32 reg_addr, u32 reg_val,
4986 struct i40e_asq_cmd_details *cmd_details)
4987{
4988 struct i40e_aq_desc desc;
4989 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
4990 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
4991 int status;
4992
4993 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
4994
4995 cmd->address = cpu_to_le32(reg_addr);
4996 cmd->value = cpu_to_le32(reg_val);
4997
4998 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4999
5000 return status;
5001}
5002
5003/**
5004 * i40e_write_rx_ctl - write to an Rx control register
5005 * @hw: pointer to the hw struct
5006 * @reg_addr: register address
5007 * @reg_val: register value
5008 **/
5009void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5010{
5011 bool use_register = false;
5012 int status = 0;
5013 int retry = 5;
5014
5015 if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
5016 use_register = true;
5017
5018 if (!use_register) {
5019do_retry:
5020 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5021 reg_val, NULL);
5022 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5023 usleep_range(1000, 2000);
5024 retry--;
5025 goto do_retry;
5026 }
5027 }
5028
5029 /* if the AQ access failed, try the old-fashioned way */
5030 if (status || use_register)
5031 wr32(hw, reg_addr, reg_val);
5032}
5033
5034/**
5035 * i40e_mdio_if_number_selection - MDIO I/F number selection
5036 * @hw: pointer to the hw struct
5037 * @set_mdio: use MDIO I/F number specified by mdio_num
5038 * @mdio_num: MDIO I/F number
5039 * @cmd: pointer to PHY Register command structure
5040 **/
5041static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5042 u8 mdio_num,
5043 struct i40e_aqc_phy_register_access *cmd)
5044{
5045 if (!set_mdio ||
5046 cmd->phy_interface != I40E_AQ_PHY_REG_ACCESS_EXTERNAL)
5047 return;
5048
5049 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps)) {
5050 cmd->cmd_flags |=
5051 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5052 FIELD_PREP(I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK,
5053 mdio_num);
5054 } else {
5055 i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n");
5056 }
5057}
5058
5059/**
5060 * i40e_aq_set_phy_register_ext
5061 * @hw: pointer to the hw struct
5062 * @phy_select: select which phy should be accessed
5063 * @dev_addr: PHY device address
5064 * @page_change: flag to indicate if phy page should be updated
5065 * @set_mdio: use MDIO I/F number specified by mdio_num
5066 * @mdio_num: MDIO I/F number
5067 * @reg_addr: PHY register address
5068 * @reg_val: new register value
5069 * @cmd_details: pointer to command details structure or NULL
5070 *
5071 * Write the external PHY register.
5072 * NOTE: In most cases the MDIO I/F number should not be changed, which is why
5073 * you may use the simple wrapper i40e_aq_set_phy_register.
5074 **/
5075int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5076 u8 phy_select, u8 dev_addr, bool page_change,
5077 bool set_mdio, u8 mdio_num,
5078 u32 reg_addr, u32 reg_val,
5079 struct i40e_asq_cmd_details *cmd_details)
5080{
5081 struct i40e_aq_desc desc;
5082 struct i40e_aqc_phy_register_access *cmd =
5083 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5084 int status;
5085
5086 i40e_fill_default_direct_cmd_desc(&desc,
5087 i40e_aqc_opc_set_phy_register);
5088
5089 cmd->phy_interface = phy_select;
5090 cmd->dev_address = dev_addr;
5091 cmd->reg_address = cpu_to_le32(reg_addr);
5092 cmd->reg_value = cpu_to_le32(reg_val);
5093
5094 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5095
5096 if (!page_change)
5097 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5098
5099 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5100
5101 return status;
5102}
5103
5104/**
5105 * i40e_aq_get_phy_register_ext
5106 * @hw: pointer to the hw struct
5107 * @phy_select: select which phy should be accessed
5108 * @dev_addr: PHY device address
5109 * @page_change: flag to indicate if phy page should be updated
5110 * @set_mdio: use MDIO I/F number specified by mdio_num
5111 * @mdio_num: MDIO I/F number
5112 * @reg_addr: PHY register address
5113 * @reg_val: read register value
5114 * @cmd_details: pointer to command details structure or NULL
5115 *
5116 * Read the external PHY register.
5117 * NOTE: In most cases the MDIO I/F number should not be changed, which is why
5118 * you may use the simple wrapper i40e_aq_get_phy_register.
5119 **/
5120int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5121 u8 phy_select, u8 dev_addr, bool page_change,
5122 bool set_mdio, u8 mdio_num,
5123 u32 reg_addr, u32 *reg_val,
5124 struct i40e_asq_cmd_details *cmd_details)
5125{
5126 struct i40e_aq_desc desc;
5127 struct i40e_aqc_phy_register_access *cmd =
5128 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5129 int status;
5130
5131 i40e_fill_default_direct_cmd_desc(&desc,
5132 i40e_aqc_opc_get_phy_register);
5133
5134 cmd->phy_interface = phy_select;
5135 cmd->dev_address = dev_addr;
5136 cmd->reg_address = cpu_to_le32(reg_addr);
5137
5138 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5139
5140 if (!page_change)
5141 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5142
5143 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5144 if (!status)
5145 *reg_val = le32_to_cpu(cmd->reg_value);
5146
5147 return status;
5148}
5149
5150/**
5151 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5152 * @hw: pointer to the hw struct
5153 * @buff: command buffer (size in bytes = buff_size)
5154 * @buff_size: buffer size in bytes
5155 * @track_id: package tracking id
5156 * @error_offset: returns error offset
5157 * @error_info: returns error information
5158 * @cmd_details: pointer to command details structure or NULL
5159 **/
5160int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5161 u16 buff_size, u32 track_id,
5162 u32 *error_offset, u32 *error_info,
5163 struct i40e_asq_cmd_details *cmd_details)
5164{
5165 struct i40e_aq_desc desc;
5166 struct i40e_aqc_write_personalization_profile *cmd =
5167 (struct i40e_aqc_write_personalization_profile *)
5168 &desc.params.raw;
5169 struct i40e_aqc_write_ddp_resp *resp;
5170 int status;
5171
5172 i40e_fill_default_direct_cmd_desc(&desc,
5173 i40e_aqc_opc_write_personalization_profile);
5174
5175 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5176 if (buff_size > I40E_AQ_LARGE_BUF)
5177 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5178
5179 desc.datalen = cpu_to_le16(buff_size);
5180
5181 cmd->profile_track_id = cpu_to_le32(track_id);
5182
5183 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5184 if (!status) {
5185 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5186 if (error_offset)
5187 *error_offset = le32_to_cpu(resp->error_offset);
5188 if (error_info)
5189 *error_info = le32_to_cpu(resp->error_info);
5190 }
5191
5192 return status;
5193}
5194
5195/**
5196 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5197 * @hw: pointer to the hw struct
5198 * @buff: command buffer (size in bytes = buff_size)
5199 * @buff_size: buffer size in bytes
5200 * @flags: AdminQ command flags
5201 * @cmd_details: pointer to command details structure or NULL
5202 **/
5203int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5204 u16 buff_size, u8 flags,
5205 struct i40e_asq_cmd_details *cmd_details)
5206{
5207 struct i40e_aq_desc desc;
5208 struct i40e_aqc_get_applied_profiles *cmd =
5209 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5210 int status;
5211
5212 i40e_fill_default_direct_cmd_desc(&desc,
5213 i40e_aqc_opc_get_personalization_profile_list);
5214
5215 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5216 if (buff_size > I40E_AQ_LARGE_BUF)
5217 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5218 desc.datalen = cpu_to_le16(buff_size);
5219
5220 cmd->flags = flags;
5221
5222 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5223
5224 return status;
5225}
5226
5227/**
5228 * i40e_find_segment_in_package
5229 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5230 * @pkg_hdr: pointer to the package header to be searched
5231 *
5232 * This function searches a package file for a particular segment type. On
5233 * success it returns a pointer to the segment header, otherwise it will
5234 * return NULL.
5235 **/
5236struct i40e_generic_seg_header *
5237i40e_find_segment_in_package(u32 segment_type,
5238 struct i40e_package_header *pkg_hdr)
5239{
5240 struct i40e_generic_seg_header *segment;
5241 u32 i;
5242
5243 /* Search all package segments for the requested segment type */
5244 for (i = 0; i < pkg_hdr->segment_count; i++) {
5245 segment =
5246 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5247 pkg_hdr->segment_offset[i]);
5248
5249 if (segment->type == segment_type)
5250 return segment;
5251 }
5252
5253 return NULL;
5254}
5255
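/* Profile segment layout, as walked by the macro below: the device table is
 * followed by a u32 table whose first entry gives the number of entries that
 * follow it, and the section table begins immediately after that table.
 */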
5256/* Get section table in profile */
5257#define I40E_SECTION_TABLE(profile, sec_tbl) \
5258 do { \
5259 struct i40e_profile_segment *p = (profile); \
5260 u32 count; \
5261 u32 *nvm; \
5262 count = p->device_table_count; \
5263 nvm = (u32 *)&p->device_table[count]; \
5264 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5265 } while (0)
5266
5267/* Get section header in profile */
5268#define I40E_SECTION_HEADER(profile, offset) \
5269 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5270
5271/**
5272 * i40e_find_section_in_profile
5273 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5274 * @profile: pointer to the i40e segment header to be searched
5275 *
5276 * This function searches i40e segment for a particular section type. On
5277 * success it returns a pointer to the section header, otherwise it will
5278 * return NULL.
5279 **/
5280struct i40e_profile_section_header *
5281i40e_find_section_in_profile(u32 section_type,
5282 struct i40e_profile_segment *profile)
5283{
5284 struct i40e_profile_section_header *sec;
5285 struct i40e_section_table *sec_tbl;
5286 u32 sec_off;
5287 u32 i;
5288
5289 if (profile->header.type != SEGMENT_TYPE_I40E)
5290 return NULL;
5291
5292 I40E_SECTION_TABLE(profile, sec_tbl);
5293
5294 for (i = 0; i < sec_tbl->section_count; i++) {
5295 sec_off = sec_tbl->section_offset[i];
5296 sec = I40E_SECTION_HEADER(profile, sec_off);
5297 if (sec->section.type == section_type)
5298 return sec;
5299 }
5300
5301 return NULL;
5302}
5303
5304/**
5305 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5306 * @hw: pointer to the hw struct
5307 * @aq: command buffer containing all data to execute AQ
5308 **/
5309static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5310 struct i40e_profile_aq_section *aq)
5311{
5312 struct i40e_aq_desc desc;
5313 u8 *msg = NULL;
5314 u16 msglen;
5315 int status;
5316
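	/* Rebuild the descriptor from the fields stored in the DDP AQ section
	 * and attach its payload, if any, as the indirect buffer.
	 */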
5317 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5318 desc.flags |= cpu_to_le16(aq->flags);
5319 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5320
5321 msglen = aq->datalen;
5322 if (msglen) {
5323 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5324 I40E_AQ_FLAG_RD));
5325 if (msglen > I40E_AQ_LARGE_BUF)
5326 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5327 desc.datalen = cpu_to_le16(msglen);
5328 msg = &aq->data[0];
5329 }
5330
5331 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5332
5333 if (status) {
5334 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5335 "unable to exec DDP AQ opcode %u, error %d\n",
5336 aq->opcode, status);
5337 return status;
5338 }
5339
5340 /* copy returned desc to aq_buf */
5341 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5342
5343 return 0;
5344}
5345
5346/**
5347 * i40e_validate_profile
5348 * @hw: pointer to the hardware structure
5349 * @profile: pointer to the profile segment of the package to be validated
5350 * @track_id: package tracking id
5351 * @rollback: flag indicating whether the profile is for rollback
5352 *
5353 * Validates supported devices and profile's sections.
5354 */
5355static int
5356i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5357 u32 track_id, bool rollback)
5358{
5359 struct i40e_profile_section_header *sec = NULL;
5360 struct i40e_section_table *sec_tbl;
5361 u32 vendor_dev_id;
5362 int status = 0;
5363 u32 dev_cnt;
5364 u32 sec_off;
5365 u32 i;
5366
5367 if (track_id == I40E_DDP_TRACKID_INVALID) {
5368 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5369 return -EOPNOTSUPP;
5370 }
5371
5372 dev_cnt = profile->device_table_count;
5373 for (i = 0; i < dev_cnt; i++) {
5374 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5375 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5376 hw->device_id == (vendor_dev_id & 0xFFFF))
5377 break;
5378 }
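	/* A non-empty device table must list this device's vendor/device ID */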
5379 if (dev_cnt && i == dev_cnt) {
5380 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5381 "Device doesn't support DDP\n");
5382 return -ENODEV;
5383 }
5384
5385 I40E_SECTION_TABLE(profile, sec_tbl);
5386
5387 /* Validate sections types */
5388 for (i = 0; i < sec_tbl->section_count; i++) {
5389 sec_off = sec_tbl->section_offset[i];
5390 sec = I40E_SECTION_HEADER(profile, sec_off);
5391 if (rollback) {
5392 if (sec->section.type == SECTION_TYPE_MMIO ||
5393 sec->section.type == SECTION_TYPE_AQ ||
5394 sec->section.type == SECTION_TYPE_RB_AQ) {
5395 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5396 "Not a roll-back package\n");
5397 return -EOPNOTSUPP;
5398 }
5399 } else {
5400 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5401 sec->section.type == SECTION_TYPE_RB_MMIO) {
5402 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5403 "Not an original package\n");
5404 return -EOPNOTSUPP;
5405 }
5406 }
5407 }
5408
5409 return status;
5410}
5411
5412/**
5413 * i40e_write_profile
5414 * @hw: pointer to the hardware structure
5415 * @profile: pointer to the profile segment of the package to be downloaded
5416 * @track_id: package tracking id
5417 *
5418 * Handles the download of a complete package.
5419 */
5420int
5421i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5422 u32 track_id)
5423{
5424 struct i40e_profile_section_header *sec = NULL;
5425 struct i40e_profile_aq_section *ddp_aq;
5426 struct i40e_section_table *sec_tbl;
5427 u32 offset = 0, info = 0;
5428 u32 section_size = 0;
5429 int status = 0;
5430 u32 sec_off;
5431 u32 i;
5432
5433 status = i40e_validate_profile(hw, profile, track_id, false);
5434 if (status)
5435 return status;
5436
5437 I40E_SECTION_TABLE(profile, sec_tbl);
5438
5439 for (i = 0; i < sec_tbl->section_count; i++) {
5440 sec_off = sec_tbl->section_offset[i];
5441 sec = I40E_SECTION_HEADER(profile, sec_off);
5442 /* Process generic admin command */
5443 if (sec->section.type == SECTION_TYPE_AQ) {
5444 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5445 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5446 if (status) {
5447 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5448 "Failed to execute aq: section %d, opcode %u\n",
5449 i, ddp_aq->opcode);
5450 break;
5451 }
5452 sec->section.type = SECTION_TYPE_RB_AQ;
5453 }
5454
5455 /* Skip any non-mmio sections */
5456 if (sec->section.type != SECTION_TYPE_MMIO)
5457 continue;
5458
5459 section_size = sec->section.size +
5460 sizeof(struct i40e_profile_section_header);
5461
5462 /* Write MMIO section */
5463 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5464 track_id, &offset, &info, NULL);
5465 if (status) {
5466 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5467 "Failed to write profile: section %d, offset %d, info %d\n",
5468 i, offset, info);
5469 break;
5470 }
5471 }
5472 return status;
5473}
5474
5475/**
5476 * i40e_rollback_profile
5477 * @hw: pointer to the hardware structure
5478 * @profile: pointer to the profile segment of the package to be removed
5479 * @track_id: package tracking id
5480 *
5481 * Rolls back previously loaded package.
5482 */
5483int
5484i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5485 u32 track_id)
5486{
5487 struct i40e_profile_section_header *sec = NULL;
5488 struct i40e_section_table *sec_tbl;
5489 u32 offset = 0, info = 0;
5490 u32 section_size = 0;
5491 int status = 0;
5492 u32 sec_off;
5493 int i;
5494
5495 status = i40e_validate_profile(hw, profile, track_id, true);
5496 if (status)
5497 return status;
5498
5499 I40E_SECTION_TABLE(profile, sec_tbl);
5500
5501 /* For rollback, write sections in reverse order */
5502 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5503 sec_off = sec_tbl->section_offset[i];
5504 sec = I40E_SECTION_HEADER(profile, sec_off);
5505
5506 /* Skip any non-rollback sections */
5507 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5508 continue;
5509
5510 section_size = sec->section.size +
5511 sizeof(struct i40e_profile_section_header);
5512
5513 /* Write roll-back MMIO section */
5514 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5515 track_id, &offset, &info, NULL);
5516 if (status) {
5517 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5518 "Failed to write profile: section %d, offset %d, info %d\n",
5519 i, offset, info);
5520 break;
5521 }
5522 }
5523 return status;
5524}
5525
5526/**
5527 * i40e_add_pinfo_to_list
5528 * @hw: pointer to the hardware structure
5529 * @profile: pointer to the profile segment of the package
5530 * @profile_info_sec: buffer for information section
5531 * @track_id: package tracking id
5532 *
5533 * Register a profile to the list of loaded profiles.
5534 */
5535int
5536i40e_add_pinfo_to_list(struct i40e_hw *hw,
5537 struct i40e_profile_segment *profile,
5538 u8 *profile_info_sec, u32 track_id)
5539{
5540 struct i40e_profile_section_header *sec = NULL;
5541 struct i40e_profile_info *pinfo;
5542 u32 offset = 0, info = 0;
5543 int status = 0;
5544
5545 sec = (struct i40e_profile_section_header *)profile_info_sec;
5546 sec->tbl_size = 1;
5547 sec->data_end = sizeof(struct i40e_profile_section_header) +
5548 sizeof(struct i40e_profile_info);
5549 sec->section.type = SECTION_TYPE_INFO;
5550 sec->section.offset = sizeof(struct i40e_profile_section_header);
5551 sec->section.size = sizeof(struct i40e_profile_info);
5552 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5553 sec->section.offset);
5554 pinfo->track_id = track_id;
5555 pinfo->version = profile->version;
5556 pinfo->op = I40E_DDP_ADD_TRACKID;
5557 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5558
5559 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5560 track_id, &offset, &info, NULL);
5561
5562 return status;
5563}
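
/* Illustrative sketch (not part of the upstream driver): one way a caller
 * could string the DDP helpers above together - write the profile (which
 * also validates it) and then record it in the loaded-profile list.  The
 * wrapper name is hypothetical; the profile-info buffer size is an
 * assumption derived from the structures used by i40e_add_pinfo_to_list().
 */
static int i40e_example_load_ddp_profile(struct i40e_hw *hw,
					 struct i40e_profile_segment *profile,
					 u32 track_id)
{
	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
		     sizeof(struct i40e_profile_info)];
	int status;

	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
}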
5564
5565/**
5566 * i40e_aq_add_cloud_filters
5567 * @hw: pointer to the hardware structure
5568 * @seid: VSI seid to add cloud filters to
5569 * @filters: Buffer which contains the filters to be added
5570 * @filter_count: number of filters contained in the buffer
5571 *
5572 * Set the cloud filters for a given VSI. The contents of the
5573 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5574 * of the function.
5575 *
5576 **/
5577int
5578i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5579 struct i40e_aqc_cloud_filters_element_data *filters,
5580 u8 filter_count)
5581{
5582 struct i40e_aq_desc desc;
5583 struct i40e_aqc_add_remove_cloud_filters *cmd =
5584 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5585 u16 buff_len;
5586 int status;
5587
5588 i40e_fill_default_direct_cmd_desc(&desc,
5589 i40e_aqc_opc_add_cloud_filters);
5590
5591 buff_len = filter_count * sizeof(*filters);
5592 desc.datalen = cpu_to_le16(buff_len);
5593 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5594 cmd->num_filters = filter_count;
5595 cmd->seid = cpu_to_le16(seid);
5596
5597 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5598
5599 return status;
5600}
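
/* Illustrative sketch (not part of the upstream driver): minimal caller-side
 * setup of a single cloud filter element before handing it to
 * i40e_aq_add_cloud_filters().  Field names are assumed to match the element
 * layout in i40e_adminq_cmd.h; the match-type flags value and the helper
 * name are hypothetical and left to the caller.
 */
static int i40e_example_add_one_cloud_filter(struct i40e_hw *hw, u16 seid,
					     const u8 *dst_mac, u32 tenant_id,
					     u16 queue, u16 match_flags)
{
	struct i40e_aqc_cloud_filters_element_data filter = {};

	ether_addr_copy(filter.outer_mac, dst_mac);
	filter.tenant_id = cpu_to_le32(tenant_id);
	filter.queue_number = cpu_to_le16(queue);
	filter.flags = cpu_to_le16(match_flags);

	return i40e_aq_add_cloud_filters(hw, seid, &filter, 1);
}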
5601
5602/**
5603 * i40e_aq_add_cloud_filters_bb
5604 * @hw: pointer to the hardware structure
5605 * @seid: VSI seid to add cloud filters to
5606 * @filters: Buffer which contains the filters in big buffer to be added
5607 * @filter_count: number of filters contained in the buffer
5608 *
5609 * Set the big buffer cloud filters for a given VSI. The contents of the
5610 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5611 * function.
5612 *
5613 **/
5614int
5615i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5616 struct i40e_aqc_cloud_filters_element_bb *filters,
5617 u8 filter_count)
5618{
5619 struct i40e_aq_desc desc;
5620 struct i40e_aqc_add_remove_cloud_filters *cmd =
5621 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5622 u16 buff_len;
5623 int status;
5624 int i;
5625
5626 i40e_fill_default_direct_cmd_desc(&desc,
5627 i40e_aqc_opc_add_cloud_filters);
5628
5629 buff_len = filter_count * sizeof(*filters);
5630 desc.datalen = cpu_to_le16(buff_len);
5631 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5632 cmd->num_filters = filter_count;
5633 cmd->seid = cpu_to_le16(seid);
5634 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5635
5636 for (i = 0; i < filter_count; i++) {
5637 u16 tnl_type;
5638 u32 ti;
5639
5640 tnl_type = le16_get_bits(filters[i].element.flags,
5641 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
5642
5643 /* Due to hardware eccentricities, the Geneve VNI is carried one
5644 * byte further along than the Tenant ID field used by other
5645 * tunnel types, so shift it into place.
5646 */
5647 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5648 ti = le32_to_cpu(filters[i].element.tenant_id);
5649 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5650 }
5651 }
5652
5653 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5654
5655 return status;
5656}
5657
5658/**
5659 * i40e_aq_rem_cloud_filters
5660 * @hw: pointer to the hardware structure
5661 * @seid: VSI seid to remove cloud filters from
5662 * @filters: Buffer which contains the filters to be removed
5663 * @filter_count: number of filters contained in the buffer
5664 *
5665 * Remove the cloud filters for a given VSI. The contents of the
5666 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5667 * of the function.
5668 *
5669 **/
5670int
5671i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5672 struct i40e_aqc_cloud_filters_element_data *filters,
5673 u8 filter_count)
5674{
5675 struct i40e_aq_desc desc;
5676 struct i40e_aqc_add_remove_cloud_filters *cmd =
5677 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5678 u16 buff_len;
5679 int status;
5680
5681 i40e_fill_default_direct_cmd_desc(&desc,
5682 i40e_aqc_opc_remove_cloud_filters);
5683
5684 buff_len = filter_count * sizeof(*filters);
5685 desc.datalen = cpu_to_le16(buff_len);
5686 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5687 cmd->num_filters = filter_count;
5688 cmd->seid = cpu_to_le16(seid);
5689
5690 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5691
5692 return status;
5693}
5694
5695/**
5696 * i40e_aq_rem_cloud_filters_bb
5697 * @hw: pointer to the hardware structure
5698 * @seid: VSI seid to remove cloud filters from
5699 * @filters: Buffer which contains the filters in big buffer to be removed
5700 * @filter_count: number of filters contained in the buffer
5701 *
5702 * Remove the big buffer cloud filters for a given VSI. The contents of the
5703 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5704 * function.
5705 *
5706 **/
5707int
5708i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5709 struct i40e_aqc_cloud_filters_element_bb *filters,
5710 u8 filter_count)
5711{
5712 struct i40e_aq_desc desc;
5713 struct i40e_aqc_add_remove_cloud_filters *cmd =
5714 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5715 u16 buff_len;
5716 int status;
5717 int i;
5718
5719 i40e_fill_default_direct_cmd_desc(&desc,
5720 i40e_aqc_opc_remove_cloud_filters);
5721
5722 buff_len = filter_count * sizeof(*filters);
5723 desc.datalen = cpu_to_le16(buff_len);
5724 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5725 cmd->num_filters = filter_count;
5726 cmd->seid = cpu_to_le16(seid);
5727 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5728
5729 for (i = 0; i < filter_count; i++) {
5730 u16 tnl_type;
5731 u32 ti;
5732
5733 tnl_type = le16_get_bits(filters[i].element.flags,
5734 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
5735
5736 /* Due to hardware eccentricities, the Geneve VNI is carried one
5737 * byte further along than the Tenant ID field used by other
5738 * tunnel types, so shift it into place.
5739 */
5740 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5741 ti = le32_to_cpu(filters[i].element.tenant_id);
5742 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5743 }
5744 }
5745
5746 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5747
5748 return status;
5749}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2021 Intel Corporation. */
3
4#include "i40e.h"
5#include "i40e_type.h"
6#include "i40e_adminq.h"
7#include "i40e_prototype.h"
8#include <linux/avf/virtchnl.h>
9
10/**
11 * i40e_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure
13 *
14 * This function sets the mac type of the adapter based on the
15 * vendor ID and device ID stored in the hw structure.
16 **/
17i40e_status i40e_set_mac_type(struct i40e_hw *hw)
18{
19 i40e_status status = 0;
20
21 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
22 switch (hw->device_id) {
23 case I40E_DEV_ID_SFP_XL710:
24 case I40E_DEV_ID_QEMU:
25 case I40E_DEV_ID_KX_B:
26 case I40E_DEV_ID_KX_C:
27 case I40E_DEV_ID_QSFP_A:
28 case I40E_DEV_ID_QSFP_B:
29 case I40E_DEV_ID_QSFP_C:
30 case I40E_DEV_ID_1G_BASE_T_BC:
31 case I40E_DEV_ID_5G_BASE_T_BC:
32 case I40E_DEV_ID_10G_BASE_T:
33 case I40E_DEV_ID_10G_BASE_T4:
34 case I40E_DEV_ID_10G_BASE_T_BC:
35 case I40E_DEV_ID_10G_B:
36 case I40E_DEV_ID_10G_SFP:
37 case I40E_DEV_ID_20G_KR2:
38 case I40E_DEV_ID_20G_KR2_A:
39 case I40E_DEV_ID_25G_B:
40 case I40E_DEV_ID_25G_SFP28:
41 case I40E_DEV_ID_X710_N3000:
42 case I40E_DEV_ID_XXV710_N3000:
43 hw->mac.type = I40E_MAC_XL710;
44 break;
45 case I40E_DEV_ID_KX_X722:
46 case I40E_DEV_ID_QSFP_X722:
47 case I40E_DEV_ID_SFP_X722:
48 case I40E_DEV_ID_1G_BASE_T_X722:
49 case I40E_DEV_ID_10G_BASE_T_X722:
50 case I40E_DEV_ID_SFP_I_X722:
51 case I40E_DEV_ID_SFP_X722_A:
52 hw->mac.type = I40E_MAC_X722;
53 break;
54 default:
55 hw->mac.type = I40E_MAC_GENERIC;
56 break;
57 }
58 } else {
59 status = I40E_ERR_DEVICE_NOT_SUPPORTED;
60 }
61
62 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
63 hw->mac.type, status);
64 return status;
65}
66
67/**
68 * i40e_aq_str - convert AQ err code to a string
69 * @hw: pointer to the HW structure
70 * @aq_err: the AQ error code to convert
71 **/
72const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
73{
74 switch (aq_err) {
75 case I40E_AQ_RC_OK:
76 return "OK";
77 case I40E_AQ_RC_EPERM:
78 return "I40E_AQ_RC_EPERM";
79 case I40E_AQ_RC_ENOENT:
80 return "I40E_AQ_RC_ENOENT";
81 case I40E_AQ_RC_ESRCH:
82 return "I40E_AQ_RC_ESRCH";
83 case I40E_AQ_RC_EINTR:
84 return "I40E_AQ_RC_EINTR";
85 case I40E_AQ_RC_EIO:
86 return "I40E_AQ_RC_EIO";
87 case I40E_AQ_RC_ENXIO:
88 return "I40E_AQ_RC_ENXIO";
89 case I40E_AQ_RC_E2BIG:
90 return "I40E_AQ_RC_E2BIG";
91 case I40E_AQ_RC_EAGAIN:
92 return "I40E_AQ_RC_EAGAIN";
93 case I40E_AQ_RC_ENOMEM:
94 return "I40E_AQ_RC_ENOMEM";
95 case I40E_AQ_RC_EACCES:
96 return "I40E_AQ_RC_EACCES";
97 case I40E_AQ_RC_EFAULT:
98 return "I40E_AQ_RC_EFAULT";
99 case I40E_AQ_RC_EBUSY:
100 return "I40E_AQ_RC_EBUSY";
101 case I40E_AQ_RC_EEXIST:
102 return "I40E_AQ_RC_EEXIST";
103 case I40E_AQ_RC_EINVAL:
104 return "I40E_AQ_RC_EINVAL";
105 case I40E_AQ_RC_ENOTTY:
106 return "I40E_AQ_RC_ENOTTY";
107 case I40E_AQ_RC_ENOSPC:
108 return "I40E_AQ_RC_ENOSPC";
109 case I40E_AQ_RC_ENOSYS:
110 return "I40E_AQ_RC_ENOSYS";
111 case I40E_AQ_RC_ERANGE:
112 return "I40E_AQ_RC_ERANGE";
113 case I40E_AQ_RC_EFLUSHED:
114 return "I40E_AQ_RC_EFLUSHED";
115 case I40E_AQ_RC_BAD_ADDR:
116 return "I40E_AQ_RC_BAD_ADDR";
117 case I40E_AQ_RC_EMODE:
118 return "I40E_AQ_RC_EMODE";
119 case I40E_AQ_RC_EFBIG:
120 return "I40E_AQ_RC_EFBIG";
121 }
122
123 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
124 return hw->err_str;
125}
126
127/**
128 * i40e_stat_str - convert status err code to a string
129 * @hw: pointer to the HW structure
130 * @stat_err: the status error code to convert
131 **/
132const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
133{
134 switch (stat_err) {
135 case 0:
136 return "OK";
137 case I40E_ERR_NVM:
138 return "I40E_ERR_NVM";
139 case I40E_ERR_NVM_CHECKSUM:
140 return "I40E_ERR_NVM_CHECKSUM";
141 case I40E_ERR_PHY:
142 return "I40E_ERR_PHY";
143 case I40E_ERR_CONFIG:
144 return "I40E_ERR_CONFIG";
145 case I40E_ERR_PARAM:
146 return "I40E_ERR_PARAM";
147 case I40E_ERR_MAC_TYPE:
148 return "I40E_ERR_MAC_TYPE";
149 case I40E_ERR_UNKNOWN_PHY:
150 return "I40E_ERR_UNKNOWN_PHY";
151 case I40E_ERR_LINK_SETUP:
152 return "I40E_ERR_LINK_SETUP";
153 case I40E_ERR_ADAPTER_STOPPED:
154 return "I40E_ERR_ADAPTER_STOPPED";
155 case I40E_ERR_INVALID_MAC_ADDR:
156 return "I40E_ERR_INVALID_MAC_ADDR";
157 case I40E_ERR_DEVICE_NOT_SUPPORTED:
158 return "I40E_ERR_DEVICE_NOT_SUPPORTED";
159 case I40E_ERR_PRIMARY_REQUESTS_PENDING:
160 return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
161 case I40E_ERR_INVALID_LINK_SETTINGS:
162 return "I40E_ERR_INVALID_LINK_SETTINGS";
163 case I40E_ERR_AUTONEG_NOT_COMPLETE:
164 return "I40E_ERR_AUTONEG_NOT_COMPLETE";
165 case I40E_ERR_RESET_FAILED:
166 return "I40E_ERR_RESET_FAILED";
167 case I40E_ERR_SWFW_SYNC:
168 return "I40E_ERR_SWFW_SYNC";
169 case I40E_ERR_NO_AVAILABLE_VSI:
170 return "I40E_ERR_NO_AVAILABLE_VSI";
171 case I40E_ERR_NO_MEMORY:
172 return "I40E_ERR_NO_MEMORY";
173 case I40E_ERR_BAD_PTR:
174 return "I40E_ERR_BAD_PTR";
175 case I40E_ERR_RING_FULL:
176 return "I40E_ERR_RING_FULL";
177 case I40E_ERR_INVALID_PD_ID:
178 return "I40E_ERR_INVALID_PD_ID";
179 case I40E_ERR_INVALID_QP_ID:
180 return "I40E_ERR_INVALID_QP_ID";
181 case I40E_ERR_INVALID_CQ_ID:
182 return "I40E_ERR_INVALID_CQ_ID";
183 case I40E_ERR_INVALID_CEQ_ID:
184 return "I40E_ERR_INVALID_CEQ_ID";
185 case I40E_ERR_INVALID_AEQ_ID:
186 return "I40E_ERR_INVALID_AEQ_ID";
187 case I40E_ERR_INVALID_SIZE:
188 return "I40E_ERR_INVALID_SIZE";
189 case I40E_ERR_INVALID_ARP_INDEX:
190 return "I40E_ERR_INVALID_ARP_INDEX";
191 case I40E_ERR_INVALID_FPM_FUNC_ID:
192 return "I40E_ERR_INVALID_FPM_FUNC_ID";
193 case I40E_ERR_QP_INVALID_MSG_SIZE:
194 return "I40E_ERR_QP_INVALID_MSG_SIZE";
195 case I40E_ERR_QP_TOOMANY_WRS_POSTED:
196 return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
197 case I40E_ERR_INVALID_FRAG_COUNT:
198 return "I40E_ERR_INVALID_FRAG_COUNT";
199 case I40E_ERR_QUEUE_EMPTY:
200 return "I40E_ERR_QUEUE_EMPTY";
201 case I40E_ERR_INVALID_ALIGNMENT:
202 return "I40E_ERR_INVALID_ALIGNMENT";
203 case I40E_ERR_FLUSHED_QUEUE:
204 return "I40E_ERR_FLUSHED_QUEUE";
205 case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
206 return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
207 case I40E_ERR_INVALID_IMM_DATA_SIZE:
208 return "I40E_ERR_INVALID_IMM_DATA_SIZE";
209 case I40E_ERR_TIMEOUT:
210 return "I40E_ERR_TIMEOUT";
211 case I40E_ERR_OPCODE_MISMATCH:
212 return "I40E_ERR_OPCODE_MISMATCH";
213 case I40E_ERR_CQP_COMPL_ERROR:
214 return "I40E_ERR_CQP_COMPL_ERROR";
215 case I40E_ERR_INVALID_VF_ID:
216 return "I40E_ERR_INVALID_VF_ID";
217 case I40E_ERR_INVALID_HMCFN_ID:
218 return "I40E_ERR_INVALID_HMCFN_ID";
219 case I40E_ERR_BACKING_PAGE_ERROR:
220 return "I40E_ERR_BACKING_PAGE_ERROR";
221 case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
222 return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
223 case I40E_ERR_INVALID_PBLE_INDEX:
224 return "I40E_ERR_INVALID_PBLE_INDEX";
225 case I40E_ERR_INVALID_SD_INDEX:
226 return "I40E_ERR_INVALID_SD_INDEX";
227 case I40E_ERR_INVALID_PAGE_DESC_INDEX:
228 return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
229 case I40E_ERR_INVALID_SD_TYPE:
230 return "I40E_ERR_INVALID_SD_TYPE";
231 case I40E_ERR_MEMCPY_FAILED:
232 return "I40E_ERR_MEMCPY_FAILED";
233 case I40E_ERR_INVALID_HMC_OBJ_INDEX:
234 return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
235 case I40E_ERR_INVALID_HMC_OBJ_COUNT:
236 return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
237 case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
238 return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
239 case I40E_ERR_SRQ_ENABLED:
240 return "I40E_ERR_SRQ_ENABLED";
241 case I40E_ERR_ADMIN_QUEUE_ERROR:
242 return "I40E_ERR_ADMIN_QUEUE_ERROR";
243 case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
244 return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
245 case I40E_ERR_BUF_TOO_SHORT:
246 return "I40E_ERR_BUF_TOO_SHORT";
247 case I40E_ERR_ADMIN_QUEUE_FULL:
248 return "I40E_ERR_ADMIN_QUEUE_FULL";
249 case I40E_ERR_ADMIN_QUEUE_NO_WORK:
250 return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
251 case I40E_ERR_BAD_IWARP_CQE:
252 return "I40E_ERR_BAD_IWARP_CQE";
253 case I40E_ERR_NVM_BLANK_MODE:
254 return "I40E_ERR_NVM_BLANK_MODE";
255 case I40E_ERR_NOT_IMPLEMENTED:
256 return "I40E_ERR_NOT_IMPLEMENTED";
257 case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
258 return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
259 case I40E_ERR_DIAG_TEST_FAILED:
260 return "I40E_ERR_DIAG_TEST_FAILED";
261 case I40E_ERR_NOT_READY:
262 return "I40E_ERR_NOT_READY";
263 case I40E_NOT_SUPPORTED:
264 return "I40E_NOT_SUPPORTED";
265 case I40E_ERR_FIRMWARE_API_VERSION:
266 return "I40E_ERR_FIRMWARE_API_VERSION";
267 case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
268 return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
269 }
270
271 snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
272 return hw->err_str;
273}
274
275/**
276 * i40e_debug_aq
277 * @hw: pointer to the hw struct
278 * @mask: debug mask
279 * @desc: pointer to admin queue descriptor
280 * @buffer: pointer to command buffer
281 * @buf_len: max length of buffer
282 *
283 * Dumps debug log about adminq command with descriptor contents.
284 **/
285void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
286 void *buffer, u16 buf_len)
287{
288 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
289 u32 effective_mask = hw->debug_mask & mask;
290 char prefix[27];
291 u16 len;
292 u8 *buf = (u8 *)buffer;
293
294 if (!effective_mask || !desc)
295 return;
296
297 len = le16_to_cpu(aq_desc->datalen);
298
299 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
300 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
301 le16_to_cpu(aq_desc->opcode),
302 le16_to_cpu(aq_desc->flags),
303 le16_to_cpu(aq_desc->datalen),
304 le16_to_cpu(aq_desc->retval));
305 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
306 "\tcookie (h,l) 0x%08X 0x%08X\n",
307 le32_to_cpu(aq_desc->cookie_high),
308 le32_to_cpu(aq_desc->cookie_low));
309 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
310 "\tparam (0,1) 0x%08X 0x%08X\n",
311 le32_to_cpu(aq_desc->params.internal.param0),
312 le32_to_cpu(aq_desc->params.internal.param1));
313 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
314 "\taddr (h,l) 0x%08X 0x%08X\n",
315 le32_to_cpu(aq_desc->params.external.addr_high),
316 le32_to_cpu(aq_desc->params.external.addr_low));
317
318 if (buffer && buf_len != 0 && len != 0 &&
319 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
320 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
321 if (buf_len < len)
322 len = buf_len;
323
324 snprintf(prefix, sizeof(prefix),
325 "i40e %02x:%02x.%x: \t0x",
326 hw->bus.bus_id,
327 hw->bus.device,
328 hw->bus.func);
329
330 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
331 16, 1, buf, len, false);
332 }
333}
334
335/**
336 * i40e_check_asq_alive
337 * @hw: pointer to the hw struct
338 *
339 * Returns true if the admin send queue is enabled, else false.
340 **/
341bool i40e_check_asq_alive(struct i40e_hw *hw)
342{
343 if (hw->aq.asq.len)
344 return !!(rd32(hw, hw->aq.asq.len) &
345 I40E_PF_ATQLEN_ATQENABLE_MASK);
346 else
347 return false;
348}
349
350/**
351 * i40e_aq_queue_shutdown
352 * @hw: pointer to the hw struct
353 * @unloading: is the driver unloading itself
354 *
355 * Tell the Firmware that we're shutting down the AdminQ and whether
356 * or not the driver is unloading as well.
357 **/
358i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
359 bool unloading)
360{
361 struct i40e_aq_desc desc;
362 struct i40e_aqc_queue_shutdown *cmd =
363 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
364 i40e_status status;
365
366 i40e_fill_default_direct_cmd_desc(&desc,
367 i40e_aqc_opc_queue_shutdown);
368
369 if (unloading)
370 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
371 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
372
373 return status;
374}
375
376/**
377 * i40e_aq_get_set_rss_lut
378 * @hw: pointer to the hardware structure
379 * @vsi_id: vsi fw index
380 * @pf_lut: for PF table set true, for VSI table set false
381 * @lut: pointer to the lut buffer provided by the caller
382 * @lut_size: size of the lut buffer
383 * @set: set true to set the table, false to get the table
384 *
385 * Internal function to get or set the RSS lookup table
386 **/
387static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
388 u16 vsi_id, bool pf_lut,
389 u8 *lut, u16 lut_size,
390 bool set)
391{
392 i40e_status status;
393 struct i40e_aq_desc desc;
394 struct i40e_aqc_get_set_rss_lut *cmd_resp =
395 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
396
397 if (set)
398 i40e_fill_default_direct_cmd_desc(&desc,
399 i40e_aqc_opc_set_rss_lut);
400 else
401 i40e_fill_default_direct_cmd_desc(&desc,
402 i40e_aqc_opc_get_rss_lut);
403
404 /* Indirect command */
405 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
406 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
407
408 cmd_resp->vsi_id =
409 cpu_to_le16((u16)((vsi_id <<
410 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
411 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
412 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
413
414 if (pf_lut)
415 cmd_resp->flags |= cpu_to_le16((u16)
416 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
417 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
418 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
419 else
420 cmd_resp->flags |= cpu_to_le16((u16)
421 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
422 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
423 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
424
425 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
426
427 return status;
428}
429
430/**
431 * i40e_aq_get_rss_lut
432 * @hw: pointer to the hardware structure
433 * @vsi_id: vsi fw index
434 * @pf_lut: for PF table set true, for VSI table set false
435 * @lut: pointer to the lut buffer provided by the caller
436 * @lut_size: size of the lut buffer
437 *
438 * get the RSS lookup table, PF or VSI type
439 **/
440i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
441 bool pf_lut, u8 *lut, u16 lut_size)
442{
443 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
444 false);
445}
446
447/**
448 * i40e_aq_set_rss_lut
449 * @hw: pointer to the hardware structure
450 * @vsi_id: vsi fw index
451 * @pf_lut: for PF table set true, for VSI table set false
452 * @lut: pointer to the lut buffer provided by the caller
453 * @lut_size: size of the lut buffer
454 *
455 * set the RSS lookup table, PF or VSI type
456 **/
457i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
458 bool pf_lut, u8 *lut, u16 lut_size)
459{
460 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
461}
462
463/**
464 * i40e_aq_get_set_rss_key
465 * @hw: pointer to the hw struct
466 * @vsi_id: vsi fw index
467 * @key: pointer to key info struct
468 * @set: set true to set the key, false to get the key
469 *
470 * get or set the RSS key for a VSI
471 **/
472static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
473 u16 vsi_id,
474 struct i40e_aqc_get_set_rss_key_data *key,
475 bool set)
476{
477 i40e_status status;
478 struct i40e_aq_desc desc;
479 struct i40e_aqc_get_set_rss_key *cmd_resp =
480 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
481 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
482
483 if (set)
484 i40e_fill_default_direct_cmd_desc(&desc,
485 i40e_aqc_opc_set_rss_key);
486 else
487 i40e_fill_default_direct_cmd_desc(&desc,
488 i40e_aqc_opc_get_rss_key);
489
490 /* Indirect command */
491 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
492 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
493
494 cmd_resp->vsi_id =
495 cpu_to_le16((u16)((vsi_id <<
496 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
497 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
498 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
499
500 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
501
502 return status;
503}
504
505/**
506 * i40e_aq_get_rss_key
507 * @hw: pointer to the hw struct
508 * @vsi_id: vsi fw index
509 * @key: pointer to key info struct
510 * get the RSS key for a VSI
511 **/
512i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
513 u16 vsi_id,
514 struct i40e_aqc_get_set_rss_key_data *key)
515{
516 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
517}
518
519/**
520 * i40e_aq_set_rss_key
521 * @hw: pointer to the hw struct
522 * @vsi_id: vsi fw index
523 * @key: pointer to key info struct
524 *
525 * set the RSS key per VSI
526 **/
527i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
528 u16 vsi_id,
529 struct i40e_aqc_get_set_rss_key_data *key)
530{
531 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
532}
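
/* Illustrative sketch (not part of the upstream driver): one way a caller
 * might program both the RSS key and a simple round-robin lookup table for
 * a VSI with the wrappers above.  The helper name is hypothetical and
 * I40E_HLUT_ARRAY_SIZE is assumed to be the per-VSI LUT size defined in the
 * PF driver headers.
 */
static i40e_status i40e_example_config_rss(struct i40e_hw *hw, u16 vsi_id,
					   struct i40e_aqc_get_set_rss_key_data *key,
					   u16 num_queues)
{
	u8 lut[I40E_HLUT_ARRAY_SIZE];
	i40e_status status;
	u16 i;

	if (!num_queues)
		return I40E_ERR_PARAM;

	status = i40e_aq_set_rss_key(hw, vsi_id, key);
	if (status)
		return status;

	/* spread the LUT entries evenly across the active queues */
	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_queues;

	return i40e_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));
}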
533
534/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
535 * hardware to a bit-field that can be used by SW to more easily determine the
536 * packet type.
537 *
538 * Macros are used to shorten the table lines and make this table human
539 * readable.
540 *
541 * We store the PTYPE in the top byte of the bit field - this is just so that
542 * we can check that the table doesn't have a row missing, as the index into
543 * the table should be the PTYPE.
544 *
545 * Typical work flow:
546 *
547 * IF NOT i40e_ptype_lookup[ptype].known
548 * THEN
549 * Packet is unknown
550 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
551 * Use the rest of the fields to look at the tunnels, inner protocols, etc
552 * ELSE
553 * Use the enum i40e_rx_l2_ptype to decode the packet type
554 * ENDIF
555 */
556
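/* Illustrative sketch (not part of the upstream driver): the work flow above
 * expressed as code.  The decode field names (known, outer_ip, tunnel_type)
 * are assumed to match struct i40e_rx_ptype_decoded in i40e_type.h and the
 * helper name is hypothetical.
 */
static inline bool i40e_example_ptype_is_tunneled(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];

	if (!decoded.known)
		return false;	/* unknown packet type */
	if (decoded.outer_ip != I40E_RX_PTYPE_OUTER_IP)
		return false;	/* pure L2, use enum i40e_rx_l2_ptype instead */
	return decoded.tunnel_type != I40E_RX_PTYPE_TUNNEL_NONE;
}
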
557/* macro to make the table lines short, use explicit indexing with [PTYPE] */
558#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
559 [PTYPE] = { \
560 1, \
561 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
562 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
563 I40E_RX_PTYPE_##OUTER_FRAG, \
564 I40E_RX_PTYPE_TUNNEL_##T, \
565 I40E_RX_PTYPE_TUNNEL_END_##TE, \
566 I40E_RX_PTYPE_##TEF, \
567 I40E_RX_PTYPE_INNER_PROT_##I, \
568 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
569
570#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
571
572/* shorter macros make the table fit but are terse */
573#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
574#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
575#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
576
577/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
578struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
579 /* L2 Packet types */
580 I40E_PTT_UNUSED_ENTRY(0),
581 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
582 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
583 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
584 I40E_PTT_UNUSED_ENTRY(4),
585 I40E_PTT_UNUSED_ENTRY(5),
586 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
587 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
588 I40E_PTT_UNUSED_ENTRY(8),
589 I40E_PTT_UNUSED_ENTRY(9),
590 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
591 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
592 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
593 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
594 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
595 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
596 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
597 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
598 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
599 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
600 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
601 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
602
603 /* Non Tunneled IPv4 */
604 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
605 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
606 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
607 I40E_PTT_UNUSED_ENTRY(25),
608 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
609 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
610 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
611
612 /* IPv4 --> IPv4 */
613 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
614 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
615 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
616 I40E_PTT_UNUSED_ENTRY(32),
617 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
618 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
619 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
620
621 /* IPv4 --> IPv6 */
622 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
623 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
624 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
625 I40E_PTT_UNUSED_ENTRY(39),
626 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
627 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
628 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
629
630 /* IPv4 --> GRE/NAT */
631 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
632
633 /* IPv4 --> GRE/NAT --> IPv4 */
634 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
635 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
636 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
637 I40E_PTT_UNUSED_ENTRY(47),
638 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
639 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
640 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
641
642 /* IPv4 --> GRE/NAT --> IPv6 */
643 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
644 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
645 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
646 I40E_PTT_UNUSED_ENTRY(54),
647 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
648 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
649 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
650
651 /* IPv4 --> GRE/NAT --> MAC */
652 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
653
654 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
655 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
656 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
657 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
658 I40E_PTT_UNUSED_ENTRY(62),
659 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
660 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
661 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
662
663 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
664 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
665 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
666 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
667 I40E_PTT_UNUSED_ENTRY(69),
668 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
669 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
670 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
671
672 /* IPv4 --> GRE/NAT --> MAC/VLAN */
673 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
674
675 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
676 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
677 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
678 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
679 I40E_PTT_UNUSED_ENTRY(77),
680 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
681 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
682 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
683
684 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
685 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
686 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
687 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
688 I40E_PTT_UNUSED_ENTRY(84),
689 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
690 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
691 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
692
693 /* Non Tunneled IPv6 */
694 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
695 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
696 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
697 I40E_PTT_UNUSED_ENTRY(91),
698 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
699 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
700 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
701
702 /* IPv6 --> IPv4 */
703 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
704 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
705 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
706 I40E_PTT_UNUSED_ENTRY(98),
707 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
708 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
709 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
710
711 /* IPv6 --> IPv6 */
712 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
713 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
714 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
715 I40E_PTT_UNUSED_ENTRY(105),
716 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
717 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
718 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
719
720 /* IPv6 --> GRE/NAT */
721 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
722
723 /* IPv6 --> GRE/NAT -> IPv4 */
724 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
725 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
726 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
727 I40E_PTT_UNUSED_ENTRY(113),
728 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
729 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
730 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
731
732 /* IPv6 --> GRE/NAT -> IPv6 */
733 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
734 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
735 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
736 I40E_PTT_UNUSED_ENTRY(120),
737 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
738 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
739 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
740
741 /* IPv6 --> GRE/NAT -> MAC */
742 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
743
744 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
745 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
746 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
747 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
748 I40E_PTT_UNUSED_ENTRY(128),
749 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
750 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
751 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
752
753 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
754 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
755 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
756 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
757 I40E_PTT_UNUSED_ENTRY(135),
758 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
759 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
760 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
761
762 /* IPv6 --> GRE/NAT -> MAC/VLAN */
763 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
764
765 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
766 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
767 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
768 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
769 I40E_PTT_UNUSED_ENTRY(143),
770 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
771 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
772 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
773
774 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
775 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
776 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
777 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
778 I40E_PTT_UNUSED_ENTRY(150),
779 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
780 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
781 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
782
783 /* unused entries */
784 [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
785};
786
787/**
788 * i40e_init_shared_code - Initialize the shared code
789 * @hw: pointer to hardware structure
790 *
791 * This assigns the MAC type and PHY code and inits the NVM.
792 * Does not touch the hardware. This function must be called prior to any
793 * other function in the shared code. The i40e_hw structure should be
794 * memset to 0 prior to calling this function. The following fields in
795 * hw structure should be filled in prior to calling this function:
796 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
797 * subsystem_vendor_id, and revision_id
798 **/
799i40e_status i40e_init_shared_code(struct i40e_hw *hw)
800{
801 i40e_status status = 0;
802 u32 port, ari, func_rid;
803
804 i40e_set_mac_type(hw);
805
806 switch (hw->mac.type) {
807 case I40E_MAC_XL710:
808 case I40E_MAC_X722:
809 break;
810 default:
811 return I40E_ERR_DEVICE_NOT_SUPPORTED;
812 }
813
814 hw->phy.get_link_info = true;
815
816 /* Determine port number and PF number */
817 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
818 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
819 hw->port = (u8)port;
820 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
821 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
822 func_rid = rd32(hw, I40E_PF_FUNC_RID);
823 if (ari)
824 hw->pf_id = (u8)(func_rid & 0xff);
825 else
826 hw->pf_id = (u8)(func_rid & 0x7);
827
828 status = i40e_init_nvm(hw);
829 return status;
830}
831
832/**
833 * i40e_aq_mac_address_read - Retrieve the MAC addresses
834 * @hw: pointer to the hw struct
835 * @flags: a return indicator of what addresses were added to the addr store
836 * @addrs: the requestor's mac addr store
837 * @cmd_details: pointer to command details structure or NULL
838 **/
839static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
840 u16 *flags,
841 struct i40e_aqc_mac_address_read_data *addrs,
842 struct i40e_asq_cmd_details *cmd_details)
843{
844 struct i40e_aq_desc desc;
845 struct i40e_aqc_mac_address_read *cmd_data =
846 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
847 i40e_status status;
848
849 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
850 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
851
852 status = i40e_asq_send_command(hw, &desc, addrs,
853 sizeof(*addrs), cmd_details);
854 *flags = le16_to_cpu(cmd_data->command_flags);
855
856 return status;
857}
858
859/**
860 * i40e_aq_mac_address_write - Change the MAC addresses
861 * @hw: pointer to the hw struct
862 * @flags: indicates which MAC to be written
863 * @mac_addr: address to write
864 * @cmd_details: pointer to command details structure or NULL
865 **/
866i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
867 u16 flags, u8 *mac_addr,
868 struct i40e_asq_cmd_details *cmd_details)
869{
870 struct i40e_aq_desc desc;
871 struct i40e_aqc_mac_address_write *cmd_data =
872 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
873 i40e_status status;
874
875 i40e_fill_default_direct_cmd_desc(&desc,
876 i40e_aqc_opc_mac_address_write);
877 cmd_data->command_flags = cpu_to_le16(flags);
878 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
879 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
880 ((u32)mac_addr[3] << 16) |
881 ((u32)mac_addr[4] << 8) |
882 mac_addr[5]);
883
884 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
885
886 return status;
887}
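
/* Illustrative sketch (not part of the upstream driver): programming a
 * locally administered LAN address the way a set-MAC path might.  The
 * I40E_AQC_WRITE_TYPE_LAA_WOL flag is assumed to be the relevant write type
 * from i40e_adminq_cmd.h; the helper name is hypothetical.
 */
static i40e_status i40e_example_set_laa(struct i40e_hw *hw, u8 *new_mac)
{
	return i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
					 new_mac, NULL);
}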
888
889/**
890 * i40e_get_mac_addr - get MAC address
891 * @hw: pointer to the HW structure
892 * @mac_addr: pointer to MAC address
893 *
894 * Reads the adapter's MAC address via the mac_address_read admin queue command
895 **/
896i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
897{
898 struct i40e_aqc_mac_address_read_data addrs;
899 i40e_status status;
900 u16 flags = 0;
901
902 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
903
904 if (flags & I40E_AQC_LAN_ADDR_VALID)
905 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
906
907 return status;
908}
909
910/**
911 * i40e_get_port_mac_addr - get Port MAC address
912 * @hw: pointer to the HW structure
913 * @mac_addr: pointer to Port MAC address
914 *
915 * Reads the adapter's Port MAC address
916 **/
917i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
918{
919 struct i40e_aqc_mac_address_read_data addrs;
920 i40e_status status;
921 u16 flags = 0;
922
923 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
924 if (status)
925 return status;
926
927 if (flags & I40E_AQC_PORT_ADDR_VALID)
928 ether_addr_copy(mac_addr, addrs.port_mac);
929 else
930 status = I40E_ERR_INVALID_MAC_ADDR;
931
932 return status;
933}
934
935/**
936 * i40e_pre_tx_queue_cfg - pre tx queue configure
937 * @hw: pointer to the HW structure
938 * @queue: target PF queue index
939 * @enable: state change request
940 *
941 * Handles hw requirement to indicate intention to enable
942 * or disable target queue.
943 **/
944void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
945{
946 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
947 u32 reg_block = 0;
948 u32 reg_val;
949
950 if (abs_queue_idx >= 128) {
951 reg_block = abs_queue_idx / 128;
952 abs_queue_idx %= 128;
953 }
954
955 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
956 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
957 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
958
959 if (enable)
960 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
961 else
962 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
963
964 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
965}
966
967/**
968 * i40e_read_pba_string - Reads part number string from EEPROM
969 * @hw: pointer to hardware structure
970 * @pba_num: stores the part number string from the EEPROM
971 * @pba_num_size: part number string buffer length
972 *
973 * Reads the part number string from the EEPROM.
974 **/
975i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
976 u32 pba_num_size)
977{
978 i40e_status status = 0;
979 u16 pba_word = 0;
980 u16 pba_size = 0;
981 u16 pba_ptr = 0;
982 u16 i = 0;
983
984 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
985 if (status || (pba_word != 0xFAFA)) {
986 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
987 return status;
988 }
989
990 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
991 if (status) {
992 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
993 return status;
994 }
995
996 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
997 if (status) {
998 hw_dbg(hw, "Failed to read PBA Block size.\n");
999 return status;
1000 }
1001
1002 /* Subtract one to get PBA word count (PBA Size word is included in
1003 * total size)
1004 */
1005 pba_size--;
1006 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1007 hw_dbg(hw, "Buffer too small for PBA data.\n");
1008 return I40E_ERR_PARAM;
1009 }
1010
1011 for (i = 0; i < pba_size; i++) {
1012 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1013 if (status) {
1014 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1015 return status;
1016 }
1017
1018 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1019 pba_num[(i * 2) + 1] = pba_word & 0xFF;
1020 }
1021 pba_num[(pba_size * 2)] = '\0';
1022
1023 return status;
1024}
1025
1026/**
1027 * i40e_get_media_type - Gets media type
1028 * @hw: pointer to the hardware structure
1029 **/
1030static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1031{
1032 enum i40e_media_type media;
1033
1034 switch (hw->phy.link_info.phy_type) {
1035 case I40E_PHY_TYPE_10GBASE_SR:
1036 case I40E_PHY_TYPE_10GBASE_LR:
1037 case I40E_PHY_TYPE_1000BASE_SX:
1038 case I40E_PHY_TYPE_1000BASE_LX:
1039 case I40E_PHY_TYPE_40GBASE_SR4:
1040 case I40E_PHY_TYPE_40GBASE_LR4:
1041 case I40E_PHY_TYPE_25GBASE_LR:
1042 case I40E_PHY_TYPE_25GBASE_SR:
1043 media = I40E_MEDIA_TYPE_FIBER;
1044 break;
1045 case I40E_PHY_TYPE_100BASE_TX:
1046 case I40E_PHY_TYPE_1000BASE_T:
1047 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1048 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1049 case I40E_PHY_TYPE_10GBASE_T:
1050 media = I40E_MEDIA_TYPE_BASET;
1051 break;
1052 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1053 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1054 case I40E_PHY_TYPE_10GBASE_CR1:
1055 case I40E_PHY_TYPE_40GBASE_CR4:
1056 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1057 case I40E_PHY_TYPE_40GBASE_AOC:
1058 case I40E_PHY_TYPE_10GBASE_AOC:
1059 case I40E_PHY_TYPE_25GBASE_CR:
1060 case I40E_PHY_TYPE_25GBASE_AOC:
1061 case I40E_PHY_TYPE_25GBASE_ACC:
1062 media = I40E_MEDIA_TYPE_DA;
1063 break;
1064 case I40E_PHY_TYPE_1000BASE_KX:
1065 case I40E_PHY_TYPE_10GBASE_KX4:
1066 case I40E_PHY_TYPE_10GBASE_KR:
1067 case I40E_PHY_TYPE_40GBASE_KR4:
1068 case I40E_PHY_TYPE_20GBASE_KR2:
1069 case I40E_PHY_TYPE_25GBASE_KR:
1070 media = I40E_MEDIA_TYPE_BACKPLANE;
1071 break;
1072 case I40E_PHY_TYPE_SGMII:
1073 case I40E_PHY_TYPE_XAUI:
1074 case I40E_PHY_TYPE_XFI:
1075 case I40E_PHY_TYPE_XLAUI:
1076 case I40E_PHY_TYPE_XLPPI:
1077 default:
1078 media = I40E_MEDIA_TYPE_UNKNOWN;
1079 break;
1080 }
1081
1082 return media;
1083}
1084
1085/**
1086 * i40e_poll_globr - Poll for Global Reset completion
1087 * @hw: pointer to the hardware structure
1088 * @retry_limit: how many times to retry before failure
1089 **/
1090static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1091 u32 retry_limit)
1092{
1093 u32 cnt, reg = 0;
1094
1095 for (cnt = 0; cnt < retry_limit; cnt++) {
1096 reg = rd32(hw, I40E_GLGEN_RSTAT);
1097 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1098 return 0;
1099 msleep(100);
1100 }
1101
1102 hw_dbg(hw, "Global reset failed.\n");
1103 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1104
1105 return I40E_ERR_RESET_FAILED;
1106}
1107
1108#define I40E_PF_RESET_WAIT_COUNT_A0 200
1109#define I40E_PF_RESET_WAIT_COUNT 200
1110/**
1111 * i40e_pf_reset - Reset the PF
1112 * @hw: pointer to the hardware structure
1113 *
1114 * Assuming someone else has triggered a global reset,
1115 * ensure the global reset is complete and then reset the PF
1116 **/
1117i40e_status i40e_pf_reset(struct i40e_hw *hw)
1118{
1119 u32 cnt = 0;
1120 u32 cnt1 = 0;
1121 u32 reg = 0;
1122 u32 grst_del;
1123
1124 /* Poll for Global Reset steady state in case of recent GRST.
1125 * The grst delay value is in 100ms units, and we'll wait a
1126 * couple counts longer to be sure we don't just miss the end.
1127 */
1128 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1129 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1130 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1131
1132 /* It can take up to 15 secs for GRST steady state.
1133 * Bump it to 16 secs max to be safe.
1134 */
1135 grst_del = grst_del * 20;
1136
1137 for (cnt = 0; cnt < grst_del; cnt++) {
1138 reg = rd32(hw, I40E_GLGEN_RSTAT);
1139 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1140 break;
1141 msleep(100);
1142 }
1143 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1144 hw_dbg(hw, "Global reset polling failed to complete.\n");
1145 return I40E_ERR_RESET_FAILED;
1146 }
1147
1148 /* Now wait for the FW to be ready */
1149 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1150 reg = rd32(hw, I40E_GLNVM_ULD);
1151 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1152 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1153 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1154 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1155 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1156 break;
1157 }
1158 usleep_range(10000, 20000);
1159 }
1160 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1161 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1162 hw_dbg(hw, "wait for FW Reset complete timed out\n");
1163 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1164 return I40E_ERR_RESET_FAILED;
1165 }
1166
1167 /* If there was a Global Reset in progress when we got here,
1168 * we don't need to do the PF Reset
1169 */
1170 if (!cnt) {
1171 u32 reg2 = 0;
1172 if (hw->revision_id == 0)
1173 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1174 else
1175 cnt = I40E_PF_RESET_WAIT_COUNT;
1176 reg = rd32(hw, I40E_PFGEN_CTRL);
1177 wr32(hw, I40E_PFGEN_CTRL,
1178 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1179 for (; cnt; cnt--) {
1180 reg = rd32(hw, I40E_PFGEN_CTRL);
1181 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1182 break;
1183 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1184 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1185 break;
1186 usleep_range(1000, 2000);
1187 }
1188 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1189 if (i40e_poll_globr(hw, grst_del))
1190 return I40E_ERR_RESET_FAILED;
1191 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1192 hw_dbg(hw, "PF reset polling failed to complete.\n");
1193 return I40E_ERR_RESET_FAILED;
1194 }
1195 }
1196
1197 i40e_clear_pxe_mode(hw);
1198
1199 return 0;
1200}
1201
1202/**
1203 * i40e_clear_hw - clear out any left over hw state
1204 * @hw: pointer to the hw struct
1205 *
1206 * Clear queues and interrupts, typically called at init time,
1207 * but after the capabilities have been found so we know how many
1208 * queues and msix vectors have been allocated.
1209 **/
1210void i40e_clear_hw(struct i40e_hw *hw)
1211{
1212 u32 num_queues, base_queue;
1213 u32 num_pf_int;
1214 u32 num_vf_int;
1215 u32 num_vfs;
1216 u32 i, j;
1217 u32 val;
1218 u32 eol = 0x7ff;
1219
1220 /* get number of interrupts, queues, and VFs */
1221 val = rd32(hw, I40E_GLPCI_CNF2);
1222 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1223 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1224 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1225 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1226
1227 val = rd32(hw, I40E_PFLAN_QALLOC);
1228 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1229 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1230 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1231 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1232 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1233 num_queues = (j - base_queue) + 1;
1234 else
1235 num_queues = 0;
1236
1237 val = rd32(hw, I40E_PF_VT_PFALLOC);
1238 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1239 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1240 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1241 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1242 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1243 num_vfs = (j - i) + 1;
1244 else
1245 num_vfs = 0;
1246
1247 /* stop all the interrupts */
1248 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1249 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1250 for (i = 0; i < num_pf_int - 2; i++)
1251 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1252
1253 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1254 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1255 wr32(hw, I40E_PFINT_LNKLST0, val);
1256 for (i = 0; i < num_pf_int - 2; i++)
1257 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1258 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1259 for (i = 0; i < num_vfs; i++)
1260 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1261 for (i = 0; i < num_vf_int - 2; i++)
1262 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1263
1264 /* warn the HW of the coming Tx disables */
1265 for (i = 0; i < num_queues; i++) {
1266 u32 abs_queue_idx = base_queue + i;
1267 u32 reg_block = 0;
1268
1269 if (abs_queue_idx >= 128) {
1270 reg_block = abs_queue_idx / 128;
1271 abs_queue_idx %= 128;
1272 }
1273
1274 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1275 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1276 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1277 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1278
1279 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1280 }
1281 udelay(400);
1282
1283 /* stop all the queues */
1284 for (i = 0; i < num_queues; i++) {
1285 wr32(hw, I40E_QINT_TQCTL(i), 0);
1286 wr32(hw, I40E_QTX_ENA(i), 0);
1287 wr32(hw, I40E_QINT_RQCTL(i), 0);
1288 wr32(hw, I40E_QRX_ENA(i), 0);
1289 }
1290
1291 /* short wait for all queue disables to settle */
1292 udelay(50);
1293}
1294
1295/**
1296 * i40e_clear_pxe_mode - clear pxe operations mode
1297 * @hw: pointer to the hw struct
1298 *
1299 * Make sure all PXE mode settings are cleared, including things
1300 * like descriptor fetch/write-back mode.
1301 **/
1302void i40e_clear_pxe_mode(struct i40e_hw *hw)
1303{
1304 u32 reg;
1305
1306 if (i40e_check_asq_alive(hw))
1307 i40e_aq_clear_pxe_mode(hw, NULL);
1308
1309 /* Clear single descriptor fetch/write-back mode */
1310 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1311
1312 if (hw->revision_id == 0) {
1313 /* As a workaround, clear PXE_MODE instead of setting it */
1314 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1315 } else {
1316 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1317 }
1318}
1319
1320/**
1321 * i40e_led_is_mine - helper to find matching led
1322 * @hw: pointer to the hw struct
1323 * @idx: index into GPIO registers
1324 *
1325 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1326 */
1327static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1328{
1329 u32 gpio_val = 0;
1330 u32 port;
1331
1332 if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1333 !hw->func_caps.led[idx])
1334 return 0;
1335 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1336 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1337 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1338
1339 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1340 * if it is not our port then ignore
1341 */
1342 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1343 (port != hw->port))
1344 return 0;
1345
1346 return gpio_val;
1347}
1348
1349#define I40E_FW_LED BIT(4)
1350#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1351 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1352
1353#define I40E_LED0 22
1354
1355#define I40E_PIN_FUNC_SDP 0x0
1356#define I40E_PIN_FUNC_LED 0x1
1357
1358/**
1359 * i40e_led_get - return current on/off mode
1360 * @hw: pointer to the hw struct
1361 *
1362 * The value returned is the 'mode' field as defined in the
1363 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1364 * values are variations of possible behaviors relating to
1365 * blink, link, and wire.
1366 **/
1367u32 i40e_led_get(struct i40e_hw *hw)
1368{
1369 u32 mode = 0;
1370 int i;
1371
1372 /* as per the documentation GPIO 22-29 are the LED
1373 * GPIO pins named LED0..LED7
1374 */
1375 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1376 u32 gpio_val = i40e_led_is_mine(hw, i);
1377
1378 if (!gpio_val)
1379 continue;
1380
1381 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1382 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1383 break;
1384 }
1385
1386 return mode;
1387}
1388
1389/**
1390 * i40e_led_set - set new on/off mode
1391 * @hw: pointer to the hw struct
1392 * @mode: 0=off, 0xf=on (else see manual for mode details)
1393 * @blink: true if the LED should blink when on, false if steady
1394 *
1395 * If this function is used to turn on the blink, it should also
1396 * be used to turn the blink off when restoring the original state.
1397 **/
1398void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1399{
1400 int i;
1401
1402 if (mode & ~I40E_LED_MODE_VALID) {
1403 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1404 return;
1405 }
1406
1407 /* as per the documentation GPIO 22-29 are the LED
1408 * GPIO pins named LED0..LED7
1409 */
1410 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1411 u32 gpio_val = i40e_led_is_mine(hw, i);
1412
1413 if (!gpio_val)
1414 continue;
1415
1416 if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1417 u32 pin_func = 0;
1418
1419 if (mode & I40E_FW_LED)
1420 pin_func = I40E_PIN_FUNC_SDP;
1421 else
1422 pin_func = I40E_PIN_FUNC_LED;
1423
1424 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1425 gpio_val |= ((pin_func <<
1426 I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1427 I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1428 }
1429 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1430 /* this & is a bit of paranoia, but serves as a range check */
1431 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1432 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1433
1434 if (blink)
1435 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1436 else
1437 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1438
1439 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1440 break;
1441 }
1442}
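
/* Usage sketch (illustrative only; the "hw" pointer and the identify loop
 * are assumed to come from the caller, e.g. an ethtool set_phys_id handler).
 * Save the current mode, blink while identifying, then restore it:
 *
 *	u32 orig_mode = i40e_led_get(hw);
 *
 *	i40e_led_set(hw, 0xf, true);		force the LED on, blinking
 *	... identify period ...
 *	i40e_led_set(hw, orig_mode, false);	restore the original state
 */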
1443
1444/* Admin command wrappers */
1445
1446/**
1447 * i40e_aq_get_phy_capabilities
1448 * @hw: pointer to the hw struct
1449 * @abilities: structure for PHY capabilities to be filled
1450 * @qualified_modules: report Qualified Modules
1451 * @report_init: report init capabilities (active are default)
1452 * @cmd_details: pointer to command details structure or NULL
1453 *
1454 * Returns the various PHY abilities supported on the Port.
1455 **/
1456i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1457 bool qualified_modules, bool report_init,
1458 struct i40e_aq_get_phy_abilities_resp *abilities,
1459 struct i40e_asq_cmd_details *cmd_details)
1460{
1461 struct i40e_aq_desc desc;
1462 i40e_status status;
1463 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1464 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1465
1466 if (!abilities)
1467 return I40E_ERR_PARAM;
1468
1469 do {
1470 i40e_fill_default_direct_cmd_desc(&desc,
1471 i40e_aqc_opc_get_phy_abilities);
1472
1473 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1474 if (abilities_size > I40E_AQ_LARGE_BUF)
1475 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1476
1477 if (qualified_modules)
1478 desc.params.external.param0 |=
1479 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1480
1481 if (report_init)
1482 desc.params.external.param0 |=
1483 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1484
1485 status = i40e_asq_send_command(hw, &desc, abilities,
1486 abilities_size, cmd_details);
1487
1488 switch (hw->aq.asq_last_status) {
1489 case I40E_AQ_RC_EIO:
1490 status = I40E_ERR_UNKNOWN_PHY;
1491 break;
1492 case I40E_AQ_RC_EAGAIN:
1493 usleep_range(1000, 2000);
1494 total_delay++;
1495 status = I40E_ERR_TIMEOUT;
1496 break;
1497 /* also covers I40E_AQ_RC_OK */
1498 default:
1499 break;
1500 }
1501
1502 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1503 (total_delay < max_delay));
1504
1505 if (status)
1506 return status;
1507
1508 if (report_init) {
1509 if (hw->mac.type == I40E_MAC_XL710 &&
1510 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1511 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1512 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1513 } else {
1514 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1515 hw->phy.phy_types |=
1516 ((u64)abilities->phy_type_ext << 32);
1517 }
1518 }
1519
1520 return status;
1521}
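
/* Usage sketch (illustrative only; "hw" and the caller's error handling are
 * assumed). Query the currently active PHY abilities:
 *
 *	struct i40e_aq_get_phy_abilities_resp abilities;
 *	i40e_status status;
 *
 *	status = i40e_aq_get_phy_capabilities(hw, false, false,
 *					      &abilities, NULL);
 *	if (status)
 *		return status;
 *	... inspect abilities.abilities, abilities.link_speed, etc. ...
 */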
1522
1523/**
1524 * i40e_aq_set_phy_config
1525 * @hw: pointer to the hw struct
1526 * @config: structure with PHY configuration to be set
1527 * @cmd_details: pointer to command details structure or NULL
1528 *
1529 * Set the various PHY configuration parameters supported on the Port.
1530 * One or more of the Set PHY config parameters may be ignored in an MFP
1531 * mode as the PF may not have the privilege to set some of the PHY Config
1532 * parameters. This status will be indicated by the command response.
1534 **/
1535enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1536 struct i40e_aq_set_phy_config *config,
1537 struct i40e_asq_cmd_details *cmd_details)
1538{
1539 struct i40e_aq_desc desc;
1540 struct i40e_aq_set_phy_config *cmd =
1541 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1542 enum i40e_status_code status;
1543
1544 if (!config)
1545 return I40E_ERR_PARAM;
1546
1547 i40e_fill_default_direct_cmd_desc(&desc,
1548 i40e_aqc_opc_set_phy_config);
1549
1550 *cmd = *config;
1551
1552 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1553
1554 return status;
1555}
1556
1557static noinline_for_stack enum i40e_status_code
1558i40e_set_fc_status(struct i40e_hw *hw,
1559 struct i40e_aq_get_phy_abilities_resp *abilities,
1560 bool atomic_restart)
1561{
1562 struct i40e_aq_set_phy_config config;
1563 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1564 u8 pause_mask = 0x0;
1565
1566 switch (fc_mode) {
1567 case I40E_FC_FULL:
1568 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1569 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1570 break;
1571 case I40E_FC_RX_PAUSE:
1572 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1573 break;
1574 case I40E_FC_TX_PAUSE:
1575 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1576 break;
1577 default:
1578 break;
1579 }
1580
1581 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1582 /* clear the old pause settings */
1583 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1584 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1585 /* set the new abilities */
1586 config.abilities |= pause_mask;
1587 /* If the abilities haven't changed, there is nothing to do */
1588 if (config.abilities == abilities->abilities)
1589 return 0;
1590
1591 /* Auto restart link so settings take effect */
1592 if (atomic_restart)
1593 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1594 /* Copy over all the old settings */
1595 config.phy_type = abilities->phy_type;
1596 config.phy_type_ext = abilities->phy_type_ext;
1597 config.link_speed = abilities->link_speed;
1598 config.eee_capability = abilities->eee_capability;
1599 config.eeer = abilities->eeer_val;
1600 config.low_power_ctrl = abilities->d3_lpan;
1601 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1602 I40E_AQ_PHY_FEC_CONFIG_MASK;
1603
1604 return i40e_aq_set_phy_config(hw, &config, NULL);
1605}
1606
1607/**
1608 * i40e_set_fc
1609 * @hw: pointer to the hw struct
1610 * @aq_failures: buffer to return AdminQ failure information
1611 * @atomic_restart: whether to enable atomic link restart
1612 *
1613 * Set the requested flow control mode using set_phy_config.
1614 **/
1615enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1616 bool atomic_restart)
1617{
1618 struct i40e_aq_get_phy_abilities_resp abilities;
1619 enum i40e_status_code status;
1620
1621 *aq_failures = 0x0;
1622
1623 /* Get the current phy config */
1624 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1625 NULL);
1626 if (status) {
1627 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1628 return status;
1629 }
1630
1631 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1632 if (status)
1633 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1634
1635 /* Update the link info */
1636 status = i40e_update_link_info(hw);
1637 if (status) {
1638 /* Wait a little bit (on 40G cards it sometimes takes a really
1639 * long time for link to come back from the atomic reset)
1640 * and try once more
1641 */
1642 msleep(1000);
1643 status = i40e_update_link_info(hw);
1644 }
1645 if (status)
1646 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1647
1648 return status;
1649}
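
/* Usage sketch (illustrative only; "hw" is assumed and the requested mode is
 * just an example). The aq_failures bits identify which step failed:
 *
 *	u8 aq_failures = 0;
 *	enum i40e_status_code status;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	status = i40e_set_fc(hw, &aq_failures, true);
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET)
 *		... get_phy_capabilities failed ...
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
 *		... set_phy_config failed ...
 *	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE)
 *		... update_link_info failed ...
 */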
1650
1651/**
1652 * i40e_aq_clear_pxe_mode
1653 * @hw: pointer to the hw struct
1654 * @cmd_details: pointer to command details structure or NULL
1655 *
1656 * Tell the firmware that the driver is taking over from PXE
1657 **/
1658i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1659 struct i40e_asq_cmd_details *cmd_details)
1660{
1661 i40e_status status;
1662 struct i40e_aq_desc desc;
1663 struct i40e_aqc_clear_pxe *cmd =
1664 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1665
1666 i40e_fill_default_direct_cmd_desc(&desc,
1667 i40e_aqc_opc_clear_pxe_mode);
1668
1669 cmd->rx_cnt = 0x2;
1670
1671 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1672
1673 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1674
1675 return status;
1676}
1677
1678/**
1679 * i40e_aq_set_link_restart_an
1680 * @hw: pointer to the hw struct
1681 * @enable_link: if true: enable link, if false: disable link
1682 * @cmd_details: pointer to command details structure or NULL
1683 *
1684 * Sets up the link and restarts the Auto-Negotiation over the link.
1685 **/
1686i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1687 bool enable_link,
1688 struct i40e_asq_cmd_details *cmd_details)
1689{
1690 struct i40e_aq_desc desc;
1691 struct i40e_aqc_set_link_restart_an *cmd =
1692 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1693 i40e_status status;
1694
1695 i40e_fill_default_direct_cmd_desc(&desc,
1696 i40e_aqc_opc_set_link_restart_an);
1697
1698 cmd->command = I40E_AQ_PHY_RESTART_AN;
1699 if (enable_link)
1700 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1701 else
1702 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1703
1704 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1705
1706 return status;
1707}
1708
1709/**
1710 * i40e_aq_get_link_info
1711 * @hw: pointer to the hw struct
1712 * @enable_lse: enable/disable LinkStatusEvent reporting
1713 * @link: pointer to link status structure - optional
1714 * @cmd_details: pointer to command details structure or NULL
1715 *
1716 * Returns the link status of the adapter.
1717 **/
1718i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1719 bool enable_lse, struct i40e_link_status *link,
1720 struct i40e_asq_cmd_details *cmd_details)
1721{
1722 struct i40e_aq_desc desc;
1723 struct i40e_aqc_get_link_status *resp =
1724 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1725 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1726 i40e_status status;
1727 bool tx_pause, rx_pause;
1728 u16 command_flags;
1729
1730 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1731
1732 if (enable_lse)
1733 command_flags = I40E_AQ_LSE_ENABLE;
1734 else
1735 command_flags = I40E_AQ_LSE_DISABLE;
1736 resp->command_flags = cpu_to_le16(command_flags);
1737
1738 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1739
1740 if (status)
1741 goto aq_get_link_info_exit;
1742
1743 /* save off old link status information */
1744 hw->phy.link_info_old = *hw_link_info;
1745
1746 /* update link status */
1747 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1748 hw->phy.media_type = i40e_get_media_type(hw);
1749 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1750 hw_link_info->link_info = resp->link_info;
1751 hw_link_info->an_info = resp->an_info;
1752 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1753 I40E_AQ_CONFIG_FEC_RS_ENA);
1754 hw_link_info->ext_info = resp->ext_info;
1755 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1756 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1757 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1758
1759 /* update fc info */
1760 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1761 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1762 if (tx_pause && rx_pause)
1763 hw->fc.current_mode = I40E_FC_FULL;
1764 else if (tx_pause)
1765 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1766 else if (rx_pause)
1767 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1768 else
1769 hw->fc.current_mode = I40E_FC_NONE;
1770
1771 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1772 hw_link_info->crc_enable = true;
1773 else
1774 hw_link_info->crc_enable = false;
1775
1776 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1777 hw_link_info->lse_enable = true;
1778 else
1779 hw_link_info->lse_enable = false;
1780
1781 if ((hw->mac.type == I40E_MAC_XL710) &&
1782 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1783 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1784 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1785
1786 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1787 hw->mac.type != I40E_MAC_X722) {
1788 __le32 tmp;
1789
1790 memcpy(&tmp, resp->link_type, sizeof(tmp));
1791 hw->phy.phy_types = le32_to_cpu(tmp);
1792 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1793 }
1794
1795 /* save link status information */
1796 if (link)
1797 *link = *hw_link_info;
1798
1799 /* flag cleared so helper functions don't call AQ again */
1800 hw->phy.get_link_info = false;
1801
1802aq_get_link_info_exit:
1803 return status;
1804}
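
/* Usage sketch (illustrative only; "hw" is assumed to be initialized and
 * enabling LSE reporting is an example choice):
 *
 *	struct i40e_link_status link;
 *	i40e_status status;
 *
 *	status = i40e_aq_get_link_info(hw, true, &link, NULL);
 *	if (!status && (link.link_info & I40E_AQ_LINK_UP))
 *		... link.link_speed holds the negotiated speed ...
 */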
1805
1806/**
1807 * i40e_aq_set_phy_int_mask
1808 * @hw: pointer to the hw struct
1809 * @mask: interrupt mask to be set
1810 * @cmd_details: pointer to command details structure or NULL
1811 *
1812 * Set link interrupt mask.
1813 **/
1814i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1815 u16 mask,
1816 struct i40e_asq_cmd_details *cmd_details)
1817{
1818 struct i40e_aq_desc desc;
1819 struct i40e_aqc_set_phy_int_mask *cmd =
1820 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1821 i40e_status status;
1822
1823 i40e_fill_default_direct_cmd_desc(&desc,
1824 i40e_aqc_opc_set_phy_int_mask);
1825
1826 cmd->event_mask = cpu_to_le16(mask);
1827
1828 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1829
1830 return status;
1831}
1832
1833/**
1834 * i40e_aq_set_mac_loopback
1835 * @hw: pointer to the HW struct
1836 * @ena_lpbk: Enable or Disable loopback
1837 * @cmd_details: pointer to command details structure or NULL
1838 *
1839 * Enable/disable loopback on a given port
1840 */
1841i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
1842 struct i40e_asq_cmd_details *cmd_details)
1843{
1844 struct i40e_aq_desc desc;
1845 struct i40e_aqc_set_lb_mode *cmd =
1846 (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
1847
1848 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
1849 if (ena_lpbk) {
1850 if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
1851 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
1852 else
1853 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
1854 }
1855
1856 return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1857}
1858
1859/**
1860 * i40e_aq_set_phy_debug
1861 * @hw: pointer to the hw struct
1862 * @cmd_flags: debug command flags
1863 * @cmd_details: pointer to command details structure or NULL
1864 *
1865 * Set PHY debug command flags (used, for example, to reset the external PHY).
1866 **/
1867i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1868 struct i40e_asq_cmd_details *cmd_details)
1869{
1870 struct i40e_aq_desc desc;
1871 struct i40e_aqc_set_phy_debug *cmd =
1872 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1873 i40e_status status;
1874
1875 i40e_fill_default_direct_cmd_desc(&desc,
1876 i40e_aqc_opc_set_phy_debug);
1877
1878 cmd->command_flags = cmd_flags;
1879
1880 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1881
1882 return status;
1883}
1884
1885/**
1886 * i40e_is_aq_api_ver_ge
1887 * @aq: pointer to AdminQ info containing HW API version to compare
1888 * @maj: API major value
1889 * @min: API minor value
1890 *
1891 * Check whether the current HW API version is greater than or equal to the provided version.
1892 **/
1893static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1894 u16 min)
1895{
1896 return (aq->api_maj_ver > maj ||
1897 (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1898}
1899
1900/**
1901 * i40e_aq_add_vsi
1902 * @hw: pointer to the hw struct
1903 * @vsi_ctx: pointer to a vsi context struct
1904 * @cmd_details: pointer to command details structure or NULL
1905 *
1906 * Add a VSI context to the hardware.
1907 **/
1908i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1909 struct i40e_vsi_context *vsi_ctx,
1910 struct i40e_asq_cmd_details *cmd_details)
1911{
1912 struct i40e_aq_desc desc;
1913 struct i40e_aqc_add_get_update_vsi *cmd =
1914 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1915 struct i40e_aqc_add_get_update_vsi_completion *resp =
1916 (struct i40e_aqc_add_get_update_vsi_completion *)
1917 &desc.params.raw;
1918 i40e_status status;
1919
1920 i40e_fill_default_direct_cmd_desc(&desc,
1921 i40e_aqc_opc_add_vsi);
1922
1923 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1924 cmd->connection_type = vsi_ctx->connection_type;
1925 cmd->vf_id = vsi_ctx->vf_num;
1926 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1927
1928 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1929
1930 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
1931 sizeof(vsi_ctx->info),
1932 cmd_details, true);
1933
1934 if (status)
1935 goto aq_add_vsi_exit;
1936
1937 vsi_ctx->seid = le16_to_cpu(resp->seid);
1938 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1939 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1940 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1941
1942aq_add_vsi_exit:
1943 return status;
1944}
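
/* Usage sketch (illustrative only; "uplink_seid" is a hypothetical SEID and
 * the PF VSI connection type/flags shown are example values for a typical
 * setup, not the only valid ones):
 *
 *	struct i40e_vsi_context ctx = {};
 *	i40e_status status;
 *
 *	ctx.uplink_seid = uplink_seid;
 *	ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 *	ctx.flags = I40E_AQ_VSI_TYPE_PF;
 *	ctx.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 *
 *	status = i40e_aq_add_vsi(hw, &ctx, NULL);
 *	if (!status)
 *		... ctx.seid and ctx.vsi_number identify the new VSI ...
 */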
1945
1946/**
1947 * i40e_aq_set_default_vsi
1948 * @hw: pointer to the hw struct
1949 * @seid: vsi number
1950 * @cmd_details: pointer to command details structure or NULL
1951 **/
1952i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
1953 u16 seid,
1954 struct i40e_asq_cmd_details *cmd_details)
1955{
1956 struct i40e_aq_desc desc;
1957 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1958 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1959 &desc.params.raw;
1960 i40e_status status;
1961
1962 i40e_fill_default_direct_cmd_desc(&desc,
1963 i40e_aqc_opc_set_vsi_promiscuous_modes);
1964
1965 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1966 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1967 cmd->seid = cpu_to_le16(seid);
1968
1969 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1970
1971 return status;
1972}
1973
1974/**
1975 * i40e_aq_clear_default_vsi
1976 * @hw: pointer to the hw struct
1977 * @seid: vsi number
1978 * @cmd_details: pointer to command details structure or NULL
1979 **/
1980i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
1981 u16 seid,
1982 struct i40e_asq_cmd_details *cmd_details)
1983{
1984 struct i40e_aq_desc desc;
1985 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1986 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1987 &desc.params.raw;
1988 i40e_status status;
1989
1990 i40e_fill_default_direct_cmd_desc(&desc,
1991 i40e_aqc_opc_set_vsi_promiscuous_modes);
1992
1993 cmd->promiscuous_flags = cpu_to_le16(0);
1994 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1995 cmd->seid = cpu_to_le16(seid);
1996
1997 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1998
1999 return status;
2000}
2001
2002/**
2003 * i40e_aq_set_vsi_unicast_promiscuous
2004 * @hw: pointer to the hw struct
2005 * @seid: vsi number
2006 * @set: set unicast promiscuous enable/disable
2007 * @cmd_details: pointer to command details structure or NULL
2008 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
2009 **/
2010i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
2011 u16 seid, bool set,
2012 struct i40e_asq_cmd_details *cmd_details,
2013 bool rx_only_promisc)
2014{
2015 struct i40e_aq_desc desc;
2016 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2017 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2018 i40e_status status;
2019 u16 flags = 0;
2020
2021 i40e_fill_default_direct_cmd_desc(&desc,
2022 i40e_aqc_opc_set_vsi_promiscuous_modes);
2023
2024 if (set) {
2025 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2026 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2027 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2028 }
2029
2030 cmd->promiscuous_flags = cpu_to_le16(flags);
2031
2032 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2033 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2034 cmd->valid_flags |=
2035 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2036
2037 cmd->seid = cpu_to_le16(seid);
2038 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2039
2040 return status;
2041}
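
/* Usage sketch (illustrative only; "vsi_seid" is a hypothetical SEID owned by
 * the caller). Enter and later leave unicast promiscuous mode, requesting
 * RX-only mirroring where the firmware API allows it:
 *
 *	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi_seid, true,
 *						     NULL, true);
 *	...
 *	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi_seid, false,
 *						     NULL, false);
 */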
2042
2043/**
2044 * i40e_aq_set_vsi_multicast_promiscuous
2045 * @hw: pointer to the hw struct
2046 * @seid: vsi number
2047 * @set: set multicast promiscuous enable/disable
2048 * @cmd_details: pointer to command details structure or NULL
2049 **/
2050i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2051 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2052{
2053 struct i40e_aq_desc desc;
2054 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2055 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2056 i40e_status status;
2057 u16 flags = 0;
2058
2059 i40e_fill_default_direct_cmd_desc(&desc,
2060 i40e_aqc_opc_set_vsi_promiscuous_modes);
2061
2062 if (set)
2063 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2064
2065 cmd->promiscuous_flags = cpu_to_le16(flags);
2066
2067 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2068
2069 cmd->seid = cpu_to_le16(seid);
2070 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2071
2072 return status;
2073}
2074
2075/**
2076 * i40e_aq_set_vsi_mc_promisc_on_vlan
2077 * @hw: pointer to the hw struct
2078 * @seid: vsi number
2079 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
2080 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2081 * @cmd_details: pointer to command details structure or NULL
2082 **/
2083enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2084 u16 seid, bool enable,
2085 u16 vid,
2086 struct i40e_asq_cmd_details *cmd_details)
2087{
2088 struct i40e_aq_desc desc;
2089 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2090 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2091 enum i40e_status_code status;
2092 u16 flags = 0;
2093
2094 i40e_fill_default_direct_cmd_desc(&desc,
2095 i40e_aqc_opc_set_vsi_promiscuous_modes);
2096
2097 if (enable)
2098 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2099
2100 cmd->promiscuous_flags = cpu_to_le16(flags);
2101 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2102 cmd->seid = cpu_to_le16(seid);
2103 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2104
2105 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
2106 cmd_details, true);
2107
2108 return status;
2109}
2110
2111/**
2112 * i40e_aq_set_vsi_uc_promisc_on_vlan
2113 * @hw: pointer to the hw struct
2114 * @seid: vsi number
2115 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2116 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2117 * @cmd_details: pointer to command details structure or NULL
2118 **/
2119enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2120 u16 seid, bool enable,
2121 u16 vid,
2122 struct i40e_asq_cmd_details *cmd_details)
2123{
2124 struct i40e_aq_desc desc;
2125 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2126 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2127 enum i40e_status_code status;
2128 u16 flags = 0;
2129
2130 i40e_fill_default_direct_cmd_desc(&desc,
2131 i40e_aqc_opc_set_vsi_promiscuous_modes);
2132
2133 if (enable) {
2134 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2135 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2136 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2137 }
2138
2139 cmd->promiscuous_flags = cpu_to_le16(flags);
2140 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2141 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2142 cmd->valid_flags |=
2143 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2144 cmd->seid = cpu_to_le16(seid);
2145 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2146
2147 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
2148 cmd_details, true);
2149
2150 return status;
2151}
2152
2153/**
2154 * i40e_aq_set_vsi_bc_promisc_on_vlan
2155 * @hw: pointer to the hw struct
2156 * @seid: vsi number
2157 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2158 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2159 * @cmd_details: pointer to command details structure or NULL
2160 **/
2161i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2162 u16 seid, bool enable, u16 vid,
2163 struct i40e_asq_cmd_details *cmd_details)
2164{
2165 struct i40e_aq_desc desc;
2166 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2167 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2168 i40e_status status;
2169 u16 flags = 0;
2170
2171 i40e_fill_default_direct_cmd_desc(&desc,
2172 i40e_aqc_opc_set_vsi_promiscuous_modes);
2173
2174 if (enable)
2175 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2176
2177 cmd->promiscuous_flags = cpu_to_le16(flags);
2178 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2179 cmd->seid = cpu_to_le16(seid);
2180 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2181
2182 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2183
2184 return status;
2185}
2186
2187/**
2188 * i40e_aq_set_vsi_broadcast
2189 * @hw: pointer to the hw struct
2190 * @seid: vsi number
2191 * @set_filter: true to set filter, false to clear filter
2192 * @cmd_details: pointer to command details structure or NULL
2193 *
2194 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2195 **/
2196i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2197 u16 seid, bool set_filter,
2198 struct i40e_asq_cmd_details *cmd_details)
2199{
2200 struct i40e_aq_desc desc;
2201 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2202 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2203 i40e_status status;
2204
2205 i40e_fill_default_direct_cmd_desc(&desc,
2206 i40e_aqc_opc_set_vsi_promiscuous_modes);
2207
2208 if (set_filter)
2209 cmd->promiscuous_flags
2210 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2211 else
2212 cmd->promiscuous_flags
2213 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2214
2215 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2216 cmd->seid = cpu_to_le16(seid);
2217 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2218
2219 return status;
2220}
2221
2222/**
2223 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2224 * @hw: pointer to the hw struct
2225 * @seid: vsi number
2226 * @enable: set VLAN promiscuous enable/disable
2227 * @cmd_details: pointer to command details structure or NULL
2228 **/
2229i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2230 u16 seid, bool enable,
2231 struct i40e_asq_cmd_details *cmd_details)
2232{
2233 struct i40e_aq_desc desc;
2234 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2235 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2236 i40e_status status;
2237 u16 flags = 0;
2238
2239 i40e_fill_default_direct_cmd_desc(&desc,
2240 i40e_aqc_opc_set_vsi_promiscuous_modes);
2241 if (enable)
2242 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2243
2244 cmd->promiscuous_flags = cpu_to_le16(flags);
2245 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2246 cmd->seid = cpu_to_le16(seid);
2247
2248 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2249
2250 return status;
2251}
2252
2253/**
2254 * i40e_aq_get_vsi_params - get VSI configuration info
2255 * @hw: pointer to the hw struct
2256 * @vsi_ctx: pointer to a vsi context struct
2257 * @cmd_details: pointer to command details structure or NULL
2258 **/
2259i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2260 struct i40e_vsi_context *vsi_ctx,
2261 struct i40e_asq_cmd_details *cmd_details)
2262{
2263 struct i40e_aq_desc desc;
2264 struct i40e_aqc_add_get_update_vsi *cmd =
2265 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2266 struct i40e_aqc_add_get_update_vsi_completion *resp =
2267 (struct i40e_aqc_add_get_update_vsi_completion *)
2268 &desc.params.raw;
2269 i40e_status status;
2270
2271 i40e_fill_default_direct_cmd_desc(&desc,
2272 i40e_aqc_opc_get_vsi_parameters);
2273
2274 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2275
2276 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2277
2278 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2279 sizeof(vsi_ctx->info), NULL);
2280
2281 if (status)
2282 goto aq_get_vsi_params_exit;
2283
2284 vsi_ctx->seid = le16_to_cpu(resp->seid);
2285 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2286 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2287 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2288
2289aq_get_vsi_params_exit:
2290 return status;
2291}
2292
2293/**
2294 * i40e_aq_update_vsi_params
2295 * @hw: pointer to the hw struct
2296 * @vsi_ctx: pointer to a vsi context struct
2297 * @cmd_details: pointer to command details structure or NULL
2298 *
2299 * Update a VSI context.
2300 **/
2301i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2302 struct i40e_vsi_context *vsi_ctx,
2303 struct i40e_asq_cmd_details *cmd_details)
2304{
2305 struct i40e_aq_desc desc;
2306 struct i40e_aqc_add_get_update_vsi *cmd =
2307 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2308 struct i40e_aqc_add_get_update_vsi_completion *resp =
2309 (struct i40e_aqc_add_get_update_vsi_completion *)
2310 &desc.params.raw;
2311 i40e_status status;
2312
2313 i40e_fill_default_direct_cmd_desc(&desc,
2314 i40e_aqc_opc_update_vsi_parameters);
2315 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2316
2317 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2318
2319 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
2320 sizeof(vsi_ctx->info),
2321 cmd_details, true);
2322
2323 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2324 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2325
2326 return status;
2327}
2328
2329/**
2330 * i40e_aq_get_switch_config
2331 * @hw: pointer to the hardware structure
2332 * @buf: pointer to the result buffer
2333 * @buf_size: length of input buffer
2334 * @start_seid: seid to start for the report, 0 == beginning
2335 * @cmd_details: pointer to command details structure or NULL
2336 *
2337 * Fill the buf with switch configuration returned from AdminQ command
2338 **/
2339i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2340 struct i40e_aqc_get_switch_config_resp *buf,
2341 u16 buf_size, u16 *start_seid,
2342 struct i40e_asq_cmd_details *cmd_details)
2343{
2344 struct i40e_aq_desc desc;
2345 struct i40e_aqc_switch_seid *scfg =
2346 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2347 i40e_status status;
2348
2349 i40e_fill_default_direct_cmd_desc(&desc,
2350 i40e_aqc_opc_get_switch_config);
2351 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2352 if (buf_size > I40E_AQ_LARGE_BUF)
2353 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2354 scfg->seid = cpu_to_le16(*start_seid);
2355
2356 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2357 *start_seid = le16_to_cpu(scfg->seid);
2358
2359 return status;
2360}
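
/* Usage sketch (illustrative only; "sw_config" is a hypothetical buffer of at
 * least I40E_AQ_LARGE_BUF bytes). The response is paged, so feed the returned
 * start_seid back in until it comes back as zero:
 *
 *	u16 next_seid = 0;
 *
 *	do {
 *		status = i40e_aq_get_switch_config(hw, sw_config,
 *						   I40E_AQ_LARGE_BUF,
 *						   &next_seid, NULL);
 *		if (status)
 *			break;
 *		... walk sw_config->element[] ...
 *	} while (next_seid != 0);
 */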
2361
2362/**
2363 * i40e_aq_set_switch_config
2364 * @hw: pointer to the hardware structure
2365 * @flags: bit flag values to set
2366 * @valid_flags: which bit flags to set
2367 * @mode: cloud filter mode
2369 * @cmd_details: pointer to command details structure or NULL
2370 *
2371 * Set switch configuration bits
2372 **/
2373enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2374 u16 flags,
2375 u16 valid_flags, u8 mode,
2376 struct i40e_asq_cmd_details *cmd_details)
2377{
2378 struct i40e_aq_desc desc;
2379 struct i40e_aqc_set_switch_config *scfg =
2380 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2381 enum i40e_status_code status;
2382
2383 i40e_fill_default_direct_cmd_desc(&desc,
2384 i40e_aqc_opc_set_switch_config);
2385 scfg->flags = cpu_to_le16(flags);
2386 scfg->valid_flags = cpu_to_le16(valid_flags);
2387 scfg->mode = mode;
2388 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2389 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2390 scfg->first_tag = cpu_to_le16(hw->first_tag);
2391 scfg->second_tag = cpu_to_le16(hw->second_tag);
2392 }
2393 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2394
2395 return status;
2396}
2397
2398/**
2399 * i40e_aq_get_firmware_version
2400 * @hw: pointer to the hw struct
2401 * @fw_major_version: firmware major version
2402 * @fw_minor_version: firmware minor version
2403 * @fw_build: firmware build number
2404 * @api_major_version: admin queue API major version
2405 * @api_minor_version: admin queue API minor version
2406 * @cmd_details: pointer to command details structure or NULL
2407 *
2408 * Get the firmware version from the admin queue commands
2409 **/
2410i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2411 u16 *fw_major_version, u16 *fw_minor_version,
2412 u32 *fw_build,
2413 u16 *api_major_version, u16 *api_minor_version,
2414 struct i40e_asq_cmd_details *cmd_details)
2415{
2416 struct i40e_aq_desc desc;
2417 struct i40e_aqc_get_version *resp =
2418 (struct i40e_aqc_get_version *)&desc.params.raw;
2419 i40e_status status;
2420
2421 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2422
2423 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2424
2425 if (!status) {
2426 if (fw_major_version)
2427 *fw_major_version = le16_to_cpu(resp->fw_major);
2428 if (fw_minor_version)
2429 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2430 if (fw_build)
2431 *fw_build = le32_to_cpu(resp->fw_build);
2432 if (api_major_version)
2433 *api_major_version = le16_to_cpu(resp->api_major);
2434 if (api_minor_version)
2435 *api_minor_version = le16_to_cpu(resp->api_minor);
2436 }
2437
2438 return status;
2439}
2440
2441/**
2442 * i40e_aq_send_driver_version
2443 * @hw: pointer to the hw struct
2444 * @dv: driver's major, minor version
2445 * @cmd_details: pointer to command details structure or NULL
2446 *
2447 * Send the driver version to the firmware
2448 **/
2449i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2450 struct i40e_driver_version *dv,
2451 struct i40e_asq_cmd_details *cmd_details)
2452{
2453 struct i40e_aq_desc desc;
2454 struct i40e_aqc_driver_version *cmd =
2455 (struct i40e_aqc_driver_version *)&desc.params.raw;
2456 i40e_status status;
2457 u16 len;
2458
2459 if (dv == NULL)
2460 return I40E_ERR_PARAM;
2461
2462 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2463
2464 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2465 cmd->driver_major_ver = dv->major_version;
2466 cmd->driver_minor_ver = dv->minor_version;
2467 cmd->driver_build_ver = dv->build_version;
2468 cmd->driver_subbuild_ver = dv->subbuild_version;
2469
2470 len = 0;
2471 while (len < sizeof(dv->driver_string) &&
2472 (dv->driver_string[len] < 0x80) &&
2473 dv->driver_string[len])
2474 len++;
2475 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2476 len, cmd_details);
2477
2478 return status;
2479}
2480
2481/**
2482 * i40e_get_link_status - get status of the HW network link
2483 * @hw: pointer to the hw struct
2484 * @link_up: pointer to bool (true/false = linkup/linkdown)
2485 *
2486 * Sets *link_up to true if the link is up, false if it is down.
2487 * The value of *link_up is invalid if the returned status is non-zero.
2488 *
2489 * Side effect: LinkStatusEvent reporting becomes enabled
2490 **/
2491i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2492{
2493 i40e_status status = 0;
2494
2495 if (hw->phy.get_link_info) {
2496 status = i40e_update_link_info(hw);
2497
2498 if (status)
2499 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2500 status);
2501 }
2502
2503 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2504
2505 return status;
2506}
2507
2508/**
2509 * i40e_update_link_info - update status of the HW network link
2510 * @hw: pointer to the hw struct
2511 **/
2512noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2513{
2514 struct i40e_aq_get_phy_abilities_resp abilities;
2515 i40e_status status = 0;
2516
2517 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2518 if (status)
2519 return status;
2520
2521 /* extra checking needed to ensure link info to user is timely */
2522 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2523 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2524 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2525 status = i40e_aq_get_phy_capabilities(hw, false, false,
2526 &abilities, NULL);
2527 if (status)
2528 return status;
2529
2530 if (abilities.fec_cfg_curr_mod_ext_info &
2531 I40E_AQ_ENABLE_FEC_AUTO)
2532 hw->phy.link_info.req_fec_info =
2533 (I40E_AQ_REQUEST_FEC_KR |
2534 I40E_AQ_REQUEST_FEC_RS);
2535 else
2536 hw->phy.link_info.req_fec_info =
2537 abilities.fec_cfg_curr_mod_ext_info &
2538 (I40E_AQ_REQUEST_FEC_KR |
2539 I40E_AQ_REQUEST_FEC_RS);
2540
2541 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2542 sizeof(hw->phy.link_info.module_type));
2543 }
2544
2545 return status;
2546}
2547
2548/**
2549 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2550 * @hw: pointer to the hw struct
2551 * @uplink_seid: the MAC or other gizmo SEID
2552 * @downlink_seid: the VSI SEID
2553 * @enabled_tc: bitmap of TCs to be enabled
2554 * @default_port: true for default port VSI, false for control port
2555 * @veb_seid: pointer to where to put the resulting VEB SEID
2556 * @enable_stats: true to turn on VEB stats
2557 * @cmd_details: pointer to command details structure or NULL
2558 *
2559 * This asks the FW to add a VEB between the uplink and downlink
2560 * elements. If the uplink SEID is 0, this will be a floating VEB.
2561 **/
2562i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2563 u16 downlink_seid, u8 enabled_tc,
2564 bool default_port, u16 *veb_seid,
2565 bool enable_stats,
2566 struct i40e_asq_cmd_details *cmd_details)
2567{
2568 struct i40e_aq_desc desc;
2569 struct i40e_aqc_add_veb *cmd =
2570 (struct i40e_aqc_add_veb *)&desc.params.raw;
2571 struct i40e_aqc_add_veb_completion *resp =
2572 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2573 i40e_status status;
2574 u16 veb_flags = 0;
2575
2576 /* SEIDs need to either both be set or both be 0 for floating VEB */
2577 if (!!uplink_seid != !!downlink_seid)
2578 return I40E_ERR_PARAM;
2579
2580 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2581
2582 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2583 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2584 cmd->enable_tcs = enabled_tc;
2585 if (!uplink_seid)
2586 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2587 if (default_port)
2588 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2589 else
2590 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2591
2592 /* reverse logic here: set the bitflag to disable the stats */
2593 if (!enable_stats)
2594 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2595
2596 cmd->veb_flags = cpu_to_le16(veb_flags);
2597
2598 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2599
2600 if (!status && veb_seid)
2601 *veb_seid = le16_to_cpu(resp->veb_seid);
2602
2603 return status;
2604}
2605
2606/**
2607 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2608 * @hw: pointer to the hw struct
2609 * @veb_seid: the SEID of the VEB to query
2610 * @switch_id: the uplink switch id
2611 * @floating: set to true if the VEB is floating
2612 * @statistic_index: index of the stats counter block for this VEB
2613 * @vebs_used: number of VEBs used by function
2614 * @vebs_free: total VEBs not reserved by any function
2615 * @cmd_details: pointer to command details structure or NULL
2616 *
2617 * This retrieves the parameters for a particular VEB, specified by
2618 * uplink_seid, and returns them to the caller.
2619 **/
2620i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2621 u16 veb_seid, u16 *switch_id,
2622 bool *floating, u16 *statistic_index,
2623 u16 *vebs_used, u16 *vebs_free,
2624 struct i40e_asq_cmd_details *cmd_details)
2625{
2626 struct i40e_aq_desc desc;
2627 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2628 (struct i40e_aqc_get_veb_parameters_completion *)
2629 &desc.params.raw;
2630 i40e_status status;
2631
2632 if (veb_seid == 0)
2633 return I40E_ERR_PARAM;
2634
2635 i40e_fill_default_direct_cmd_desc(&desc,
2636 i40e_aqc_opc_get_veb_parameters);
2637 cmd_resp->seid = cpu_to_le16(veb_seid);
2638
2639 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2640 if (status)
2641 goto get_veb_exit;
2642
2643 if (switch_id)
2644 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2645 if (statistic_index)
2646 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2647 if (vebs_used)
2648 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2649 if (vebs_free)
2650 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2651 if (floating) {
2652 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2653
2654 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2655 *floating = true;
2656 else
2657 *floating = false;
2658 }
2659
2660get_veb_exit:
2661 return status;
2662}
2663
2664/**
2665 * i40e_prepare_add_macvlan
2666 * @mv_list: list of macvlans to be added
2667 * @desc: pointer to AQ descriptor structure
2668 * @count: length of the list
2669 * @seid: VSI for the mac address
2670 *
2671 * Internal helper function that prepares the add macvlan request
2672 * and returns the buffer size.
2673 **/
2674static u16
2675i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
2676 struct i40e_aq_desc *desc, u16 count, u16 seid)
2677{
2678 struct i40e_aqc_macvlan *cmd =
2679 (struct i40e_aqc_macvlan *)&desc->params.raw;
2680 u16 buf_size;
2681 int i;
2682
2683 buf_size = count * sizeof(*mv_list);
2684
2685 /* prep the rest of the request */
2686 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
2687 cmd->num_addresses = cpu_to_le16(count);
2688 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2689 cmd->seid[1] = 0;
2690 cmd->seid[2] = 0;
2691
2692 for (i = 0; i < count; i++)
2693 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2694 mv_list[i].flags |=
2695 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2696
2697 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2698 if (buf_size > I40E_AQ_LARGE_BUF)
2699 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2700
2701 return buf_size;
2702}
2703
2704/**
2705 * i40e_aq_add_macvlan
2706 * @hw: pointer to the hw struct
2707 * @seid: VSI for the mac address
2708 * @mv_list: list of macvlans to be added
2709 * @count: length of the list
2710 * @cmd_details: pointer to command details structure or NULL
2711 *
2712 * Add MAC/VLAN addresses to the HW filtering
2713 **/
2714i40e_status
2715i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2716 struct i40e_aqc_add_macvlan_element_data *mv_list,
2717 u16 count, struct i40e_asq_cmd_details *cmd_details)
2718{
2719 struct i40e_aq_desc desc;
2720 u16 buf_size;
2721
2722 if (count == 0 || !mv_list || !hw)
2723 return I40E_ERR_PARAM;
2724
2725 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2726
2727 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2728 cmd_details, true);
2729}
2730
2731/**
2732 * i40e_aq_add_macvlan_v2
2733 * @hw: pointer to the hw struct
2734 * @seid: VSI for the mac address
2735 * @mv_list: list of macvlans to be added
2736 * @count: length of the list
2737 * @cmd_details: pointer to command details structure or NULL
2738 * @aq_status: pointer to Admin Queue status return value
2739 *
2740 * Add MAC/VLAN addresses to the HW filtering.
2741 * The _v2 version returns the last Admin Queue status in aq_status
2742 * to avoid race conditions in access to hw->aq.asq_last_status.
2743 * It also calls _v2 versions of asq_send_command functions to
2744 * get the aq_status on the stack.
2745 **/
2746i40e_status
2747i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
2748 struct i40e_aqc_add_macvlan_element_data *mv_list,
2749 u16 count, struct i40e_asq_cmd_details *cmd_details,
2750 enum i40e_admin_queue_err *aq_status)
2751{
2752 struct i40e_aq_desc desc;
2753 u16 buf_size;
2754
2755 if (count == 0 || !mv_list || !hw)
2756 return I40E_ERR_PARAM;
2757
2758 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2759
2760 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2761 cmd_details, true, aq_status);
2762}
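
/* Usage sketch (illustrative only; "mac" and "vsi_seid" are hypothetical
 * caller-provided values). Build a one-element filter list and pick the AQ
 * status up from the stack rather than from hw->aq.asq_last_status:
 *
 *	struct i40e_aqc_add_macvlan_element_data elem = {};
 *	enum i40e_admin_queue_err aq_status;
 *	i40e_status status;
 *
 *	ether_addr_copy(elem.mac_addr, mac);
 *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
 *				 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
 *
 *	status = i40e_aq_add_macvlan_v2(hw, vsi_seid, &elem, 1, NULL,
 *					&aq_status);
 */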
2763
2764/**
2765 * i40e_aq_remove_macvlan
2766 * @hw: pointer to the hw struct
2767 * @seid: VSI for the mac address
2768 * @mv_list: list of macvlans to be removed
2769 * @count: length of the list
2770 * @cmd_details: pointer to command details structure or NULL
2771 *
2772 * Remove MAC/VLAN addresses from the HW filtering
2773 **/
2774i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2775 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2776 u16 count, struct i40e_asq_cmd_details *cmd_details)
2777{
2778 struct i40e_aq_desc desc;
2779 struct i40e_aqc_macvlan *cmd =
2780 (struct i40e_aqc_macvlan *)&desc.params.raw;
2781 i40e_status status;
2782 u16 buf_size;
2783
2784 if (count == 0 || !mv_list || !hw)
2785 return I40E_ERR_PARAM;
2786
2787 buf_size = count * sizeof(*mv_list);
2788
2789 /* prep the rest of the request */
2790 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2791 cmd->num_addresses = cpu_to_le16(count);
2792 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2793 cmd->seid[1] = 0;
2794 cmd->seid[2] = 0;
2795
2796 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2797 if (buf_size > I40E_AQ_LARGE_BUF)
2798 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2799
2800 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2801 cmd_details, true);
2802
2803 return status;
2804}
2805
2806/**
2807 * i40e_aq_remove_macvlan_v2
2808 * @hw: pointer to the hw struct
2809 * @seid: VSI for the mac address
2810 * @mv_list: list of macvlans to be removed
2811 * @count: length of the list
2812 * @cmd_details: pointer to command details structure or NULL
2813 * @aq_status: pointer to Admin Queue status return value
2814 *
2815 * Remove MAC/VLAN addresses from the HW filtering.
2816 * The _v2 version returns the last Admin Queue status in aq_status
2817 * to avoid race conditions in access to hw->aq.asq_last_status.
2818 * It also calls _v2 versions of asq_send_command functions to
2819 * get the aq_status on the stack.
2820 **/
2821i40e_status
2822i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
2823 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2824 u16 count, struct i40e_asq_cmd_details *cmd_details,
2825 enum i40e_admin_queue_err *aq_status)
2826{
2827 struct i40e_aqc_macvlan *cmd;
2828 struct i40e_aq_desc desc;
2829 u16 buf_size;
2830
2831 if (count == 0 || !mv_list || !hw)
2832 return I40E_ERR_PARAM;
2833
2834 buf_size = count * sizeof(*mv_list);
2835
2836 /* prep the rest of the request */
2837 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2838 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
2839 cmd->num_addresses = cpu_to_le16(count);
2840 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2841 cmd->seid[1] = 0;
2842 cmd->seid[2] = 0;
2843
2844 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2845 if (buf_size > I40E_AQ_LARGE_BUF)
2846 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2847
2848 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2849 cmd_details, true, aq_status);
2850}
2851
2852/**
2853 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2854 * @hw: pointer to the hw struct
2855 * @opcode: AQ opcode for add or delete mirror rule
2856 * @sw_seid: Switch SEID (to which rule refers)
2857 * @rule_type: Rule Type (ingress/egress/VLAN)
2858 * @id: Destination VSI SEID or Rule ID
2859 * @count: length of the list
2860 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2861 * @cmd_details: pointer to command details structure or NULL
2862 * @rule_id: Rule ID returned from FW
2863 * @rules_used: Number of rules used in internal switch
2864 * @rules_free: Number of rules free in internal switch
2865 *
2866 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2867 * VEBs/VEPA elements only
2868 **/
2869static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2870 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2871 u16 count, __le16 *mr_list,
2872 struct i40e_asq_cmd_details *cmd_details,
2873 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2874{
2875 struct i40e_aq_desc desc;
2876 struct i40e_aqc_add_delete_mirror_rule *cmd =
2877 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2878 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2879 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2880 i40e_status status;
2881 u16 buf_size;
2882
2883 buf_size = count * sizeof(*mr_list);
2884
2885 /* prep the rest of the request */
2886 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2887 cmd->seid = cpu_to_le16(sw_seid);
2888 cmd->rule_type = cpu_to_le16(rule_type &
2889 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2890 cmd->num_entries = cpu_to_le16(count);
2891 /* Dest VSI for add, rule_id for delete */
2892 cmd->destination = cpu_to_le16(id);
2893 if (mr_list) {
2894 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2895 I40E_AQ_FLAG_RD));
2896 if (buf_size > I40E_AQ_LARGE_BUF)
2897 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2898 }
2899
2900 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2901 cmd_details);
2902 if (!status ||
2903 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2904 if (rule_id)
2905 *rule_id = le16_to_cpu(resp->rule_id);
2906 if (rules_used)
2907 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2908 if (rules_free)
2909 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2910 }
2911 return status;
2912}
2913
2914/**
2915 * i40e_aq_add_mirrorrule - add a mirror rule
2916 * @hw: pointer to the hw struct
2917 * @sw_seid: Switch SEID (to which rule refers)
2918 * @rule_type: Rule Type (ingress/egress/VLAN)
2919 * @dest_vsi: SEID of VSI to which packets will be mirrored
2920 * @count: length of the list
2921 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2922 * @cmd_details: pointer to command details structure or NULL
2923 * @rule_id: Rule ID returned from FW
2924 * @rules_used: Number of rules used in internal switch
2925 * @rules_free: Number of rules free in internal switch
2926 *
2927 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2928 **/
2929i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2930 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2931 struct i40e_asq_cmd_details *cmd_details,
2932 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2933{
2934 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2935 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2936 if (count == 0 || !mr_list)
2937 return I40E_ERR_PARAM;
2938 }
2939
2940 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2941 rule_type, dest_vsi, count, mr_list,
2942 cmd_details, rule_id, rules_used, rules_free);
2943}
2944
2945/**
2946 * i40e_aq_delete_mirrorrule - delete a mirror rule
2947 * @hw: pointer to the hw struct
2948 * @sw_seid: Switch SEID (to which rule refers)
2949 * @rule_type: Rule Type (ingress/egress/VLAN)
2950 * @rule_id: Rule ID that is returned in the receive desc as part of
2951 * add_mirrorrule.
2952 * @count: length of the list
2953 * @mr_list: list of mirrored VLAN IDs to be removed
2954 * @cmd_details: pointer to command details structure or NULL
2955 * @rules_used: Number of rules used in internal switch
2956 * @rules_free: Number of rules free in internal switch
2957 *
2958 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2959 **/
2960i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2961 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2962 struct i40e_asq_cmd_details *cmd_details,
2963 u16 *rules_used, u16 *rules_free)
2964{
2965 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2966 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2967 /* count and mr_list shall be valid for rule_type INGRESS VLAN
2968 * mirroring. For other rule_type, count and mr_list should
2969 * not matter.
2970 */
2971 if (count == 0 || !mr_list)
2972 return I40E_ERR_PARAM;
2973 }
2974
2975 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2976 rule_type, rule_id, count, mr_list,
2977 cmd_details, NULL, rules_used, rules_free);
2978}
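
/* Usage sketch (illustrative only; "veb_seid" and "dst_vsi_seid" are
 * hypothetical). Mirror all ingress traffic on a VEB to one VSI, then tear
 * the rule down again with the rule ID returned by the add:
 *
 *	u16 rule_id, rules_used, rules_free;
 *
 *	status = i40e_aq_add_mirrorrule(hw, veb_seid,
 *					I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
 *					dst_vsi_seid, 0, NULL, NULL,
 *					&rule_id, &rules_used, &rules_free);
 *	...
 *	status = i40e_aq_delete_mirrorrule(hw, veb_seid,
 *					I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
 *					rule_id, 0, NULL, NULL,
 *					&rules_used, &rules_free);
 */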
2979
2980/**
2981 * i40e_aq_send_msg_to_vf
2982 * @hw: pointer to the hardware structure
2983 * @vfid: VF id to send msg
2984 * @v_opcode: opcodes for VF-PF communication
2985 * @v_retval: return error code
2986 * @msg: pointer to the msg buffer
2987 * @msglen: msg length
2988 * @cmd_details: pointer to command details
2989 *
2990 * Send a message to the given VF over the PF-to-VF admin queue channel.
2991 **/
2992i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2993 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2994 struct i40e_asq_cmd_details *cmd_details)
2995{
2996 struct i40e_aq_desc desc;
2997 struct i40e_aqc_pf_vf_message *cmd =
2998 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2999 i40e_status status;
3000
3001 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
3002 cmd->id = cpu_to_le32(vfid);
3003 desc.cookie_high = cpu_to_le32(v_opcode);
3004 desc.cookie_low = cpu_to_le32(v_retval);
3005 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
3006 if (msglen) {
3007 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
3008 I40E_AQ_FLAG_RD));
3009 if (msglen > I40E_AQ_LARGE_BUF)
3010 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3011 desc.datalen = cpu_to_le16(msglen);
3012 }
3013 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
3014
3015 return status;
3016}
3017
3018/**
3019 * i40e_aq_debug_read_register
3020 * @hw: pointer to the hw struct
3021 * @reg_addr: register address
3022 * @reg_val: register value
3023 * @cmd_details: pointer to command details structure or NULL
3024 *
3025 * Read the register using the admin queue commands
3026 **/
3027i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
3028 u32 reg_addr, u64 *reg_val,
3029 struct i40e_asq_cmd_details *cmd_details)
3030{
3031 struct i40e_aq_desc desc;
3032 struct i40e_aqc_debug_reg_read_write *cmd_resp =
3033 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3034 i40e_status status;
3035
3036 if (reg_val == NULL)
3037 return I40E_ERR_PARAM;
3038
3039 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
3040
3041 cmd_resp->address = cpu_to_le32(reg_addr);
3042
3043 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3044
3045 if (!status) {
3046 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
3047 (u64)le32_to_cpu(cmd_resp->value_low);
3048 }
3049
3050 return status;
3051}
3052
3053/**
3054 * i40e_aq_debug_write_register
3055 * @hw: pointer to the hw struct
3056 * @reg_addr: register address
3057 * @reg_val: register value
3058 * @cmd_details: pointer to command details structure or NULL
3059 *
3060 * Write to a register using the admin queue commands
3061 **/
3062i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
3063 u32 reg_addr, u64 reg_val,
3064 struct i40e_asq_cmd_details *cmd_details)
3065{
3066 struct i40e_aq_desc desc;
3067 struct i40e_aqc_debug_reg_read_write *cmd =
3068 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3069 i40e_status status;
3070
3071 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
3072
3073 cmd->address = cpu_to_le32(reg_addr);
3074 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
3075 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
3076
3077 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3078
3079 return status;
3080}
3081
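/* Minimal usage sketch for the two debug register helpers above (assumes a
 * diagnostic context; reg_addr is illustrative): read a register through the
 * admin queue and write the same value back:
 *
 *	u64 val = 0;
 *
 *	if (!i40e_aq_debug_read_register(hw, reg_addr, &val, NULL))
 *		i40e_aq_debug_write_register(hw, reg_addr, val, NULL);
 *
 * The read path assembles the 64-bit result from the value_high/value_low
 * pair returned in the descriptor, so callers always receive the full width.
 */
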
3082/**
3083 * i40e_aq_request_resource
3084 * @hw: pointer to the hw struct
3085 * @resource: resource id
3086 * @access: access type
3087 * @sdp_number: resource number
3088 * @timeout: the maximum time in ms that the driver may hold the resource
3089 * @cmd_details: pointer to command details structure or NULL
3090 *
3091 * requests common resource using the admin queue commands
3092 **/
3093i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3094 enum i40e_aq_resources_ids resource,
3095 enum i40e_aq_resource_access_type access,
3096 u8 sdp_number, u64 *timeout,
3097 struct i40e_asq_cmd_details *cmd_details)
3098{
3099 struct i40e_aq_desc desc;
3100 struct i40e_aqc_request_resource *cmd_resp =
3101 (struct i40e_aqc_request_resource *)&desc.params.raw;
3102 i40e_status status;
3103
3104 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3105
3106 cmd_resp->resource_id = cpu_to_le16(resource);
3107 cmd_resp->access_type = cpu_to_le16(access);
3108 cmd_resp->resource_number = cpu_to_le32(sdp_number);
3109
3110 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3111 /* The completion specifies the maximum time in ms that the driver
3112 * may hold the resource in the Timeout field.
3113 * If the resource is held by someone else, the command completes with
3114 * busy return value and the timeout field indicates the maximum time
3115 * the current owner of the resource has to free it.
3116 */
3117 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3118 *timeout = le32_to_cpu(cmd_resp->timeout);
3119
3120 return status;
3121}
3122
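/* Hedged sketch of the EBUSY/timeout contract described above (the retry
 * policy and locals are illustrative, not the driver's exact flow):
 *
 *	u64 timeout = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *				       I40E_RESOURCE_READ, 0, &timeout, NULL);
 *	while (err && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) {
 *		usleep_range(1000, 2000);
 *		err = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *					       I40E_RESOURCE_READ, 0,
 *					       &timeout, NULL);
 *	}
 *
 * On success 'timeout' holds the maximum time in ms the caller itself may
 * hold the resource before releasing it with i40e_aq_release_resource().
 */
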
3123/**
3124 * i40e_aq_release_resource
3125 * @hw: pointer to the hw struct
3126 * @resource: resource id
3127 * @sdp_number: resource number
3128 * @cmd_details: pointer to command details structure or NULL
3129 *
3130 * release common resource using the admin queue commands
3131 **/
3132i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3133 enum i40e_aq_resources_ids resource,
3134 u8 sdp_number,
3135 struct i40e_asq_cmd_details *cmd_details)
3136{
3137 struct i40e_aq_desc desc;
3138 struct i40e_aqc_request_resource *cmd =
3139 (struct i40e_aqc_request_resource *)&desc.params.raw;
3140 i40e_status status;
3141
3142 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3143
3144 cmd->resource_id = cpu_to_le16(resource);
3145 cmd->resource_number = cpu_to_le32(sdp_number);
3146
3147 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3148
3149 return status;
3150}
3151
3152/**
3153 * i40e_aq_read_nvm
3154 * @hw: pointer to the hw struct
3155 * @module_pointer: module pointer location in words from the NVM beginning
3156 * @offset: byte offset from the module beginning
3157 * @length: length of the section to be read (in bytes from the offset)
3158 * @data: command buffer (size [bytes] = length)
3159 * @last_command: tells if this is the last command in a series
3160 * @cmd_details: pointer to command details structure or NULL
3161 *
3162 * Read the NVM using the admin queue commands
3163 **/
3164i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3165 u32 offset, u16 length, void *data,
3166 bool last_command,
3167 struct i40e_asq_cmd_details *cmd_details)
3168{
3169 struct i40e_aq_desc desc;
3170 struct i40e_aqc_nvm_update *cmd =
3171 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3172 i40e_status status;
3173
	/* The highest byte of the offset must be zero. */
3175 if (offset & 0xFF000000) {
3176 status = I40E_ERR_PARAM;
3177 goto i40e_aq_read_nvm_exit;
3178 }
3179
3180 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3181
3182 /* If this is the last command in a series, set the proper flag. */
3183 if (last_command)
3184 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3185 cmd->module_pointer = module_pointer;
3186 cmd->offset = cpu_to_le32(offset);
3187 cmd->length = cpu_to_le16(length);
3188
3189 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3190 if (length > I40E_AQ_LARGE_BUF)
3191 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3192
3193 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3194
3195i40e_aq_read_nvm_exit:
3196 return status;
3197}
3198
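/* Minimal sketch of the usual acquire/read/release sequence built on this
 * helper (the same pattern the X722 OCP check later in this file uses); the
 * word being read is illustrative:
 *
 *	u16 word = 0;
 *
 *	if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
 *		i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
 *				 2 * I40E_SR_OCP_CFG_WORD0, sizeof(word),
 *				 &word, true, NULL);
 *		i40e_release_nvm(hw);
 *	}
 *
 * Note the byte offset: NVM words are 16 bits wide, so word index N lives at
 * byte offset 2 * N, and the offset's highest byte must stay zero.
 */
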
3199/**
3200 * i40e_aq_erase_nvm
3201 * @hw: pointer to the hw struct
3202 * @module_pointer: module pointer location in words from the NVM beginning
3203 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3204 * @length: length of the section to be erased (expressed in 4 KB)
3205 * @last_command: tells if this is the last command in a series
3206 * @cmd_details: pointer to command details structure or NULL
3207 *
3208 * Erase the NVM sector using the admin queue commands
3209 **/
3210i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3211 u32 offset, u16 length, bool last_command,
3212 struct i40e_asq_cmd_details *cmd_details)
3213{
3214 struct i40e_aq_desc desc;
3215 struct i40e_aqc_nvm_update *cmd =
3216 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3217 i40e_status status;
3218
	/* The highest byte of the offset must be zero. */
3220 if (offset & 0xFF000000) {
3221 status = I40E_ERR_PARAM;
3222 goto i40e_aq_erase_nvm_exit;
3223 }
3224
3225 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3226
3227 /* If this is the last command in a series, set the proper flag. */
3228 if (last_command)
3229 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3230 cmd->module_pointer = module_pointer;
3231 cmd->offset = cpu_to_le32(offset);
3232 cmd->length = cpu_to_le16(length);
3233
3234 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3235
3236i40e_aq_erase_nvm_exit:
3237 return status;
3238}
3239
3240/**
3241 * i40e_parse_discover_capabilities
3242 * @hw: pointer to the hw struct
3243 * @buff: pointer to a buffer containing device/function capability records
3244 * @cap_count: number of capability records in the list
3245 * @list_type_opc: type of capabilities list to parse
3246 *
3247 * Parse the device/function capabilities list.
3248 **/
3249static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3250 u32 cap_count,
3251 enum i40e_admin_queue_opc list_type_opc)
3252{
3253 struct i40e_aqc_list_capabilities_element_resp *cap;
3254 u32 valid_functions, num_functions;
3255 u32 number, logical_id, phys_id;
3256 struct i40e_hw_capabilities *p;
3257 u16 id, ocp_cfg_word0;
3258 i40e_status status;
3259 u8 major_rev;
3260 u32 i = 0;
3261
3262 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3263
3264 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3265 p = &hw->dev_caps;
3266 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3267 p = &hw->func_caps;
3268 else
3269 return;
3270
3271 for (i = 0; i < cap_count; i++, cap++) {
3272 id = le16_to_cpu(cap->id);
3273 number = le32_to_cpu(cap->number);
3274 logical_id = le32_to_cpu(cap->logical_id);
3275 phys_id = le32_to_cpu(cap->phys_id);
3276 major_rev = cap->major_rev;
3277
3278 switch (id) {
3279 case I40E_AQ_CAP_ID_SWITCH_MODE:
3280 p->switch_mode = number;
3281 break;
3282 case I40E_AQ_CAP_ID_MNG_MODE:
3283 p->management_mode = number;
3284 if (major_rev > 1) {
3285 p->mng_protocols_over_mctp = logical_id;
3286 i40e_debug(hw, I40E_DEBUG_INIT,
3287 "HW Capability: Protocols over MCTP = %d\n",
3288 p->mng_protocols_over_mctp);
3289 } else {
3290 p->mng_protocols_over_mctp = 0;
3291 }
3292 break;
3293 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3294 p->npar_enable = number;
3295 break;
3296 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3297 p->os2bmc = number;
3298 break;
3299 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3300 p->valid_functions = number;
3301 break;
3302 case I40E_AQ_CAP_ID_SRIOV:
3303 if (number == 1)
3304 p->sr_iov_1_1 = true;
3305 break;
3306 case I40E_AQ_CAP_ID_VF:
3307 p->num_vfs = number;
3308 p->vf_base_id = logical_id;
3309 break;
3310 case I40E_AQ_CAP_ID_VMDQ:
3311 if (number == 1)
3312 p->vmdq = true;
3313 break;
3314 case I40E_AQ_CAP_ID_8021QBG:
3315 if (number == 1)
3316 p->evb_802_1_qbg = true;
3317 break;
3318 case I40E_AQ_CAP_ID_8021QBR:
3319 if (number == 1)
3320 p->evb_802_1_qbh = true;
3321 break;
3322 case I40E_AQ_CAP_ID_VSI:
3323 p->num_vsis = number;
3324 break;
3325 case I40E_AQ_CAP_ID_DCB:
3326 if (number == 1) {
3327 p->dcb = true;
3328 p->enabled_tcmap = logical_id;
3329 p->maxtc = phys_id;
3330 }
3331 break;
3332 case I40E_AQ_CAP_ID_FCOE:
3333 if (number == 1)
3334 p->fcoe = true;
3335 break;
3336 case I40E_AQ_CAP_ID_ISCSI:
3337 if (number == 1)
3338 p->iscsi = true;
3339 break;
3340 case I40E_AQ_CAP_ID_RSS:
3341 p->rss = true;
3342 p->rss_table_size = number;
3343 p->rss_table_entry_width = logical_id;
3344 break;
3345 case I40E_AQ_CAP_ID_RXQ:
3346 p->num_rx_qp = number;
3347 p->base_queue = phys_id;
3348 break;
3349 case I40E_AQ_CAP_ID_TXQ:
3350 p->num_tx_qp = number;
3351 p->base_queue = phys_id;
3352 break;
3353 case I40E_AQ_CAP_ID_MSIX:
3354 p->num_msix_vectors = number;
3355 i40e_debug(hw, I40E_DEBUG_INIT,
3356 "HW Capability: MSIX vector count = %d\n",
3357 p->num_msix_vectors);
3358 break;
3359 case I40E_AQ_CAP_ID_VF_MSIX:
3360 p->num_msix_vectors_vf = number;
3361 break;
3362 case I40E_AQ_CAP_ID_FLEX10:
3363 if (major_rev == 1) {
3364 if (number == 1) {
3365 p->flex10_enable = true;
3366 p->flex10_capable = true;
3367 }
3368 } else {
3369 /* Capability revision >= 2 */
3370 if (number & 1)
3371 p->flex10_enable = true;
3372 if (number & 2)
3373 p->flex10_capable = true;
3374 }
3375 p->flex10_mode = logical_id;
3376 p->flex10_status = phys_id;
3377 break;
3378 case I40E_AQ_CAP_ID_CEM:
3379 if (number == 1)
3380 p->mgmt_cem = true;
3381 break;
3382 case I40E_AQ_CAP_ID_IWARP:
3383 if (number == 1)
3384 p->iwarp = true;
3385 break;
3386 case I40E_AQ_CAP_ID_LED:
3387 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3388 p->led[phys_id] = true;
3389 break;
3390 case I40E_AQ_CAP_ID_SDP:
3391 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3392 p->sdp[phys_id] = true;
3393 break;
3394 case I40E_AQ_CAP_ID_MDIO:
3395 if (number == 1) {
3396 p->mdio_port_num = phys_id;
3397 p->mdio_port_mode = logical_id;
3398 }
3399 break;
3400 case I40E_AQ_CAP_ID_1588:
3401 if (number == 1)
3402 p->ieee_1588 = true;
3403 break;
3404 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3405 p->fd = true;
3406 p->fd_filters_guaranteed = number;
3407 p->fd_filters_best_effort = logical_id;
3408 break;
3409 case I40E_AQ_CAP_ID_WSR_PROT:
3410 p->wr_csr_prot = (u64)number;
3411 p->wr_csr_prot |= (u64)logical_id << 32;
3412 break;
3413 case I40E_AQ_CAP_ID_NVM_MGMT:
3414 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3415 p->sec_rev_disabled = true;
3416 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3417 p->update_disabled = true;
3418 break;
3419 default:
3420 break;
3421 }
3422 }
3423
3424 if (p->fcoe)
3425 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3426
	/* Software override to ensure FCoE stays disabled in NPAR or MFP
	 * mode, because it is not supported in those modes.
	 */
3430 if (p->npar_enable || p->flex10_enable)
3431 p->fcoe = false;
3432
3433 /* count the enabled ports (aka the "not disabled" ports) */
3434 hw->num_ports = 0;
3435 for (i = 0; i < 4; i++) {
3436 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3437 u64 port_cfg = 0;
3438
3439 /* use AQ read to get the physical register offset instead
3440 * of the port relative offset
3441 */
3442 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3443 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3444 hw->num_ports++;
3445 }
3446
	/* OCP cards case: if a mezzanine is removed, the Ethernet port is
	 * reported as disabled in the PRTGEN_CNF register, so an additional
	 * NVM read is needed to check whether we are dealing with an OCP
	 * card. Those cards have at least 4 PFs, so counting physical ports
	 * from PRTGEN_CNF would give a wrong partition id and break WoL
	 * support.
	 */
3454 if (hw->mac.type == I40E_MAC_X722) {
3455 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3456 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3457 2 * I40E_SR_OCP_CFG_WORD0,
3458 sizeof(ocp_cfg_word0),
3459 &ocp_cfg_word0, true, NULL);
3460 if (!status &&
3461 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3462 hw->num_ports = 4;
3463 i40e_release_nvm(hw);
3464 }
3465 }
3466
3467 valid_functions = p->valid_functions;
3468 num_functions = 0;
3469 while (valid_functions) {
3470 if (valid_functions & 1)
3471 num_functions++;
3472 valid_functions >>= 1;
3473 }
3474
3475 /* partition id is 1-based, and functions are evenly spread
3476 * across the ports as partitions
3477 */
3478 if (hw->num_ports != 0) {
3479 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3480 hw->num_partitions = num_functions / hw->num_ports;
3481 }
3482
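	/* Worked example with illustrative numbers: 8 valid functions spread
	 * over 4 enabled ports gives num_partitions = 8 / 4 = 2, and a PF
	 * with pf_id 5 lands in partition (5 / 4) + 1 = 2.
	 */
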
3483 /* additional HW specific goodies that might
3484 * someday be HW version specific
3485 */
3486 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3487}
3488
3489/**
3490 * i40e_aq_discover_capabilities
3491 * @hw: pointer to the hw struct
3492 * @buff: a virtual buffer to hold the capabilities
3493 * @buff_size: Size of the virtual buffer
3494 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3495 * @list_type_opc: capabilities type to discover - pass in the command opcode
3496 * @cmd_details: pointer to command details structure or NULL
3497 *
3498 * Get the device capabilities descriptions from the firmware
3499 **/
3500i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3501 void *buff, u16 buff_size, u16 *data_size,
3502 enum i40e_admin_queue_opc list_type_opc,
3503 struct i40e_asq_cmd_details *cmd_details)
3504{
3505 struct i40e_aqc_list_capabilites *cmd;
3506 struct i40e_aq_desc desc;
3507 i40e_status status = 0;
3508
3509 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3510
3511 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3512 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3513 status = I40E_ERR_PARAM;
3514 goto exit;
3515 }
3516
3517 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3518
3519 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3520 if (buff_size > I40E_AQ_LARGE_BUF)
3521 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3522
3523 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3524 *data_size = le16_to_cpu(desc.datalen);
3525
3526 if (status)
3527 goto exit;
3528
3529 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3530 list_type_opc);
3531
3532exit:
3533 return status;
3534}
3535
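/* Hedged usage sketch: callers typically size the capability buffer in two
 * passes, using *data_size when the firmware reports ENOMEM (the buffer
 * handling below is illustrative, not the driver's exact flow):
 *
 *	u16 needed = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_discover_capabilities(hw, buf, len, &needed,
 *					    i40e_aqc_opc_list_func_capabilities,
 *					    NULL);
 *
 * and, if err is set with asq_last_status == I40E_AQ_RC_ENOMEM, reallocate
 * the buffer with at least 'needed' bytes and call it again.
 */
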
3536/**
3537 * i40e_aq_update_nvm
3538 * @hw: pointer to the hw struct
3539 * @module_pointer: module pointer location in words from the NVM beginning
3540 * @offset: byte offset from the module beginning
3541 * @length: length of the section to be written (in bytes from the offset)
3542 * @data: command buffer (size [bytes] = length)
3543 * @last_command: tells if this is the last command in a series
3544 * @preservation_flags: Preservation mode flags
3545 * @cmd_details: pointer to command details structure or NULL
3546 *
3547 * Update the NVM using the admin queue commands
3548 **/
3549i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3550 u32 offset, u16 length, void *data,
3551 bool last_command, u8 preservation_flags,
3552 struct i40e_asq_cmd_details *cmd_details)
3553{
3554 struct i40e_aq_desc desc;
3555 struct i40e_aqc_nvm_update *cmd =
3556 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3557 i40e_status status;
3558
	/* The highest byte of the offset must be zero. */
3560 if (offset & 0xFF000000) {
3561 status = I40E_ERR_PARAM;
3562 goto i40e_aq_update_nvm_exit;
3563 }
3564
3565 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3566
3567 /* If this is the last command in a series, set the proper flag. */
3568 if (last_command)
3569 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3570 if (hw->mac.type == I40E_MAC_X722) {
3571 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3572 cmd->command_flags |=
3573 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3574 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3575 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3576 cmd->command_flags |=
3577 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3578 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3579 }
3580 cmd->module_pointer = module_pointer;
3581 cmd->offset = cpu_to_le32(offset);
3582 cmd->length = cpu_to_le16(length);
3583
3584 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3585 if (length > I40E_AQ_LARGE_BUF)
3586 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3587
3588 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3589
3590i40e_aq_update_nvm_exit:
3591 return status;
3592}
3593
3594/**
3595 * i40e_aq_rearrange_nvm
3596 * @hw: pointer to the hw struct
3597 * @rearrange_nvm: defines direction of rearrangement
3598 * @cmd_details: pointer to command details structure or NULL
3599 *
3600 * Rearrange NVM structure, available only for transition FW
3601 **/
3602i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3603 u8 rearrange_nvm,
3604 struct i40e_asq_cmd_details *cmd_details)
3605{
3606 struct i40e_aqc_nvm_update *cmd;
3607 i40e_status status;
3608 struct i40e_aq_desc desc;
3609
3610 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3611
3612 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3613
3614 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3615 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3616
3617 if (!rearrange_nvm) {
3618 status = I40E_ERR_PARAM;
3619 goto i40e_aq_rearrange_nvm_exit;
3620 }
3621
3622 cmd->command_flags |= rearrange_nvm;
3623 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3624
3625i40e_aq_rearrange_nvm_exit:
3626 return status;
3627}
3628
3629/**
3630 * i40e_aq_get_lldp_mib
3631 * @hw: pointer to the hw struct
3632 * @bridge_type: type of bridge requested
3633 * @mib_type: Local, Remote or both Local and Remote MIBs
3634 * @buff: pointer to a user supplied buffer to store the MIB block
3635 * @buff_size: size of the buffer (in bytes)
 * @local_len: length of the returned Local LLDP MIB
3637 * @remote_len: length of the returned Remote LLDP MIB
3638 * @cmd_details: pointer to command details structure or NULL
3639 *
3640 * Requests the complete LLDP MIB (entire packet).
3641 **/
3642i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3643 u8 mib_type, void *buff, u16 buff_size,
3644 u16 *local_len, u16 *remote_len,
3645 struct i40e_asq_cmd_details *cmd_details)
3646{
3647 struct i40e_aq_desc desc;
3648 struct i40e_aqc_lldp_get_mib *cmd =
3649 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3650 struct i40e_aqc_lldp_get_mib *resp =
3651 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3652 i40e_status status;
3653
3654 if (buff_size == 0 || !buff)
3655 return I40E_ERR_PARAM;
3656
3657 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3658 /* Indirect Command */
3659 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3660
3661 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3662 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3663 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3664
3665 desc.datalen = cpu_to_le16(buff_size);
3666
3667 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3668 if (buff_size > I40E_AQ_LARGE_BUF)
3669 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3670
3671 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3672 if (!status) {
3673 if (local_len != NULL)
3674 *local_len = le16_to_cpu(resp->local_len);
3675 if (remote_len != NULL)
3676 *remote_len = le16_to_cpu(resp->remote_len);
3677 }
3678
3679 return status;
3680}
3681
3682/**
3683 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3684 * @hw: pointer to the hw struct
3685 * @mib_type: Local, Remote or both Local and Remote MIBs
3686 * @buff: pointer to a user supplied buffer to store the MIB block
3687 * @buff_size: size of the buffer (in bytes)
3688 * @cmd_details: pointer to command details structure or NULL
3689 *
3690 * Set the LLDP MIB.
3691 **/
3692enum i40e_status_code
3693i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3694 u8 mib_type, void *buff, u16 buff_size,
3695 struct i40e_asq_cmd_details *cmd_details)
3696{
3697 struct i40e_aqc_lldp_set_local_mib *cmd;
3698 enum i40e_status_code status;
3699 struct i40e_aq_desc desc;
3700
3701 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3702 if (buff_size == 0 || !buff)
3703 return I40E_ERR_PARAM;
3704
3705 i40e_fill_default_direct_cmd_desc(&desc,
3706 i40e_aqc_opc_lldp_set_local_mib);
3707 /* Indirect Command */
3708 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3709 if (buff_size > I40E_AQ_LARGE_BUF)
3710 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3711 desc.datalen = cpu_to_le16(buff_size);
3712
3713 cmd->type = mib_type;
3714 cmd->length = cpu_to_le16(buff_size);
3715 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3716 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3717
3718 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3719 return status;
3720}
3721
3722/**
3723 * i40e_aq_cfg_lldp_mib_change_event
3724 * @hw: pointer to the hw struct
3725 * @enable_update: Enable or Disable event posting
3726 * @cmd_details: pointer to command details structure or NULL
3727 *
3728 * Enable or Disable posting of an event on ARQ when LLDP MIB
3729 * associated with the interface changes
3730 **/
3731i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3732 bool enable_update,
3733 struct i40e_asq_cmd_details *cmd_details)
3734{
3735 struct i40e_aq_desc desc;
3736 struct i40e_aqc_lldp_update_mib *cmd =
3737 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3738 i40e_status status;
3739
3740 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3741
3742 if (!enable_update)
3743 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3744
3745 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3746
3747 return status;
3748}
3749
3750/**
3751 * i40e_aq_restore_lldp
3752 * @hw: pointer to the hw struct
3753 * @setting: pointer to factory setting variable or NULL
3754 * @restore: True if factory settings should be restored
3755 * @cmd_details: pointer to command details structure or NULL
3756 *
 * Restore the LLDP Agent factory settings if @restore is true; otherwise
 * only return the current factory setting in the AQ response.
3759 **/
3760enum i40e_status_code
3761i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3762 struct i40e_asq_cmd_details *cmd_details)
3763{
3764 struct i40e_aq_desc desc;
3765 struct i40e_aqc_lldp_restore *cmd =
3766 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3767 i40e_status status;
3768
3769 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3770 i40e_debug(hw, I40E_DEBUG_ALL,
3771 "Restore LLDP not supported by current FW version.\n");
3772 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3773 }
3774
3775 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3776
3777 if (restore)
3778 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3779
3780 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3781
3782 if (setting)
3783 *setting = cmd->command & 1;
3784
3785 return status;
3786}
3787
3788/**
3789 * i40e_aq_stop_lldp
3790 * @hw: pointer to the hw struct
 * @shutdown_agent: True if the LLDP Agent needs to be shut down
3792 * @persist: True if stop of LLDP should be persistent across power cycles
3793 * @cmd_details: pointer to command details structure or NULL
3794 *
3795 * Stop or Shutdown the embedded LLDP Agent
3796 **/
3797i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3798 bool persist,
3799 struct i40e_asq_cmd_details *cmd_details)
3800{
3801 struct i40e_aq_desc desc;
3802 struct i40e_aqc_lldp_stop *cmd =
3803 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3804 i40e_status status;
3805
3806 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3807
3808 if (shutdown_agent)
3809 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3810
3811 if (persist) {
3812 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3813 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3814 else
3815 i40e_debug(hw, I40E_DEBUG_ALL,
3816 "Persistent Stop LLDP not supported by current FW version.\n");
3817 }
3818
3819 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3820
3821 return status;
3822}
3823
3824/**
3825 * i40e_aq_start_lldp
3826 * @hw: pointer to the hw struct
3827 * @persist: True if start of LLDP should be persistent across power cycles
3828 * @cmd_details: pointer to command details structure or NULL
3829 *
3830 * Start the embedded LLDP Agent on all ports.
3831 **/
3832i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3833 struct i40e_asq_cmd_details *cmd_details)
3834{
3835 struct i40e_aq_desc desc;
3836 struct i40e_aqc_lldp_start *cmd =
3837 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3838 i40e_status status;
3839
3840 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3841
3842 cmd->command = I40E_AQ_LLDP_AGENT_START;
3843
3844 if (persist) {
3845 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3846 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3847 else
3848 i40e_debug(hw, I40E_DEBUG_ALL,
3849 "Persistent Start LLDP not supported by current FW version.\n");
3850 }
3851
3852 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3853
3854 return status;
3855}
3856
3857/**
3858 * i40e_aq_set_dcb_parameters
3859 * @hw: pointer to the hw struct
3860 * @cmd_details: pointer to command details structure or NULL
3861 * @dcb_enable: True if DCB configuration needs to be applied
 *
 * Apply DCB parameters in the firmware; when @dcb_enable is true the
 * firmware DCB agent is requested to be enabled.
 **/
3864enum i40e_status_code
3865i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3866 struct i40e_asq_cmd_details *cmd_details)
3867{
3868 struct i40e_aq_desc desc;
3869 struct i40e_aqc_set_dcb_parameters *cmd =
3870 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3871 i40e_status status;
3872
3873 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3874 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3875
3876 i40e_fill_default_direct_cmd_desc(&desc,
3877 i40e_aqc_opc_set_dcb_parameters);
3878
3879 if (dcb_enable) {
3880 cmd->valid_flags = I40E_DCB_VALID;
3881 cmd->command = I40E_AQ_DCB_SET_AGENT;
3882 }
3883 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3884
3885 return status;
3886}
3887
3888/**
3889 * i40e_aq_get_cee_dcb_config
3890 * @hw: pointer to the hw struct
3891 * @buff: response buffer that stores CEE operational configuration
3892 * @buff_size: size of the buffer passed
3893 * @cmd_details: pointer to command details structure or NULL
3894 *
3895 * Get CEE DCBX mode operational configuration from firmware
3896 **/
3897i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3898 void *buff, u16 buff_size,
3899 struct i40e_asq_cmd_details *cmd_details)
3900{
3901 struct i40e_aq_desc desc;
3902 i40e_status status;
3903
3904 if (buff_size == 0 || !buff)
3905 return I40E_ERR_PARAM;
3906
3907 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3908
3909 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3910 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3911 cmd_details);
3912
3913 return status;
3914}
3915
3916/**
3917 * i40e_aq_add_udp_tunnel
3918 * @hw: pointer to the hw struct
3919 * @udp_port: the UDP port to add in Host byte order
3920 * @protocol_index: protocol index type
3921 * @filter_index: pointer to filter index
3922 * @cmd_details: pointer to command details structure or NULL
3923 *
3924 * Note: Firmware expects the udp_port value to be in Little Endian format,
3925 * and this function will call cpu_to_le16 to convert from Host byte order to
3926 * Little Endian order.
3927 **/
3928i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3929 u16 udp_port, u8 protocol_index,
3930 u8 *filter_index,
3931 struct i40e_asq_cmd_details *cmd_details)
3932{
3933 struct i40e_aq_desc desc;
3934 struct i40e_aqc_add_udp_tunnel *cmd =
3935 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3936 struct i40e_aqc_del_udp_tunnel_completion *resp =
3937 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3938 i40e_status status;
3939
3940 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3941
3942 cmd->udp_port = cpu_to_le16(udp_port);
3943 cmd->protocol_type = protocol_index;
3944
3945 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3946
3947 if (!status && filter_index)
3948 *filter_index = resp->index;
3949
3950 return status;
3951}
3952
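/* Minimal usage sketch (port, tunnel type and error handling are
 * illustrative; I40E_AQC_TUNNEL_TYPE_VXLAN is assumed to come from
 * i40e_adminq_cmd.h): the port is passed in host byte order and the filter
 * is later removed with the index returned by firmware:
 *
 *	u8 filter_index = 0;
 *
 *	if (!i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *				    &filter_index, NULL))
 *		i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
 */
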
3953/**
3954 * i40e_aq_del_udp_tunnel
3955 * @hw: pointer to the hw struct
3956 * @index: filter index
3957 * @cmd_details: pointer to command details structure or NULL
3958 **/
3959i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3960 struct i40e_asq_cmd_details *cmd_details)
3961{
3962 struct i40e_aq_desc desc;
3963 struct i40e_aqc_remove_udp_tunnel *cmd =
3964 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3965 i40e_status status;
3966
3967 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3968
3969 cmd->index = index;
3970
3971 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3972
3973 return status;
3974}
3975
3976/**
3977 * i40e_aq_delete_element - Delete switch element
3978 * @hw: pointer to the hw struct
3979 * @seid: the SEID to delete from the switch
3980 * @cmd_details: pointer to command details structure or NULL
3981 *
3982 * This deletes a switch element from the switch.
3983 **/
3984i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3985 struct i40e_asq_cmd_details *cmd_details)
3986{
3987 struct i40e_aq_desc desc;
3988 struct i40e_aqc_switch_seid *cmd =
3989 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3990 i40e_status status;
3991
3992 if (seid == 0)
3993 return I40E_ERR_PARAM;
3994
3995 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3996
3997 cmd->seid = cpu_to_le16(seid);
3998
3999 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
4000 cmd_details, true);
4001
4002 return status;
4003}
4004
4005/**
4006 * i40e_aq_dcb_updated - DCB Updated Command
4007 * @hw: pointer to the hw struct
4008 * @cmd_details: pointer to command details structure or NULL
4009 *
4010 * EMP will return when the shared RPB settings have been
4011 * recomputed and modified. The retval field in the descriptor
4012 * will be set to 0 when RPB is modified.
4013 **/
4014i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
4015 struct i40e_asq_cmd_details *cmd_details)
4016{
4017 struct i40e_aq_desc desc;
4018 i40e_status status;
4019
4020 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
4021
4022 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4023
4024 return status;
4025}
4026
4027/**
4028 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
4029 * @hw: pointer to the hw struct
4030 * @seid: seid for the physical port/switching component/vsi
4031 * @buff: Indirect buffer to hold data parameters and response
4032 * @buff_size: Indirect buffer size
4033 * @opcode: Tx scheduler AQ command opcode
4034 * @cmd_details: pointer to command details structure or NULL
4035 *
4036 * Generic command handler for Tx scheduler AQ commands
4037 **/
4038static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
4039 void *buff, u16 buff_size,
4040 enum i40e_admin_queue_opc opcode,
4041 struct i40e_asq_cmd_details *cmd_details)
4042{
4043 struct i40e_aq_desc desc;
4044 struct i40e_aqc_tx_sched_ind *cmd =
4045 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4046 i40e_status status;
4047 bool cmd_param_flag = false;
4048
4049 switch (opcode) {
4050 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
4051 case i40e_aqc_opc_configure_vsi_tc_bw:
4052 case i40e_aqc_opc_enable_switching_comp_ets:
4053 case i40e_aqc_opc_modify_switching_comp_ets:
4054 case i40e_aqc_opc_disable_switching_comp_ets:
4055 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
4056 case i40e_aqc_opc_configure_switching_comp_bw_config:
4057 cmd_param_flag = true;
4058 break;
4059 case i40e_aqc_opc_query_vsi_bw_config:
4060 case i40e_aqc_opc_query_vsi_ets_sla_config:
4061 case i40e_aqc_opc_query_switching_comp_ets_config:
4062 case i40e_aqc_opc_query_port_ets_config:
4063 case i40e_aqc_opc_query_switching_comp_bw_config:
4064 cmd_param_flag = false;
4065 break;
4066 default:
4067 return I40E_ERR_PARAM;
4068 }
4069
4070 i40e_fill_default_direct_cmd_desc(&desc, opcode);
4071
4072 /* Indirect command */
4073 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4074 if (cmd_param_flag)
4075 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4076 if (buff_size > I40E_AQ_LARGE_BUF)
4077 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4078
4079 desc.datalen = cpu_to_le16(buff_size);
4080
4081 cmd->vsi_seid = cpu_to_le16(seid);
4082
4083 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4084
4085 return status;
4086}
4087
4088/**
4089 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
4090 * @hw: pointer to the hw struct
4091 * @seid: VSI seid
4092 * @credit: BW limit credits (0 = disabled)
4093 * @max_credit: Max BW limit credits
4094 * @cmd_details: pointer to command details structure or NULL
4095 **/
4096i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
4097 u16 seid, u16 credit, u8 max_credit,
4098 struct i40e_asq_cmd_details *cmd_details)
4099{
4100 struct i40e_aq_desc desc;
4101 struct i40e_aqc_configure_vsi_bw_limit *cmd =
4102 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
4103 i40e_status status;
4104
4105 i40e_fill_default_direct_cmd_desc(&desc,
4106 i40e_aqc_opc_configure_vsi_bw_limit);
4107
4108 cmd->vsi_seid = cpu_to_le16(seid);
4109 cmd->credit = cpu_to_le16(credit);
4110 cmd->max_credit = max_credit;
4111
4112 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4113
4114 return status;
4115}
4116
4117/**
4118 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
4119 * @hw: pointer to the hw struct
4120 * @seid: VSI seid
4121 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
4122 * @cmd_details: pointer to command details structure or NULL
4123 **/
4124i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
4125 u16 seid,
4126 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
4127 struct i40e_asq_cmd_details *cmd_details)
4128{
4129 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4130 i40e_aqc_opc_configure_vsi_tc_bw,
4131 cmd_details);
4132}
4133
4134/**
4135 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4136 * @hw: pointer to the hw struct
4137 * @seid: seid of the switching component connected to Physical Port
4138 * @ets_data: Buffer holding ETS parameters
4139 * @opcode: Tx scheduler AQ command opcode
4140 * @cmd_details: pointer to command details structure or NULL
4141 **/
4142i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4143 u16 seid,
4144 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4145 enum i40e_admin_queue_opc opcode,
4146 struct i40e_asq_cmd_details *cmd_details)
4147{
4148 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4149 sizeof(*ets_data), opcode, cmd_details);
4150}
4151
4152/**
4153 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4154 * @hw: pointer to the hw struct
4155 * @seid: seid of the switching component
4156 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4157 * @cmd_details: pointer to command details structure or NULL
4158 **/
4159i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4160 u16 seid,
4161 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4162 struct i40e_asq_cmd_details *cmd_details)
4163{
4164 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4165 i40e_aqc_opc_configure_switching_comp_bw_config,
4166 cmd_details);
4167}
4168
4169/**
4170 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4171 * @hw: pointer to the hw struct
4172 * @seid: seid of the VSI
4173 * @bw_data: Buffer to hold VSI BW configuration
4174 * @cmd_details: pointer to command details structure or NULL
4175 **/
4176i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4177 u16 seid,
4178 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4179 struct i40e_asq_cmd_details *cmd_details)
4180{
4181 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4182 i40e_aqc_opc_query_vsi_bw_config,
4183 cmd_details);
4184}
4185
4186/**
4187 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4188 * @hw: pointer to the hw struct
4189 * @seid: seid of the VSI
4190 * @bw_data: Buffer to hold VSI BW configuration per TC
4191 * @cmd_details: pointer to command details structure or NULL
4192 **/
4193i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4194 u16 seid,
4195 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4196 struct i40e_asq_cmd_details *cmd_details)
4197{
4198 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4199 i40e_aqc_opc_query_vsi_ets_sla_config,
4200 cmd_details);
4201}
4202
4203/**
4204 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4205 * @hw: pointer to the hw struct
4206 * @seid: seid of the switching component
4207 * @bw_data: Buffer to hold switching component's per TC BW config
4208 * @cmd_details: pointer to command details structure or NULL
4209 **/
4210i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4211 u16 seid,
4212 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4213 struct i40e_asq_cmd_details *cmd_details)
4214{
4215 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4216 i40e_aqc_opc_query_switching_comp_ets_config,
4217 cmd_details);
4218}
4219
4220/**
4221 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4222 * @hw: pointer to the hw struct
4223 * @seid: seid of the VSI or switching component connected to Physical Port
4224 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4225 * @cmd_details: pointer to command details structure or NULL
4226 **/
4227i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4228 u16 seid,
4229 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4230 struct i40e_asq_cmd_details *cmd_details)
4231{
4232 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4233 i40e_aqc_opc_query_port_ets_config,
4234 cmd_details);
4235}
4236
4237/**
4238 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4239 * @hw: pointer to the hw struct
4240 * @seid: seid of the switching component
4241 * @bw_data: Buffer to hold switching component's BW configuration
4242 * @cmd_details: pointer to command details structure or NULL
4243 **/
4244i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4245 u16 seid,
4246 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4247 struct i40e_asq_cmd_details *cmd_details)
4248{
4249 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4250 i40e_aqc_opc_query_switching_comp_bw_config,
4251 cmd_details);
4252}
4253
4254/**
4255 * i40e_validate_filter_settings
4256 * @hw: pointer to the hardware structure
4257 * @settings: Filter control settings
4258 *
 * The function checks that valid filter and context sizes are passed
 * for FCoE and PE.
4261 * passed for FCoE and PE.
4262 *
4263 * Returns 0 if the values passed are valid and within
4264 * range else returns an error.
4265 **/
4266static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4267 struct i40e_filter_control_settings *settings)
4268{
4269 u32 fcoe_cntx_size, fcoe_filt_size;
4270 u32 fcoe_fmax;
4271 u32 val;
4272
4273 /* Validate FCoE settings passed */
4274 switch (settings->fcoe_filt_num) {
4275 case I40E_HASH_FILTER_SIZE_1K:
4276 case I40E_HASH_FILTER_SIZE_2K:
4277 case I40E_HASH_FILTER_SIZE_4K:
4278 case I40E_HASH_FILTER_SIZE_8K:
4279 case I40E_HASH_FILTER_SIZE_16K:
4280 case I40E_HASH_FILTER_SIZE_32K:
4281 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4282 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4283 break;
4284 default:
4285 return I40E_ERR_PARAM;
4286 }
4287
4288 switch (settings->fcoe_cntx_num) {
4289 case I40E_DMA_CNTX_SIZE_512:
4290 case I40E_DMA_CNTX_SIZE_1K:
4291 case I40E_DMA_CNTX_SIZE_2K:
4292 case I40E_DMA_CNTX_SIZE_4K:
4293 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4294 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4295 break;
4296 default:
4297 return I40E_ERR_PARAM;
4298 }
4299
4300 /* Validate PE settings passed */
4301 switch (settings->pe_filt_num) {
4302 case I40E_HASH_FILTER_SIZE_1K:
4303 case I40E_HASH_FILTER_SIZE_2K:
4304 case I40E_HASH_FILTER_SIZE_4K:
4305 case I40E_HASH_FILTER_SIZE_8K:
4306 case I40E_HASH_FILTER_SIZE_16K:
4307 case I40E_HASH_FILTER_SIZE_32K:
4308 case I40E_HASH_FILTER_SIZE_64K:
4309 case I40E_HASH_FILTER_SIZE_128K:
4310 case I40E_HASH_FILTER_SIZE_256K:
4311 case I40E_HASH_FILTER_SIZE_512K:
4312 case I40E_HASH_FILTER_SIZE_1M:
4313 break;
4314 default:
4315 return I40E_ERR_PARAM;
4316 }
4317
4318 switch (settings->pe_cntx_num) {
4319 case I40E_DMA_CNTX_SIZE_512:
4320 case I40E_DMA_CNTX_SIZE_1K:
4321 case I40E_DMA_CNTX_SIZE_2K:
4322 case I40E_DMA_CNTX_SIZE_4K:
4323 case I40E_DMA_CNTX_SIZE_8K:
4324 case I40E_DMA_CNTX_SIZE_16K:
4325 case I40E_DMA_CNTX_SIZE_32K:
4326 case I40E_DMA_CNTX_SIZE_64K:
4327 case I40E_DMA_CNTX_SIZE_128K:
4328 case I40E_DMA_CNTX_SIZE_256K:
4329 break;
4330 default:
4331 return I40E_ERR_PARAM;
4332 }
4333
4334 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4335 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4336 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4337 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4338 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4339 return I40E_ERR_INVALID_SIZE;
4340
4341 return 0;
4342}
4343
4344/**
4345 * i40e_set_filter_control
4346 * @hw: pointer to the hardware structure
4347 * @settings: Filter control settings
4348 *
4349 * Set the Queue Filters for PE/FCoE and enable filters required
4350 * for a single PF. It is expected that these settings are programmed
4351 * at the driver initialization time.
4352 **/
4353i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4354 struct i40e_filter_control_settings *settings)
4355{
4356 i40e_status ret = 0;
4357 u32 hash_lut_size = 0;
4358 u32 val;
4359
4360 if (!settings)
4361 return I40E_ERR_PARAM;
4362
4363 /* Validate the input settings */
4364 ret = i40e_validate_filter_settings(hw, settings);
4365 if (ret)
4366 return ret;
4367
4368 /* Read the PF Queue Filter control register */
4369 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4370
4371 /* Program required PE hash buckets for the PF */
4372 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4373 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4374 I40E_PFQF_CTL_0_PEHSIZE_MASK;
4375 /* Program required PE contexts for the PF */
4376 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4377 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4378 I40E_PFQF_CTL_0_PEDSIZE_MASK;
4379
4380 /* Program required FCoE hash buckets for the PF */
4381 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4382 val |= ((u32)settings->fcoe_filt_num <<
4383 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4384 I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4385 /* Program required FCoE DDP contexts for the PF */
4386 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4387 val |= ((u32)settings->fcoe_cntx_num <<
4388 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4389 I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4390
4391 /* Program Hash LUT size for the PF */
4392 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4393 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4394 hash_lut_size = 1;
4395 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4396 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4397
4398 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4399 if (settings->enable_fdir)
4400 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4401 if (settings->enable_ethtype)
4402 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4403 if (settings->enable_macvlan)
4404 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4405
4406 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4407
4408 return 0;
4409}
4410
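/* Hedged configuration sketch: the settings structure is normally filled
 * once at probe time before calling this helper (the values below are
 * illustrative, not recommended defaults; I40E_HASH_LUT_SIZE_128 is assumed
 * to come from the shared type definitions):
 *
 *	struct i40e_filter_control_settings settings = {};
 *	i40e_status err;
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	err = i40e_set_filter_control(hw, &settings);
 */
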
4411/**
4412 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4413 * @hw: pointer to the hw struct
4414 * @mac_addr: MAC address to use in the filter
4415 * @ethtype: Ethertype to use in the filter
 * @flags: Flags that need to be applied to the filter
4417 * @vsi_seid: seid of the control VSI
4418 * @queue: VSI queue number to send the packet to
4419 * @is_add: Add control packet filter if True else remove
4420 * @stats: Structure to hold information on control filter counts
4421 * @cmd_details: pointer to command details structure or NULL
4422 *
 * This command adds or removes a control packet filter for a control VSI.
 * On completion it updates the perfect filter counts in the stats member.
4426 **/
4427i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4428 u8 *mac_addr, u16 ethtype, u16 flags,
4429 u16 vsi_seid, u16 queue, bool is_add,
4430 struct i40e_control_filter_stats *stats,
4431 struct i40e_asq_cmd_details *cmd_details)
4432{
4433 struct i40e_aq_desc desc;
4434 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4435 (struct i40e_aqc_add_remove_control_packet_filter *)
4436 &desc.params.raw;
4437 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4438 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4439 &desc.params.raw;
4440 i40e_status status;
4441
4442 if (vsi_seid == 0)
4443 return I40E_ERR_PARAM;
4444
4445 if (is_add) {
4446 i40e_fill_default_direct_cmd_desc(&desc,
4447 i40e_aqc_opc_add_control_packet_filter);
4448 cmd->queue = cpu_to_le16(queue);
4449 } else {
4450 i40e_fill_default_direct_cmd_desc(&desc,
4451 i40e_aqc_opc_remove_control_packet_filter);
4452 }
4453
4454 if (mac_addr)
4455 ether_addr_copy(cmd->mac, mac_addr);
4456
4457 cmd->etype = cpu_to_le16(ethtype);
4458 cmd->flags = cpu_to_le16(flags);
4459 cmd->seid = cpu_to_le16(vsi_seid);
4460
4461 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4462
4463 if (!status && stats) {
4464 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4465 stats->etype_used = le16_to_cpu(resp->etype_used);
4466 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4467 stats->etype_free = le16_to_cpu(resp->etype_free);
4468 }
4469
4470 return status;
4471}
4472
4473/**
 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
 * @hw: pointer to the hw struct
 * @seid: VSI seid on which to add the ethertype filter
4477 **/
4478void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4479 u16 seid)
4480{
4481#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4482 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4483 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4484 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4485 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4486 i40e_status status;
4487
4488 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4489 seid, 0, true, NULL,
4490 NULL);
4491 if (status)
4492 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4493}
4494
4495/**
4496 * i40e_aq_alternate_read
4497 * @hw: pointer to the hardware structure
4498 * @reg_addr0: address of first dword to be read
4499 * @reg_val0: pointer for data read from 'reg_addr0'
4500 * @reg_addr1: address of second dword to be read
4501 * @reg_val1: pointer for data read from 'reg_addr1'
4502 *
4503 * Read one or two dwords from alternate structure. Fields are indicated
4504 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4505 * is not passed then only register at 'reg_addr0' is read.
4506 *
4507 **/
4508static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4509 u32 reg_addr0, u32 *reg_val0,
4510 u32 reg_addr1, u32 *reg_val1)
4511{
4512 struct i40e_aq_desc desc;
4513 struct i40e_aqc_alternate_write *cmd_resp =
4514 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4515 i40e_status status;
4516
4517 if (!reg_val0)
4518 return I40E_ERR_PARAM;
4519
4520 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4521 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4522 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4523
4524 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4525
4526 if (!status) {
4527 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4528
4529 if (reg_val1)
4530 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4531 }
4532
4533 return status;
4534}
4535
4536/**
4537 * i40e_aq_suspend_port_tx
4538 * @hw: pointer to the hardware structure
4539 * @seid: port seid
4540 * @cmd_details: pointer to command details structure or NULL
4541 *
4542 * Suspend port's Tx traffic
4543 **/
4544i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4545 struct i40e_asq_cmd_details *cmd_details)
4546{
4547 struct i40e_aqc_tx_sched_ind *cmd;
4548 struct i40e_aq_desc desc;
4549 i40e_status status;
4550
4551 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4552 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4553 cmd->vsi_seid = cpu_to_le16(seid);
4554 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4555
4556 return status;
4557}
4558
4559/**
4560 * i40e_aq_resume_port_tx
4561 * @hw: pointer to the hardware structure
4562 * @cmd_details: pointer to command details structure or NULL
4563 *
4564 * Resume port's Tx traffic
4565 **/
4566i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4567 struct i40e_asq_cmd_details *cmd_details)
4568{
4569 struct i40e_aq_desc desc;
4570 i40e_status status;
4571
4572 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4573
4574 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4575
4576 return status;
4577}
4578
4579/**
4580 * i40e_set_pci_config_data - store PCI bus info
4581 * @hw: pointer to hardware structure
4582 * @link_status: the link status word from PCI config space
4583 *
4584 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4585 **/
4586void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4587{
4588 hw->bus.type = i40e_bus_type_pci_express;
4589
4590 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4591 case PCI_EXP_LNKSTA_NLW_X1:
4592 hw->bus.width = i40e_bus_width_pcie_x1;
4593 break;
4594 case PCI_EXP_LNKSTA_NLW_X2:
4595 hw->bus.width = i40e_bus_width_pcie_x2;
4596 break;
4597 case PCI_EXP_LNKSTA_NLW_X4:
4598 hw->bus.width = i40e_bus_width_pcie_x4;
4599 break;
4600 case PCI_EXP_LNKSTA_NLW_X8:
4601 hw->bus.width = i40e_bus_width_pcie_x8;
4602 break;
4603 default:
4604 hw->bus.width = i40e_bus_width_unknown;
4605 break;
4606 }
4607
4608 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4609 case PCI_EXP_LNKSTA_CLS_2_5GB:
4610 hw->bus.speed = i40e_bus_speed_2500;
4611 break;
4612 case PCI_EXP_LNKSTA_CLS_5_0GB:
4613 hw->bus.speed = i40e_bus_speed_5000;
4614 break;
4615 case PCI_EXP_LNKSTA_CLS_8_0GB:
4616 hw->bus.speed = i40e_bus_speed_8000;
4617 break;
4618 default:
4619 hw->bus.speed = i40e_bus_speed_unknown;
4620 break;
4621 }
4622}
4623
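/* Worked example (the link_status value is illustrative): a PCIe link status
 * word of 0x0083 contains PCI_EXP_LNKSTA_NLW_X8 in the negotiated-width
 * field and PCI_EXP_LNKSTA_CLS_8_0GB in the current-speed field, so this
 * helper stores i40e_bus_width_pcie_x8 and i40e_bus_speed_8000.
 */
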
4624/**
4625 * i40e_aq_debug_dump
4626 * @hw: pointer to the hardware structure
4627 * @cluster_id: specific cluster to dump
4628 * @table_id: table id within cluster
4629 * @start_index: index of line in the block to read
4630 * @buff_size: dump buffer size
4631 * @buff: dump buffer
4632 * @ret_buff_size: actual buffer size returned
4633 * @ret_next_table: next block to read
4634 * @ret_next_index: next index to read
4635 * @cmd_details: pointer to command details structure or NULL
4636 *
4637 * Dump internal FW/HW data for debug purposes.
4638 *
4639 **/
4640i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4641 u8 table_id, u32 start_index, u16 buff_size,
4642 void *buff, u16 *ret_buff_size,
4643 u8 *ret_next_table, u32 *ret_next_index,
4644 struct i40e_asq_cmd_details *cmd_details)
4645{
4646 struct i40e_aq_desc desc;
4647 struct i40e_aqc_debug_dump_internals *cmd =
4648 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4649 struct i40e_aqc_debug_dump_internals *resp =
4650 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4651 i40e_status status;
4652
4653 if (buff_size == 0 || !buff)
4654 return I40E_ERR_PARAM;
4655
4656 i40e_fill_default_direct_cmd_desc(&desc,
4657 i40e_aqc_opc_debug_dump_internals);
4658 /* Indirect Command */
4659 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4660 if (buff_size > I40E_AQ_LARGE_BUF)
4661 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4662
4663 cmd->cluster_id = cluster_id;
4664 cmd->table_id = table_id;
4665 cmd->idx = cpu_to_le32(start_index);
4666
4667 desc.datalen = cpu_to_le16(buff_size);
4668
4669 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4670 if (!status) {
4671 if (ret_buff_size)
4672 *ret_buff_size = le16_to_cpu(desc.datalen);
4673 if (ret_next_table)
4674 *ret_next_table = resp->table_id;
4675 if (ret_next_index)
4676 *ret_next_index = le32_to_cpu(resp->idx);
4677 }
4678
4679 return status;
4680}
4681
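/* Hedged usage sketch: the next-table/next-index outputs let a debug tool
 * walk a cluster in chunks (buffer management and termination policy are
 * illustrative):
 *
 *	u8 next_table = table_id;
 *	u32 next_index = 0;
 *	u16 got = 0;
 *	i40e_status err;
 *
 *	err = i40e_aq_debug_dump(hw, cluster_id, next_table, next_index,
 *				 buff_size, buff, &got, &next_table,
 *				 &next_index, NULL);
 *
 * and the call is simply repeated with the returned next_table/next_index
 * until the tool has collected as much data as it needs.
 */
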
4682/**
4683 * i40e_read_bw_from_alt_ram
4684 * @hw: pointer to the hardware structure
4685 * @max_bw: pointer for max_bw read
4686 * @min_bw: pointer for min_bw read
4687 * @min_valid: pointer for bool that is true if min_bw is a valid value
4688 * @max_valid: pointer for bool that is true if max_bw is a valid value
4689 *
4690 * Read bw from the alternate ram for the given pf
4691 **/
4692i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4693 u32 *max_bw, u32 *min_bw,
4694 bool *min_valid, bool *max_valid)
4695{
4696 i40e_status status;
4697 u32 max_bw_addr, min_bw_addr;
4698
4699 /* Calculate the address of the min/max bw registers */
4700 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4701 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4702 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4703 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4704 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4705 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4706
4707 /* Read the bandwidths from alt ram */
4708 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4709 min_bw_addr, min_bw);
4710
4711 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4712 *min_valid = true;
4713 else
4714 *min_valid = false;
4715
4716 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4717 *max_valid = true;
4718 else
4719 *max_valid = false;
4720
4721 return status;
4722}
4723
4724/**
4725 * i40e_aq_configure_partition_bw
4726 * @hw: pointer to the hardware structure
4727 * @bw_data: Buffer holding valid pfs and bw limits
4728 * @cmd_details: pointer to command details
4729 *
4730 * Configure partitions guaranteed/max bw
4731 **/
4732i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4733 struct i40e_aqc_configure_partition_bw_data *bw_data,
4734 struct i40e_asq_cmd_details *cmd_details)
4735{
4736 i40e_status status;
4737 struct i40e_aq_desc desc;
4738 u16 bwd_size = sizeof(*bw_data);
4739
4740 i40e_fill_default_direct_cmd_desc(&desc,
4741 i40e_aqc_opc_configure_partition_bw);
4742
4743 /* Indirect command */
4744 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4745 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4746
4747 if (bwd_size > I40E_AQ_LARGE_BUF)
4748 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4749
4750 desc.datalen = cpu_to_le16(bwd_size);
4751
4752 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4753 cmd_details);
4754
4755 return status;
4756}
4757
4758/**
4759 * i40e_read_phy_register_clause22
4760 * @hw: pointer to the HW structure
4761 * @reg: register address in the page
4762 * @phy_addr: PHY address on MDIO interface
4763 * @value: PHY register value
4764 *
4765 * Reads specified PHY register value
4766 **/
4767i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4768 u16 reg, u8 phy_addr, u16 *value)
4769{
4770 i40e_status status = I40E_ERR_TIMEOUT;
4771 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4772 u32 command = 0;
4773 u16 retry = 1000;
4774
4775 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4776 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4777 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4778 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4779 (I40E_GLGEN_MSCA_MDICMD_MASK);
4780 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4781 do {
4782 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4783 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4784 status = 0;
4785 break;
4786 }
4787 udelay(10);
4788 retry--;
4789 } while (retry);
4790
4791 if (status) {
4792 i40e_debug(hw, I40E_DEBUG_PHY,
4793 "PHY: Can't write command to external PHY.\n");
4794 } else {
4795 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4796 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4797 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4798 }
4799
4800 return status;
4801}
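
/* Note on the poll loop above (the same pattern repeats in the write and
 * clause-45 helpers below): writing MSCA with MDICMD set starts the MDIO
 * transaction, and the loop waits for hardware to clear MDICMD, giving up
 * after roughly 1000 retries * 10 us = 10 ms; only on success is the data
 * read back from MSRWD.
 */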
4802
4803/**
4804 * i40e_write_phy_register_clause22
4805 * @hw: pointer to the HW structure
4806 * @reg: register address in the page
4807 * @phy_addr: PHY address on MDIO interface
4808 * @value: PHY register value
4809 *
4810 * Writes specified PHY register value
4811 **/
4812i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4813 u16 reg, u8 phy_addr, u16 value)
4814{
4815 i40e_status status = I40E_ERR_TIMEOUT;
4816 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4817 u32 command = 0;
4818 u16 retry = 1000;
4819
4820 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4821 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4822
4823 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4824 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4825 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4826 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4827 (I40E_GLGEN_MSCA_MDICMD_MASK);
4828
4829 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4830 do {
4831 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4832 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4833 status = 0;
4834 break;
4835 }
4836 udelay(10);
4837 retry--;
4838 } while (retry);
4839
4840 return status;
4841}
4842
4843/**
4844 * i40e_read_phy_register_clause45
4845 * @hw: pointer to the HW structure
4846 * @page: registers page number
4847 * @reg: register address in the page
4848 * @phy_addr: PHY address on MDIO interface
4849 * @value: PHY register value
4850 *
4851 * Reads specified PHY register value
4852 **/
4853i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4854 u8 page, u16 reg, u8 phy_addr, u16 *value)
4855{
4856 i40e_status status = I40E_ERR_TIMEOUT;
4857 u32 command = 0;
4858 u16 retry = 1000;
4859 u8 port_num = hw->func_caps.mdio_port_num;
4860
4861 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4862 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4863 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4864 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4865 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4866 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4867 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4868 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4869 do {
4870 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4871 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4872 status = 0;
4873 break;
4874 }
4875 usleep_range(10, 20);
4876 retry--;
4877 } while (retry);
4878
4879 if (status) {
4880 i40e_debug(hw, I40E_DEBUG_PHY,
4881 "PHY: Can't write command to external PHY.\n");
4882 goto phy_read_end;
4883 }
4884
4885 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4886 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4887 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4888 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4889 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4890 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4891 status = I40E_ERR_TIMEOUT;
4892 retry = 1000;
4893 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4894 do {
4895 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4896 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4897 status = 0;
4898 break;
4899 }
4900 usleep_range(10, 20);
4901 retry--;
4902 } while (retry);
4903
4904 if (!status) {
4905 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4906 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4907 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4908 } else {
4909 i40e_debug(hw, I40E_DEBUG_PHY,
4910 "PHY: Can't read register value from external PHY.\n");
4911 }
4912
4913phy_read_end:
4914 return status;
4915}
4916
4917/**
4918 * i40e_write_phy_register_clause45
4919 * @hw: pointer to the HW structure
4920 * @page: registers page number
4921 * @reg: register address in the page
4922 * @phy_addr: PHY address on MDIO interface
4923 * @value: PHY register value
4924 *
4925 * Writes value to specified PHY register
4926 **/
4927i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4928 u8 page, u16 reg, u8 phy_addr, u16 value)
4929{
4930 i40e_status status = I40E_ERR_TIMEOUT;
4931 u32 command = 0;
4932 u16 retry = 1000;
4933 u8 port_num = hw->func_caps.mdio_port_num;
4934
4935 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4936 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4937 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4938 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4939 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4940 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4941 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4942 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4943 do {
4944 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4945 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4946 status = 0;
4947 break;
4948 }
4949 usleep_range(10, 20);
4950 retry--;
4951 } while (retry);
4952 if (status) {
4953 i40e_debug(hw, I40E_DEBUG_PHY,
4954 "PHY: Can't write command to external PHY.\n");
4955 goto phy_write_end;
4956 }
4957
4958 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4959 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4960
4961 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4962 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4963 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4964 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4965 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4966 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4967 status = I40E_ERR_TIMEOUT;
4968 retry = 1000;
4969 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4970 do {
4971 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4972 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4973 status = 0;
4974 break;
4975 }
4976 usleep_range(10, 20);
4977 retry--;
4978 } while (retry);
4979
4980phy_write_end:
4981 return status;
4982}
4983
4984/**
4985 * i40e_write_phy_register
4986 * @hw: pointer to the HW structure
4987 * @page: registers page number
4988 * @reg: register address in the page
4989 * @phy_addr: PHY address on MDIO interface
4990 * @value: PHY register value
4991 *
4992 * Writes value to specified PHY register
4993 **/
4994i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4995 u8 page, u16 reg, u8 phy_addr, u16 value)
4996{
4997 i40e_status status;
4998
4999 switch (hw->device_id) {
5000 case I40E_DEV_ID_1G_BASE_T_X722:
5001 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
5002 value);
5003 break;
5004 case I40E_DEV_ID_1G_BASE_T_BC:
5005 case I40E_DEV_ID_5G_BASE_T_BC:
5006 case I40E_DEV_ID_10G_BASE_T:
5007 case I40E_DEV_ID_10G_BASE_T4:
5008 case I40E_DEV_ID_10G_BASE_T_BC:
5009 case I40E_DEV_ID_10G_BASE_T_X722:
5010 case I40E_DEV_ID_25G_B:
5011 case I40E_DEV_ID_25G_SFP28:
5012 status = i40e_write_phy_register_clause45(hw, page, reg,
5013 phy_addr, value);
5014 break;
5015 default:
5016 status = I40E_ERR_UNKNOWN_PHY;
5017 break;
5018 }
5019
5020 return status;
5021}
5022
5023/**
5024 * i40e_read_phy_register
5025 * @hw: pointer to the HW structure
5026 * @page: registers page number
5027 * @reg: register address in the page
5028 * @phy_addr: PHY address on MDIO interface
5029 * @value: PHY register value
5030 *
5031 * Reads specified PHY register value
5032 **/
5033i40e_status i40e_read_phy_register(struct i40e_hw *hw,
5034 u8 page, u16 reg, u8 phy_addr, u16 *value)
5035{
5036 i40e_status status;
5037
5038 switch (hw->device_id) {
5039 case I40E_DEV_ID_1G_BASE_T_X722:
5040 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
5041 value);
5042 break;
5043 case I40E_DEV_ID_1G_BASE_T_BC:
5044 case I40E_DEV_ID_5G_BASE_T_BC:
5045 case I40E_DEV_ID_10G_BASE_T:
5046 case I40E_DEV_ID_10G_BASE_T4:
5047 case I40E_DEV_ID_10G_BASE_T_BC:
5048 case I40E_DEV_ID_10G_BASE_T_X722:
5049 case I40E_DEV_ID_25G_B:
5050 case I40E_DEV_ID_25G_SFP28:
5051 status = i40e_read_phy_register_clause45(hw, page, reg,
5052 phy_addr, value);
5053 break;
5054 default:
5055 status = I40E_ERR_UNKNOWN_PHY;
5056 break;
5057 }
5058
5059 return status;
5060}
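
/* Usage sketch (illustrative only): reading an external PHY register via the
 * device-aware wrappers above; the page/register pair shown is just an
 * example, and phy_addr would come from i40e_get_phy_address() below:
 *
 *	u16 val = 0;
 *
 *	if (!i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
 *				    I40E_PHY_LED_PROV_REG_1, phy_addr, &val))
 *		... val now holds the register contents ...
 */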
5061
5062/**
5063 * i40e_get_phy_address
5064 * @hw: pointer to the HW structure
 5065 * @dev_num: PHY port number whose address we want
5066 *
5067 * Gets PHY address for current port
5068 **/
5069u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
5070{
5071 u8 port_num = hw->func_caps.mdio_port_num;
5072 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
5073
5074 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
5075}
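
/* Note on the extraction above (inferred from the shift and mask used):
 * I40E_GLGEN_MDIO_I2C_SEL appears to pack one 5-bit PHY address per device,
 * with dev_num 0 in bits 9:5, dev_num 1 in bits 14:10, and so on, hence
 * (reg_val >> ((dev_num + 1) * 5)) & 0x1f. For example, reg_val = 0xC40 with
 * dev_num = 0 gives (0xC40 >> 5) & 0x1f = 0x2.
 */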
5076
5077/**
5078 * i40e_blink_phy_link_led
5079 * @hw: pointer to the HW structure
 5080 * @time: how long the LED should blink, in seconds
 5081 * @interval: gap between LED on and off, in milliseconds
5082 *
5083 * Blinks PHY link LED
5084 **/
5085i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
5086 u32 time, u32 interval)
5087{
5088 i40e_status status = 0;
5089 u32 i;
5090 u16 led_ctl;
5091 u16 gpio_led_port;
5092 u16 led_reg;
5093 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
5094 u8 phy_addr = 0;
5095 u8 port_num;
5096
5097 i = rd32(hw, I40E_PFGEN_PORTNUM);
5098 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5099 phy_addr = i40e_get_phy_address(hw, port_num);
5100
5101 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5102 led_addr++) {
5103 status = i40e_read_phy_register_clause45(hw,
5104 I40E_PHY_COM_REG_PAGE,
5105 led_addr, phy_addr,
5106 &led_reg);
5107 if (status)
5108 goto phy_blinking_end;
5109 led_ctl = led_reg;
5110 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5111 led_reg = 0;
5112 status = i40e_write_phy_register_clause45(hw,
5113 I40E_PHY_COM_REG_PAGE,
5114 led_addr, phy_addr,
5115 led_reg);
5116 if (status)
5117 goto phy_blinking_end;
5118 break;
5119 }
5120 }
5121
5122 if (time > 0 && interval > 0) {
5123 for (i = 0; i < time * 1000; i += interval) {
5124 status = i40e_read_phy_register_clause45(hw,
5125 I40E_PHY_COM_REG_PAGE,
5126 led_addr, phy_addr, &led_reg);
5127 if (status)
5128 goto restore_config;
5129 if (led_reg & I40E_PHY_LED_MANUAL_ON)
5130 led_reg = 0;
5131 else
5132 led_reg = I40E_PHY_LED_MANUAL_ON;
5133 status = i40e_write_phy_register_clause45(hw,
5134 I40E_PHY_COM_REG_PAGE,
5135 led_addr, phy_addr, led_reg);
5136 if (status)
5137 goto restore_config;
5138 msleep(interval);
5139 }
5140 }
5141
5142restore_config:
5143 status = i40e_write_phy_register_clause45(hw,
5144 I40E_PHY_COM_REG_PAGE,
5145 led_addr, phy_addr, led_ctl);
5146
5147phy_blinking_end:
5148 return status;
5149}
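
/* Usage sketch (illustrative only): blink the port link LED for 5 seconds,
 * toggling every 500 ms (the numbers are arbitrary example values):
 *
 *	i40e_status status = i40e_blink_phy_link_led(hw, 5, 500);
 *
 *	if (status)
 *		... the PHY LED registers could not be accessed ...
 */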
5150
5151/**
5152 * i40e_led_get_reg - read LED register
5153 * @hw: pointer to the HW structure
5154 * @led_addr: LED register address
5155 * @reg_val: read register value
5156 **/
5157static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5158 u32 *reg_val)
5159{
5160 enum i40e_status_code status;
5161 u8 phy_addr = 0;
5162 u8 port_num;
5163 u32 i;
5164
5165 *reg_val = 0;
5166 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5167 status =
5168 i40e_aq_get_phy_register(hw,
5169 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5170 I40E_PHY_COM_REG_PAGE, true,
5171 I40E_PHY_LED_PROV_REG_1,
5172 reg_val, NULL);
5173 } else {
5174 i = rd32(hw, I40E_PFGEN_PORTNUM);
5175 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5176 phy_addr = i40e_get_phy_address(hw, port_num);
5177 status = i40e_read_phy_register_clause45(hw,
5178 I40E_PHY_COM_REG_PAGE,
5179 led_addr, phy_addr,
5180 (u16 *)reg_val);
5181 }
5182 return status;
5183}
5184
5185/**
5186 * i40e_led_set_reg - write LED register
5187 * @hw: pointer to the HW structure
5188 * @led_addr: LED register address
5189 * @reg_val: register value to write
5190 **/
5191static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5192 u32 reg_val)
5193{
5194 enum i40e_status_code status;
5195 u8 phy_addr = 0;
5196 u8 port_num;
5197 u32 i;
5198
5199 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5200 status =
5201 i40e_aq_set_phy_register(hw,
5202 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5203 I40E_PHY_COM_REG_PAGE, true,
5204 I40E_PHY_LED_PROV_REG_1,
5205 reg_val, NULL);
5206 } else {
5207 i = rd32(hw, I40E_PFGEN_PORTNUM);
5208 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5209 phy_addr = i40e_get_phy_address(hw, port_num);
5210 status = i40e_write_phy_register_clause45(hw,
5211 I40E_PHY_COM_REG_PAGE,
5212 led_addr, phy_addr,
5213 (u16)reg_val);
5214 }
5215
5216 return status;
5217}
5218
5219/**
5220 * i40e_led_get_phy - return current on/off mode
5221 * @hw: pointer to the hw struct
5222 * @led_addr: address of led register to use
5223 * @val: original value of register to use
5224 *
5225 **/
5226i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5227 u16 *val)
5228{
5229 i40e_status status = 0;
5230 u16 gpio_led_port;
5231 u8 phy_addr = 0;
5232 u16 reg_val;
5233 u16 temp_addr;
5234 u8 port_num;
5235 u32 i;
5236 u32 reg_val_aq;
5237
5238 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5239 status =
5240 i40e_aq_get_phy_register(hw,
5241 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5242 I40E_PHY_COM_REG_PAGE, true,
5243 I40E_PHY_LED_PROV_REG_1,
 5244                                      &reg_val_aq, NULL);
5245 if (status == I40E_SUCCESS)
5246 *val = (u16)reg_val_aq;
5247 return status;
5248 }
5249 temp_addr = I40E_PHY_LED_PROV_REG_1;
5250 i = rd32(hw, I40E_PFGEN_PORTNUM);
5251 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5252 phy_addr = i40e_get_phy_address(hw, port_num);
5253
5254 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5255 temp_addr++) {
5256 status = i40e_read_phy_register_clause45(hw,
5257 I40E_PHY_COM_REG_PAGE,
5258 temp_addr, phy_addr,
 5259                                                  &reg_val);
5260 if (status)
5261 return status;
5262 *val = reg_val;
5263 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5264 *led_addr = temp_addr;
5265 break;
5266 }
5267 }
5268 return status;
5269}
5270
5271/**
5272 * i40e_led_set_phy
5273 * @hw: pointer to the HW structure
 5274 * @on: true to turn the LED on, false to turn it off
 5275 * @led_addr: address of the LED register to use
 5276 * @mode: original value plus bit for set or ignore
 5277 *
 5278 * Set LEDs on or off when controlled by the PHY
5279 *
5280 **/
5281i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5282 u16 led_addr, u32 mode)
5283{
5284 i40e_status status = 0;
5285 u32 led_ctl = 0;
5286 u32 led_reg = 0;
5287
5288 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5289 if (status)
5290 return status;
5291 led_ctl = led_reg;
5292 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5293 led_reg = 0;
5294 status = i40e_led_set_reg(hw, led_addr, led_reg);
5295 if (status)
5296 return status;
5297 }
5298 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5299 if (status)
5300 goto restore_config;
5301 if (on)
5302 led_reg = I40E_PHY_LED_MANUAL_ON;
5303 else
5304 led_reg = 0;
5305
5306 status = i40e_led_set_reg(hw, led_addr, led_reg);
5307 if (status)
5308 goto restore_config;
5309 if (mode & I40E_PHY_LED_MODE_ORIG) {
5310 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5311 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5312 }
5313 return status;
5314
5315restore_config:
5316 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5317 return status;
5318}
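
/* Usage sketch (illustrative only): force the LED on and later restore the
 * original behaviour, roughly what a port-identify operation might do
 * (a hypothetical flow, not the driver's actual ethtool code):
 *
 *	u16 led_addr = 0, orig_val = 0;
 *
 *	if (!i40e_led_get_phy(hw, &led_addr, &orig_val)) {
 *		i40e_led_set_phy(hw, true, led_addr, 0);
 *		... identify period ...
 *		i40e_led_set_phy(hw, false, led_addr,
 *				 orig_val | I40E_PHY_LED_MODE_ORIG);
 *	}
 */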
5319
5320/**
5321 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5322 * @hw: pointer to the hw struct
5323 * @reg_addr: register address
5324 * @reg_val: ptr to register value
5325 * @cmd_details: pointer to command details structure or NULL
5326 *
 5327 * Use the firmware to read the Rx control register;
 5328 * this is especially useful if the Rx unit is under heavy pressure.
5329 **/
5330i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5331 u32 reg_addr, u32 *reg_val,
5332 struct i40e_asq_cmd_details *cmd_details)
5333{
5334 struct i40e_aq_desc desc;
5335 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5336 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5337 i40e_status status;
5338
5339 if (!reg_val)
5340 return I40E_ERR_PARAM;
5341
5342 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5343
5344 cmd_resp->address = cpu_to_le32(reg_addr);
5345
5346 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5347
5348 if (status == 0)
5349 *reg_val = le32_to_cpu(cmd_resp->value);
5350
5351 return status;
5352}
5353
5354/**
5355 * i40e_read_rx_ctl - read from an Rx control register
5356 * @hw: pointer to the hw struct
5357 * @reg_addr: register address
5358 **/
5359u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5360{
5361 i40e_status status = 0;
5362 bool use_register;
5363 int retry = 5;
5364 u32 val = 0;
5365
5366 use_register = (((hw->aq.api_maj_ver == 1) &&
5367 (hw->aq.api_min_ver < 5)) ||
5368 (hw->mac.type == I40E_MAC_X722));
5369 if (!use_register) {
5370do_retry:
5371 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5372 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5373 usleep_range(1000, 2000);
5374 retry--;
5375 goto do_retry;
5376 }
5377 }
5378
5379 /* if the AQ access failed, try the old-fashioned way */
5380 if (status || use_register)
5381 val = rd32(hw, reg_addr);
5382
5383 return val;
5384}
5385
5386/**
5387 * i40e_aq_rx_ctl_write_register
5388 * @hw: pointer to the hw struct
5389 * @reg_addr: register address
5390 * @reg_val: register value
5391 * @cmd_details: pointer to command details structure or NULL
5392 *
 5393 * Use the firmware to write to an Rx control register;
 5394 * this is especially useful if the Rx unit is under heavy pressure.
5395 **/
5396i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5397 u32 reg_addr, u32 reg_val,
5398 struct i40e_asq_cmd_details *cmd_details)
5399{
5400 struct i40e_aq_desc desc;
5401 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5402 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5403 i40e_status status;
5404
5405 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5406
5407 cmd->address = cpu_to_le32(reg_addr);
5408 cmd->value = cpu_to_le32(reg_val);
5409
5410 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5411
5412 return status;
5413}
5414
5415/**
5416 * i40e_write_rx_ctl - write to an Rx control register
5417 * @hw: pointer to the hw struct
5418 * @reg_addr: register address
5419 * @reg_val: register value
5420 **/
5421void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5422{
5423 i40e_status status = 0;
5424 bool use_register;
5425 int retry = 5;
5426
5427 use_register = (((hw->aq.api_maj_ver == 1) &&
5428 (hw->aq.api_min_ver < 5)) ||
5429 (hw->mac.type == I40E_MAC_X722));
5430 if (!use_register) {
5431do_retry:
5432 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5433 reg_val, NULL);
5434 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5435 usleep_range(1000, 2000);
5436 retry--;
5437 goto do_retry;
5438 }
5439 }
5440
5441 /* if the AQ access failed, try the old-fashioned way */
5442 if (status || use_register)
5443 wr32(hw, reg_addr, reg_val);
5444}
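
/* Usage sketch (illustrative only): the pair above is meant as a drop-in
 * replacement for rd32()/wr32() on Rx control registers, falling back to
 * direct MMIO when the firmware path is unavailable, e.g. for a
 * read-modify-write:
 *
 *	u32 val;
 *
 *	val = i40e_read_rx_ctl(hw, reg_addr);
 *	val |= some_bit_mask;
 *	i40e_write_rx_ctl(hw, reg_addr, val);
 *
 * where reg_addr and some_bit_mask are hypothetical values.
 */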
5445
5446/**
5447 * i40e_mdio_if_number_selection - MDIO I/F number selection
5448 * @hw: pointer to the hw struct
5449 * @set_mdio: use MDIO I/F number specified by mdio_num
5450 * @mdio_num: MDIO I/F number
5451 * @cmd: pointer to PHY Register command structure
5452 **/
5453static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5454 u8 mdio_num,
5455 struct i40e_aqc_phy_register_access *cmd)
5456{
5457 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5458 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5459 cmd->cmd_flags |=
5460 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5461 ((mdio_num <<
5462 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5463 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5464 else
5465 i40e_debug(hw, I40E_DEBUG_PHY,
5466 "MDIO I/F number selection not supported by current FW version.\n");
5467 }
5468}
5469
5470/**
5471 * i40e_aq_set_phy_register_ext
5472 * @hw: pointer to the hw struct
5473 * @phy_select: select which phy should be accessed
5474 * @dev_addr: PHY device address
5475 * @page_change: flag to indicate if phy page should be updated
5476 * @set_mdio: use MDIO I/F number specified by mdio_num
5477 * @mdio_num: MDIO I/F number
5478 * @reg_addr: PHY register address
5479 * @reg_val: new register value
5480 * @cmd_details: pointer to command details structure or NULL
5481 *
5482 * Write the external PHY register.
 5483 * NOTE: In common cases the MDIO I/F number should not be changed; that's why
 5484 * you may use the simple wrapper i40e_aq_set_phy_register.
5485 **/
5486enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5487 u8 phy_select, u8 dev_addr, bool page_change,
5488 bool set_mdio, u8 mdio_num,
5489 u32 reg_addr, u32 reg_val,
5490 struct i40e_asq_cmd_details *cmd_details)
5491{
5492 struct i40e_aq_desc desc;
5493 struct i40e_aqc_phy_register_access *cmd =
5494 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5495 i40e_status status;
5496
5497 i40e_fill_default_direct_cmd_desc(&desc,
5498 i40e_aqc_opc_set_phy_register);
5499
5500 cmd->phy_interface = phy_select;
5501 cmd->dev_address = dev_addr;
5502 cmd->reg_address = cpu_to_le32(reg_addr);
5503 cmd->reg_value = cpu_to_le32(reg_val);
5504
5505 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5506
5507 if (!page_change)
5508 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5509
5510 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5511
5512 return status;
5513}
5514
5515/**
5516 * i40e_aq_get_phy_register_ext
5517 * @hw: pointer to the hw struct
5518 * @phy_select: select which phy should be accessed
5519 * @dev_addr: PHY device address
5520 * @page_change: flag to indicate if phy page should be updated
5521 * @set_mdio: use MDIO I/F number specified by mdio_num
5522 * @mdio_num: MDIO I/F number
5523 * @reg_addr: PHY register address
5524 * @reg_val: read register value
5525 * @cmd_details: pointer to command details structure or NULL
5526 *
5527 * Read the external PHY register.
 5528 * NOTE: In common cases the MDIO I/F number should not be changed; that's why
 5529 * you may use the simple wrapper i40e_aq_get_phy_register.
5530 **/
5531enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5532 u8 phy_select, u8 dev_addr, bool page_change,
5533 bool set_mdio, u8 mdio_num,
5534 u32 reg_addr, u32 *reg_val,
5535 struct i40e_asq_cmd_details *cmd_details)
5536{
5537 struct i40e_aq_desc desc;
5538 struct i40e_aqc_phy_register_access *cmd =
5539 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5540 i40e_status status;
5541
5542 i40e_fill_default_direct_cmd_desc(&desc,
5543 i40e_aqc_opc_get_phy_register);
5544
5545 cmd->phy_interface = phy_select;
5546 cmd->dev_address = dev_addr;
5547 cmd->reg_address = cpu_to_le32(reg_addr);
5548
5549 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5550
5551 if (!page_change)
5552 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5553
5554 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5555 if (!status)
5556 *reg_val = le32_to_cpu(cmd->reg_value);
5557
5558 return status;
5559}
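
/* Usage sketch (illustrative only): most callers leave the MDIO I/F number
 * alone and go through the plain i40e_aq_get_phy_register()/
 * i40e_aq_set_phy_register() wrappers mentioned in the notes above; calling
 * the _ext variant directly with set_mdio = false behaves the same way:
 *
 *	u32 val = 0;
 *
 *	i40e_aq_get_phy_register_ext(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *				     I40E_PHY_COM_REG_PAGE, true,
 *				     false, 0, I40E_PHY_LED_PROV_REG_1,
 *				     &val, NULL);
 */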
5560
5561/**
5562 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5563 * @hw: pointer to the hw struct
5564 * @buff: command buffer (size in bytes = buff_size)
5565 * @buff_size: buffer size in bytes
5566 * @track_id: package tracking id
5567 * @error_offset: returns error offset
5568 * @error_info: returns error information
5569 * @cmd_details: pointer to command details structure or NULL
5570 **/
5571enum
5572i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5573 u16 buff_size, u32 track_id,
5574 u32 *error_offset, u32 *error_info,
5575 struct i40e_asq_cmd_details *cmd_details)
5576{
5577 struct i40e_aq_desc desc;
5578 struct i40e_aqc_write_personalization_profile *cmd =
5579 (struct i40e_aqc_write_personalization_profile *)
5580 &desc.params.raw;
5581 struct i40e_aqc_write_ddp_resp *resp;
5582 i40e_status status;
5583
5584 i40e_fill_default_direct_cmd_desc(&desc,
5585 i40e_aqc_opc_write_personalization_profile);
5586
5587 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5588 if (buff_size > I40E_AQ_LARGE_BUF)
5589 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5590
5591 desc.datalen = cpu_to_le16(buff_size);
5592
5593 cmd->profile_track_id = cpu_to_le32(track_id);
5594
5595 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5596 if (!status) {
5597 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5598 if (error_offset)
5599 *error_offset = le32_to_cpu(resp->error_offset);
5600 if (error_info)
5601 *error_info = le32_to_cpu(resp->error_info);
5602 }
5603
5604 return status;
5605}
5606
5607/**
5608 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5609 * @hw: pointer to the hw struct
5610 * @buff: command buffer (size in bytes = buff_size)
5611 * @buff_size: buffer size in bytes
5612 * @flags: AdminQ command flags
5613 * @cmd_details: pointer to command details structure or NULL
5614 **/
5615enum
5616i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5617 u16 buff_size, u8 flags,
5618 struct i40e_asq_cmd_details *cmd_details)
5619{
5620 struct i40e_aq_desc desc;
5621 struct i40e_aqc_get_applied_profiles *cmd =
5622 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5623 i40e_status status;
5624
5625 i40e_fill_default_direct_cmd_desc(&desc,
5626 i40e_aqc_opc_get_personalization_profile_list);
5627
5628 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5629 if (buff_size > I40E_AQ_LARGE_BUF)
5630 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5631 desc.datalen = cpu_to_le16(buff_size);
5632
5633 cmd->flags = flags;
5634
5635 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5636
5637 return status;
5638}
5639
5640/**
5641 * i40e_find_segment_in_package
 5642 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5643 * @pkg_hdr: pointer to the package header to be searched
5644 *
5645 * This function searches a package file for a particular segment type. On
5646 * success it returns a pointer to the segment header, otherwise it will
5647 * return NULL.
5648 **/
5649struct i40e_generic_seg_header *
5650i40e_find_segment_in_package(u32 segment_type,
5651 struct i40e_package_header *pkg_hdr)
5652{
5653 struct i40e_generic_seg_header *segment;
5654 u32 i;
5655
5656 /* Search all package segments for the requested segment type */
5657 for (i = 0; i < pkg_hdr->segment_count; i++) {
5658 segment =
5659 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5660 pkg_hdr->segment_offset[i]);
5661
5662 if (segment->type == segment_type)
5663 return segment;
5664 }
5665
5666 return NULL;
5667}
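
/* Usage sketch (illustrative only): locate the i40e-specific segment in a
 * DDP package image before validating or writing it; "pkg" is a hypothetical
 * pointer to a package header already read into memory:
 *
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
 *	if (!profile)
 *		... the package carries no i40e segment ...
 */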
5668
5669/* Get section table in profile */
5670#define I40E_SECTION_TABLE(profile, sec_tbl) \
5671 do { \
5672 struct i40e_profile_segment *p = (profile); \
5673 u32 count; \
5674 u32 *nvm; \
5675 count = p->device_table_count; \
5676 nvm = (u32 *)&p->device_table[count]; \
5677 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5678 } while (0)
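
/* Layout assumed by the macro above: after the profile header come
 * device_table[device_table_count] entries, then an NVM table whose first
 * u32 is its own entry count; the section table begins immediately after
 * that NVM table, hence &nvm[nvm[0] + 1].
 */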
5679
5680/* Get section header in profile */
5681#define I40E_SECTION_HEADER(profile, offset) \
5682 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5683
5684/**
5685 * i40e_find_section_in_profile
 5686 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5687 * @profile: pointer to the i40e segment header to be searched
5688 *
5689 * This function searches i40e segment for a particular section type. On
5690 * success it returns a pointer to the section header, otherwise it will
5691 * return NULL.
5692 **/
5693struct i40e_profile_section_header *
5694i40e_find_section_in_profile(u32 section_type,
5695 struct i40e_profile_segment *profile)
5696{
5697 struct i40e_profile_section_header *sec;
5698 struct i40e_section_table *sec_tbl;
5699 u32 sec_off;
5700 u32 i;
5701
5702 if (profile->header.type != SEGMENT_TYPE_I40E)
5703 return NULL;
5704
5705 I40E_SECTION_TABLE(profile, sec_tbl);
5706
5707 for (i = 0; i < sec_tbl->section_count; i++) {
5708 sec_off = sec_tbl->section_offset[i];
5709 sec = I40E_SECTION_HEADER(profile, sec_off);
5710 if (sec->section.type == section_type)
5711 return sec;
5712 }
5713
5714 return NULL;
5715}
5716
5717/**
5718 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5719 * @hw: pointer to the hw struct
5720 * @aq: command buffer containing all data to execute AQ
5721 **/
5722static enum
5723i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5724 struct i40e_profile_aq_section *aq)
5725{
5726 i40e_status status;
5727 struct i40e_aq_desc desc;
5728 u8 *msg = NULL;
5729 u16 msglen;
5730
5731 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5732 desc.flags |= cpu_to_le16(aq->flags);
5733 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5734
5735 msglen = aq->datalen;
5736 if (msglen) {
5737 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5738 I40E_AQ_FLAG_RD));
5739 if (msglen > I40E_AQ_LARGE_BUF)
5740 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5741 desc.datalen = cpu_to_le16(msglen);
5742 msg = &aq->data[0];
5743 }
5744
5745 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5746
5747 if (status) {
5748 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5749 "unable to exec DDP AQ opcode %u, error %d\n",
5750 aq->opcode, status);
5751 return status;
5752 }
5753
5754 /* copy returned desc to aq_buf */
5755 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5756
5757 return 0;
5758}
5759
5760/**
5761 * i40e_validate_profile
5762 * @hw: pointer to the hardware structure
5763 * @profile: pointer to the profile segment of the package to be validated
5764 * @track_id: package tracking id
5765 * @rollback: flag if the profile is for rollback.
5766 *
5767 * Validates supported devices and profile's sections.
5768 */
5769static enum i40e_status_code
5770i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5771 u32 track_id, bool rollback)
5772{
5773 struct i40e_profile_section_header *sec = NULL;
5774 i40e_status status = 0;
5775 struct i40e_section_table *sec_tbl;
5776 u32 vendor_dev_id;
5777 u32 dev_cnt;
5778 u32 sec_off;
5779 u32 i;
5780
5781 if (track_id == I40E_DDP_TRACKID_INVALID) {
5782 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5783 return I40E_NOT_SUPPORTED;
5784 }
5785
5786 dev_cnt = profile->device_table_count;
5787 for (i = 0; i < dev_cnt; i++) {
5788 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5789 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5790 hw->device_id == (vendor_dev_id & 0xFFFF))
5791 break;
5792 }
5793 if (dev_cnt && i == dev_cnt) {
5794 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5795 "Device doesn't support DDP\n");
5796 return I40E_ERR_DEVICE_NOT_SUPPORTED;
5797 }
5798
5799 I40E_SECTION_TABLE(profile, sec_tbl);
5800
5801 /* Validate sections types */
5802 for (i = 0; i < sec_tbl->section_count; i++) {
5803 sec_off = sec_tbl->section_offset[i];
5804 sec = I40E_SECTION_HEADER(profile, sec_off);
5805 if (rollback) {
5806 if (sec->section.type == SECTION_TYPE_MMIO ||
5807 sec->section.type == SECTION_TYPE_AQ ||
5808 sec->section.type == SECTION_TYPE_RB_AQ) {
5809 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5810 "Not a roll-back package\n");
5811 return I40E_NOT_SUPPORTED;
5812 }
5813 } else {
5814 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5815 sec->section.type == SECTION_TYPE_RB_MMIO) {
5816 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5817 "Not an original package\n");
5818 return I40E_NOT_SUPPORTED;
5819 }
5820 }
5821 }
5822
5823 return status;
5824}
5825
5826/**
5827 * i40e_write_profile
5828 * @hw: pointer to the hardware structure
5829 * @profile: pointer to the profile segment of the package to be downloaded
5830 * @track_id: package tracking id
5831 *
5832 * Handles the download of a complete package.
5833 */
5834enum i40e_status_code
5835i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5836 u32 track_id)
5837{
5838 i40e_status status = 0;
5839 struct i40e_section_table *sec_tbl;
5840 struct i40e_profile_section_header *sec = NULL;
5841 struct i40e_profile_aq_section *ddp_aq;
5842 u32 section_size = 0;
5843 u32 offset = 0, info = 0;
5844 u32 sec_off;
5845 u32 i;
5846
5847 status = i40e_validate_profile(hw, profile, track_id, false);
5848 if (status)
5849 return status;
5850
5851 I40E_SECTION_TABLE(profile, sec_tbl);
5852
5853 for (i = 0; i < sec_tbl->section_count; i++) {
5854 sec_off = sec_tbl->section_offset[i];
5855 sec = I40E_SECTION_HEADER(profile, sec_off);
5856 /* Process generic admin command */
5857 if (sec->section.type == SECTION_TYPE_AQ) {
5858 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5859 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5860 if (status) {
5861 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5862 "Failed to execute aq: section %d, opcode %u\n",
5863 i, ddp_aq->opcode);
5864 break;
5865 }
5866 sec->section.type = SECTION_TYPE_RB_AQ;
5867 }
5868
5869 /* Skip any non-mmio sections */
5870 if (sec->section.type != SECTION_TYPE_MMIO)
5871 continue;
5872
5873 section_size = sec->section.size +
5874 sizeof(struct i40e_profile_section_header);
5875
5876 /* Write MMIO section */
5877 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5878 track_id, &offset, &info, NULL);
5879 if (status) {
5880 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5881 "Failed to write profile: section %d, offset %d, info %d\n",
5882 i, offset, info);
5883 break;
5884 }
5885 }
5886 return status;
5887}
5888
5889/**
5890 * i40e_rollback_profile
5891 * @hw: pointer to the hardware structure
5892 * @profile: pointer to the profile segment of the package to be removed
5893 * @track_id: package tracking id
5894 *
5895 * Rolls back previously loaded package.
5896 */
5897enum i40e_status_code
5898i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5899 u32 track_id)
5900{
5901 struct i40e_profile_section_header *sec = NULL;
5902 i40e_status status = 0;
5903 struct i40e_section_table *sec_tbl;
5904 u32 offset = 0, info = 0;
5905 u32 section_size = 0;
5906 u32 sec_off;
5907 int i;
5908
5909 status = i40e_validate_profile(hw, profile, track_id, true);
5910 if (status)
5911 return status;
5912
5913 I40E_SECTION_TABLE(profile, sec_tbl);
5914
5915 /* For rollback write sections in reverse */
5916 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5917 sec_off = sec_tbl->section_offset[i];
5918 sec = I40E_SECTION_HEADER(profile, sec_off);
5919
5920 /* Skip any non-rollback sections */
5921 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5922 continue;
5923
5924 section_size = sec->section.size +
5925 sizeof(struct i40e_profile_section_header);
5926
5927 /* Write roll-back MMIO section */
5928 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5929 track_id, &offset, &info, NULL);
5930 if (status) {
5931 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5932 "Failed to write profile: section %d, offset %d, info %d\n",
5933 i, offset, info);
5934 break;
5935 }
5936 }
5937 return status;
5938}
5939
5940/**
5941 * i40e_add_pinfo_to_list
5942 * @hw: pointer to the hardware structure
5943 * @profile: pointer to the profile segment of the package
5944 * @profile_info_sec: buffer for information section
5945 * @track_id: package tracking id
5946 *
5947 * Register a profile to the list of loaded profiles.
5948 */
5949enum i40e_status_code
5950i40e_add_pinfo_to_list(struct i40e_hw *hw,
5951 struct i40e_profile_segment *profile,
5952 u8 *profile_info_sec, u32 track_id)
5953{
5954 i40e_status status = 0;
5955 struct i40e_profile_section_header *sec = NULL;
5956 struct i40e_profile_info *pinfo;
5957 u32 offset = 0, info = 0;
5958
5959 sec = (struct i40e_profile_section_header *)profile_info_sec;
5960 sec->tbl_size = 1;
5961 sec->data_end = sizeof(struct i40e_profile_section_header) +
5962 sizeof(struct i40e_profile_info);
5963 sec->section.type = SECTION_TYPE_INFO;
5964 sec->section.offset = sizeof(struct i40e_profile_section_header);
5965 sec->section.size = sizeof(struct i40e_profile_info);
5966 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5967 sec->section.offset);
5968 pinfo->track_id = track_id;
5969 pinfo->version = profile->version;
5970 pinfo->op = I40E_DDP_ADD_TRACKID;
5971 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5972
5973 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5974 track_id, &offset, &info, NULL);
5975
5976 return status;
5977}
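
/* Usage sketch (illustrative only) of how the DDP helpers above combine into
 * a package load; error handling is omitted and "pkg", "track_id" and
 * "sec_buf" are hypothetical, caller-provided values:
 *
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
 *	i40e_write_profile(hw, profile, track_id);
 *	i40e_add_pinfo_to_list(hw, profile, sec_buf, track_id);
 */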
5978
5979/**
5980 * i40e_aq_add_cloud_filters
5981 * @hw: pointer to the hardware structure
 5982 * @seid: VSI seid to add cloud filters to
5983 * @filters: Buffer which contains the filters to be added
5984 * @filter_count: number of filters contained in the buffer
5985 *
5986 * Set the cloud filters for a given VSI. The contents of the
5987 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5988 * of the function.
5989 *
5990 **/
5991enum i40e_status_code
5992i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5993 struct i40e_aqc_cloud_filters_element_data *filters,
5994 u8 filter_count)
5995{
5996 struct i40e_aq_desc desc;
5997 struct i40e_aqc_add_remove_cloud_filters *cmd =
5998 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5999 enum i40e_status_code status;
6000 u16 buff_len;
6001
6002 i40e_fill_default_direct_cmd_desc(&desc,
6003 i40e_aqc_opc_add_cloud_filters);
6004
6005 buff_len = filter_count * sizeof(*filters);
6006 desc.datalen = cpu_to_le16(buff_len);
6007 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6008 cmd->num_filters = filter_count;
6009 cmd->seid = cpu_to_le16(seid);
6010
6011 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6012
6013 return status;
6014}
6015
6016/**
6017 * i40e_aq_add_cloud_filters_bb
6018 * @hw: pointer to the hardware structure
 6019 * @seid: VSI seid to add cloud filters to
6020 * @filters: Buffer which contains the filters in big buffer to be added
6021 * @filter_count: number of filters contained in the buffer
6022 *
6023 * Set the big buffer cloud filters for a given VSI. The contents of the
6024 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6025 * function.
6026 *
6027 **/
6028enum i40e_status_code
6029i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6030 struct i40e_aqc_cloud_filters_element_bb *filters,
6031 u8 filter_count)
6032{
6033 struct i40e_aq_desc desc;
6034 struct i40e_aqc_add_remove_cloud_filters *cmd =
6035 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6036 i40e_status status;
6037 u16 buff_len;
6038 int i;
6039
6040 i40e_fill_default_direct_cmd_desc(&desc,
6041 i40e_aqc_opc_add_cloud_filters);
6042
6043 buff_len = filter_count * sizeof(*filters);
6044 desc.datalen = cpu_to_le16(buff_len);
6045 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6046 cmd->num_filters = filter_count;
6047 cmd->seid = cpu_to_le16(seid);
6048 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6049
6050 for (i = 0; i < filter_count; i++) {
6051 u16 tnl_type;
6052 u32 ti;
6053
6054 tnl_type = (le16_to_cpu(filters[i].element.flags) &
6055 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6056 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6057
 6058		/* Due to hardware eccentricities, the VNI for Geneve is shifted
 6059		 * one byte further than the Tenant ID position used by other
 6060		 * tunnel types.
6061 */
6062 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6063 ti = le32_to_cpu(filters[i].element.tenant_id);
6064 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6065 }
6066 }
6067
6068 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6069
6070 return status;
6071}
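
/* Worked example of the Geneve adjustment above (values are illustrative):
 * a 24-bit VNI of 0x000123 stored in the 32-bit tenant_id becomes 0x00012300
 * after the << 8, i.e. the VNI ends up in bits 31:8 rather than bits 23:0 as
 * a VXLAN/NVGRE tenant ID would.
 */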
6072
6073/**
6074 * i40e_aq_rem_cloud_filters
6075 * @hw: pointer to the hardware structure
6076 * @seid: VSI seid to remove cloud filters from
6077 * @filters: Buffer which contains the filters to be removed
6078 * @filter_count: number of filters contained in the buffer
6079 *
6080 * Remove the cloud filters for a given VSI. The contents of the
6081 * i40e_aqc_cloud_filters_element_data are filled in by the caller
6082 * of the function.
6083 *
6084 **/
6085enum i40e_status_code
6086i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
6087 struct i40e_aqc_cloud_filters_element_data *filters,
6088 u8 filter_count)
6089{
6090 struct i40e_aq_desc desc;
6091 struct i40e_aqc_add_remove_cloud_filters *cmd =
6092 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6093 enum i40e_status_code status;
6094 u16 buff_len;
6095
6096 i40e_fill_default_direct_cmd_desc(&desc,
6097 i40e_aqc_opc_remove_cloud_filters);
6098
6099 buff_len = filter_count * sizeof(*filters);
6100 desc.datalen = cpu_to_le16(buff_len);
6101 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6102 cmd->num_filters = filter_count;
6103 cmd->seid = cpu_to_le16(seid);
6104
6105 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6106
6107 return status;
6108}
6109
6110/**
6111 * i40e_aq_rem_cloud_filters_bb
6112 * @hw: pointer to the hardware structure
6113 * @seid: VSI seid to remove cloud filters from
6114 * @filters: Buffer which contains the filters in big buffer to be removed
6115 * @filter_count: number of filters contained in the buffer
6116 *
6117 * Remove the big buffer cloud filters for a given VSI. The contents of the
6118 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6119 * function.
6120 *
6121 **/
6122enum i40e_status_code
6123i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6124 struct i40e_aqc_cloud_filters_element_bb *filters,
6125 u8 filter_count)
6126{
6127 struct i40e_aq_desc desc;
6128 struct i40e_aqc_add_remove_cloud_filters *cmd =
6129 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6130 i40e_status status;
6131 u16 buff_len;
6132 int i;
6133
6134 i40e_fill_default_direct_cmd_desc(&desc,
6135 i40e_aqc_opc_remove_cloud_filters);
6136
6137 buff_len = filter_count * sizeof(*filters);
6138 desc.datalen = cpu_to_le16(buff_len);
6139 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6140 cmd->num_filters = filter_count;
6141 cmd->seid = cpu_to_le16(seid);
6142 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6143
6144 for (i = 0; i < filter_count; i++) {
6145 u16 tnl_type;
6146 u32 ti;
6147
6148 tnl_type = (le16_to_cpu(filters[i].element.flags) &
6149 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6150 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6151
 6152		/* Due to hardware eccentricities, the VNI for Geneve is shifted
 6153		 * one byte further than the Tenant ID position used by other
 6154		 * tunnel types.
6155 */
6156 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6157 ti = le32_to_cpu(filters[i].element.tenant_id);
6158 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6159 }
6160 }
6161
6162 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6163
6164 return status;
6165}