1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2021 Intel Corporation. */
3
4#include <linux/avf/virtchnl.h>
5#include <linux/bitfield.h>
6#include <linux/delay.h>
7#include <linux/etherdevice.h>
8#include <linux/pci.h>
9#include "i40e_adminq_cmd.h"
10#include "i40e_devids.h"
11#include "i40e_prototype.h"
12#include "i40e_register.h"
13
14/**
15 * i40e_set_mac_type - Sets MAC type
16 * @hw: pointer to the HW structure
17 *
18 * This function sets the mac type of the adapter based on the
19 * vendor ID and device ID stored in the hw structure.
20 **/
21int i40e_set_mac_type(struct i40e_hw *hw)
22{
23 int status = 0;
24
25 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
26 switch (hw->device_id) {
27 case I40E_DEV_ID_SFP_XL710:
28 case I40E_DEV_ID_QEMU:
29 case I40E_DEV_ID_KX_B:
30 case I40E_DEV_ID_KX_C:
31 case I40E_DEV_ID_QSFP_A:
32 case I40E_DEV_ID_QSFP_B:
33 case I40E_DEV_ID_QSFP_C:
34 case I40E_DEV_ID_1G_BASE_T_BC:
35 case I40E_DEV_ID_5G_BASE_T_BC:
36 case I40E_DEV_ID_10G_BASE_T:
37 case I40E_DEV_ID_10G_BASE_T4:
38 case I40E_DEV_ID_10G_BASE_T_BC:
39 case I40E_DEV_ID_10G_B:
40 case I40E_DEV_ID_10G_SFP:
41 case I40E_DEV_ID_20G_KR2:
42 case I40E_DEV_ID_20G_KR2_A:
43 case I40E_DEV_ID_25G_B:
44 case I40E_DEV_ID_25G_SFP28:
45 case I40E_DEV_ID_X710_N3000:
46 case I40E_DEV_ID_XXV710_N3000:
47 hw->mac.type = I40E_MAC_XL710;
48 break;
49 case I40E_DEV_ID_KX_X722:
50 case I40E_DEV_ID_QSFP_X722:
51 case I40E_DEV_ID_SFP_X722:
52 case I40E_DEV_ID_1G_BASE_T_X722:
53 case I40E_DEV_ID_10G_BASE_T_X722:
54 case I40E_DEV_ID_SFP_I_X722:
55 case I40E_DEV_ID_SFP_X722_A:
56 hw->mac.type = I40E_MAC_X722;
57 break;
58 default:
59 hw->mac.type = I40E_MAC_GENERIC;
60 break;
61 }
62 } else {
63 status = -ENODEV;
64 }
65
66 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
67 hw->mac.type, status);
68 return status;
69}
70
71/**
72 * i40e_aq_str - convert AQ err code to a string
73 * @hw: pointer to the HW structure
74 * @aq_err: the AQ error code to convert
75 **/
76const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
77{
78 switch (aq_err) {
79 case I40E_AQ_RC_OK:
80 return "OK";
81 case I40E_AQ_RC_EPERM:
82 return "I40E_AQ_RC_EPERM";
83 case I40E_AQ_RC_ENOENT:
84 return "I40E_AQ_RC_ENOENT";
85 case I40E_AQ_RC_ESRCH:
86 return "I40E_AQ_RC_ESRCH";
87 case I40E_AQ_RC_EINTR:
88 return "I40E_AQ_RC_EINTR";
89 case I40E_AQ_RC_EIO:
90 return "I40E_AQ_RC_EIO";
91 case I40E_AQ_RC_ENXIO:
92 return "I40E_AQ_RC_ENXIO";
93 case I40E_AQ_RC_E2BIG:
94 return "I40E_AQ_RC_E2BIG";
95 case I40E_AQ_RC_EAGAIN:
96 return "I40E_AQ_RC_EAGAIN";
97 case I40E_AQ_RC_ENOMEM:
98 return "I40E_AQ_RC_ENOMEM";
99 case I40E_AQ_RC_EACCES:
100 return "I40E_AQ_RC_EACCES";
101 case I40E_AQ_RC_EFAULT:
102 return "I40E_AQ_RC_EFAULT";
103 case I40E_AQ_RC_EBUSY:
104 return "I40E_AQ_RC_EBUSY";
105 case I40E_AQ_RC_EEXIST:
106 return "I40E_AQ_RC_EEXIST";
107 case I40E_AQ_RC_EINVAL:
108 return "I40E_AQ_RC_EINVAL";
109 case I40E_AQ_RC_ENOTTY:
110 return "I40E_AQ_RC_ENOTTY";
111 case I40E_AQ_RC_ENOSPC:
112 return "I40E_AQ_RC_ENOSPC";
113 case I40E_AQ_RC_ENOSYS:
114 return "I40E_AQ_RC_ENOSYS";
115 case I40E_AQ_RC_ERANGE:
116 return "I40E_AQ_RC_ERANGE";
117 case I40E_AQ_RC_EFLUSHED:
118 return "I40E_AQ_RC_EFLUSHED";
119 case I40E_AQ_RC_BAD_ADDR:
120 return "I40E_AQ_RC_BAD_ADDR";
121 case I40E_AQ_RC_EMODE:
122 return "I40E_AQ_RC_EMODE";
123 case I40E_AQ_RC_EFBIG:
124 return "I40E_AQ_RC_EFBIG";
125 }
126
127 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
128 return hw->err_str;
129}
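
/* Illustrative caller-side pattern (sketch, not part of this file): the
 * usual idiom is to log the kernel error code together with the decoded
 * AdminQ status string, e.g.
 *
 *	err = i40e_aq_set_phy_int_mask(hw, mask, NULL);
 *	if (err)
 *		dev_dbg(&pf->pdev->dev,
 *			"set phy mask failed, err %pe aq_err %s\n",
 *			ERR_PTR(err),
 *			i40e_aq_str(hw, hw->aq.asq_last_status));
 *
 * where "pf" is the caller's adapter context and is assumed here.
 */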
130
131/**
132 * i40e_debug_aq
133 * @hw: pointer to the hw struct
134 * @mask: debug mask
135 * @desc: pointer to admin queue descriptor
136 * @buffer: pointer to command buffer
137 * @buf_len: max length of buffer
138 *
139 * Dumps debug log about adminq command with descriptor contents.
140 **/
141void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
142 void *buffer, u16 buf_len)
143{
144 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
145 u32 effective_mask = hw->debug_mask & mask;
146 char prefix[27];
147 u16 len;
148 u8 *buf = (u8 *)buffer;
149
150 if (!effective_mask || !desc)
151 return;
152
153 len = le16_to_cpu(aq_desc->datalen);
154
155 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
156 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
157 le16_to_cpu(aq_desc->opcode),
158 le16_to_cpu(aq_desc->flags),
159 le16_to_cpu(aq_desc->datalen),
160 le16_to_cpu(aq_desc->retval));
161 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
162 "\tcookie (h,l) 0x%08X 0x%08X\n",
163 le32_to_cpu(aq_desc->cookie_high),
164 le32_to_cpu(aq_desc->cookie_low));
165 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
166 "\tparam (0,1) 0x%08X 0x%08X\n",
167 le32_to_cpu(aq_desc->params.internal.param0),
168 le32_to_cpu(aq_desc->params.internal.param1));
169 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
170 "\taddr (h,l) 0x%08X 0x%08X\n",
171 le32_to_cpu(aq_desc->params.external.addr_high),
172 le32_to_cpu(aq_desc->params.external.addr_low));
173
174 if (buffer && buf_len != 0 && len != 0 &&
175 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
176 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
177 if (buf_len < len)
178 len = buf_len;
179
180 snprintf(prefix, sizeof(prefix),
181 "i40e %02x:%02x.%x: \t0x",
182 hw->bus.bus_id,
183 hw->bus.device,
184 hw->bus.func);
185
186 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
187 16, 1, buf, len, false);
188 }
189}
190
191/**
192 * i40e_check_asq_alive
193 * @hw: pointer to the hw struct
194 *
195 * Returns true if the admin send queue (ASQ) is enabled, else false.
196 **/
197bool i40e_check_asq_alive(struct i40e_hw *hw)
198{
199 /* Check if the queue is initialized */
200 if (!hw->aq.asq.count)
201 return false;
202
203 return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
204}
205
206/**
207 * i40e_aq_queue_shutdown
208 * @hw: pointer to the hw struct
209 * @unloading: is the driver unloading itself
210 *
211 * Tell the Firmware that we're shutting down the AdminQ and whether
212 * or not the driver is unloading as well.
213 **/
214int i40e_aq_queue_shutdown(struct i40e_hw *hw,
215 bool unloading)
216{
217 struct i40e_aq_desc desc;
218 struct i40e_aqc_queue_shutdown *cmd =
219 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
220 int status;
221
222 i40e_fill_default_direct_cmd_desc(&desc,
223 i40e_aqc_opc_queue_shutdown);
224
225 if (unloading)
226 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
227 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
228
229 return status;
230}
231
232/**
233 * i40e_aq_get_set_rss_lut
234 * @hw: pointer to the hardware structure
235 * @vsi_id: vsi fw index
236 * @pf_lut: for PF table set true, for VSI table set false
237 * @lut: pointer to the lut buffer provided by the caller
238 * @lut_size: size of the lut buffer
239 * @set: set true to set the table, false to get the table
240 *
241 * Internal function to get or set RSS look up table
242 **/
243static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
244 u16 vsi_id, bool pf_lut,
245 u8 *lut, u16 lut_size,
246 bool set)
247{
248 struct i40e_aq_desc desc;
249 struct i40e_aqc_get_set_rss_lut *cmd_resp =
250 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
251 int status;
252 u16 flags;
253
254 if (set)
255 i40e_fill_default_direct_cmd_desc(&desc,
256 i40e_aqc_opc_set_rss_lut);
257 else
258 i40e_fill_default_direct_cmd_desc(&desc,
259 i40e_aqc_opc_get_rss_lut);
260
261 /* Indirect command */
262 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
263 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
264
265 vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
266 FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
267 cmd_resp->vsi_id = cpu_to_le16(vsi_id);
268
269 if (pf_lut)
270 flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
271 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
272 else
273 flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
274 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);
275
276 cmd_resp->flags = cpu_to_le16(flags);
277 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
278
279 return status;
280}
281
282/**
283 * i40e_aq_get_rss_lut
284 * @hw: pointer to the hardware structure
285 * @vsi_id: vsi fw index
286 * @pf_lut: for PF table set true, for VSI table set false
287 * @lut: pointer to the lut buffer provided by the caller
288 * @lut_size: size of the lut buffer
289 *
290 * get the RSS lookup table, PF or VSI type
291 **/
292int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
293 bool pf_lut, u8 *lut, u16 lut_size)
294{
295 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
296 false);
297}
298
299/**
300 * i40e_aq_set_rss_lut
301 * @hw: pointer to the hardware structure
302 * @vsi_id: vsi fw index
303 * @pf_lut: for PF table set true, for VSI table set false
304 * @lut: pointer to the lut buffer provided by the caller
305 * @lut_size: size of the lut buffer
306 *
307 * set the RSS lookup table, PF or VSI type
308 **/
309int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
310 bool pf_lut, u8 *lut, u16 lut_size)
311{
312 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
313}
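
/* Minimal usage sketch (assumed caller-side context, not part of this
 * file): program a per-VSI lookup table and read it back to verify.
 *
 *	u8 lut[I40E_HLUT_ARRAY_SIZE] = {};
 *	int err;
 *
 *	err = i40e_aq_set_rss_lut(hw, vsi->id, false, lut, sizeof(lut));
 *	if (!err)
 *		err = i40e_aq_get_rss_lut(hw, vsi->id, false, lut, sizeof(lut));
 *
 * I40E_HLUT_ARRAY_SIZE and vsi->id come from the caller's headers and are
 * assumptions here; pf_lut=false selects the VSI table as described above.
 */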
314
315/**
316 * i40e_aq_get_set_rss_key
317 * @hw: pointer to the hw struct
318 * @vsi_id: vsi fw index
319 * @key: pointer to key info struct
320 * @set: set true to set the key, false to get the key
321 *
322 * Internal function to get or set the RSS key per VSI
323 **/
324static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
325 u16 vsi_id,
326 struct i40e_aqc_get_set_rss_key_data *key,
327 bool set)
328{
329 struct i40e_aq_desc desc;
330 struct i40e_aqc_get_set_rss_key *cmd_resp =
331 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
332 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
333 int status;
334
335 if (set)
336 i40e_fill_default_direct_cmd_desc(&desc,
337 i40e_aqc_opc_set_rss_key);
338 else
339 i40e_fill_default_direct_cmd_desc(&desc,
340 i40e_aqc_opc_get_rss_key);
341
342 /* Indirect command */
343 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
344 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
345
346 vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
347 FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
348 cmd_resp->vsi_id = cpu_to_le16(vsi_id);
349
350 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
351
352 return status;
353}
354
355/**
356 * i40e_aq_get_rss_key
357 * @hw: pointer to the hw struct
358 * @vsi_id: vsi fw index
359 * @key: pointer to key info struct
360 * get the RSS key per VSI
361 **/
362int i40e_aq_get_rss_key(struct i40e_hw *hw,
363 u16 vsi_id,
364 struct i40e_aqc_get_set_rss_key_data *key)
365{
366 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
367}
368
369/**
370 * i40e_aq_set_rss_key
371 * @hw: pointer to the hw struct
372 * @vsi_id: vsi fw index
373 * @key: pointer to key info struct
374 *
375 * set the RSS key per VSI
376 **/
377int i40e_aq_set_rss_key(struct i40e_hw *hw,
378 u16 vsi_id,
379 struct i40e_aqc_get_set_rss_key_data *key)
380{
381 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
382}
383
384/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
385 * hardware to a bit-field that can be used by SW to more easily determine the
386 * packet type.
387 *
388 * Macros are used to shorten the table lines and make this table human
389 * readable.
390 *
391 * We store the PTYPE in the top byte of the bit field - this is just so that
392 * we can check that the table doesn't have a row missing, as the index into
393 * the table should be the PTYPE.
394 *
395 * Typical work flow:
396 *
397 * IF NOT i40e_ptype_lookup[ptype].known
398 * THEN
399 * Packet is unknown
400 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
401 * Use the rest of the fields to look at the tunnels, inner protocols, etc
402 * ELSE
403 * Use the enum i40e_rx_l2_ptype to decode the packet type
404 * ENDIF
405 */
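
/* Illustrative decode sketch following the workflow above (field and
 * constant names come from the shared type definitions; the Rx hot path
 * owns the real decode helper, this is only a usage illustration):
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
 *	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4 &&
 *	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
 *		handle_ipv4_tcp();
 *
 * For example, ptype 26 in the table below decodes as non-tunneled IPv4
 * TCP; handle_ipv4_tcp() is a placeholder for caller-specific handling.
 */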
406
407/* macro to make the table lines short, use explicit indexing with [PTYPE] */
408#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
409 [PTYPE] = { \
410 1, \
411 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
412 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
413 I40E_RX_PTYPE_##OUTER_FRAG, \
414 I40E_RX_PTYPE_TUNNEL_##T, \
415 I40E_RX_PTYPE_TUNNEL_END_##TE, \
416 I40E_RX_PTYPE_##TEF, \
417 I40E_RX_PTYPE_INNER_PROT_##I, \
418 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
419
420#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
421
422/* shorter macros make the table fit but are terse */
423#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
424#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
425#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
426
427/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
428struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
429 /* L2 Packet types */
430 I40E_PTT_UNUSED_ENTRY(0),
431 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
432 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
433 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
434 I40E_PTT_UNUSED_ENTRY(4),
435 I40E_PTT_UNUSED_ENTRY(5),
436 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
437 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
438 I40E_PTT_UNUSED_ENTRY(8),
439 I40E_PTT_UNUSED_ENTRY(9),
440 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
441 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
442 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
443 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
444 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
445 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
446 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
447 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
448 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
449 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
450 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
451 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
452
453 /* Non Tunneled IPv4 */
454 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
455 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
456 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
457 I40E_PTT_UNUSED_ENTRY(25),
458 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
459 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
460 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
461
462 /* IPv4 --> IPv4 */
463 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
464 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
465 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
466 I40E_PTT_UNUSED_ENTRY(32),
467 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
468 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
469 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
470
471 /* IPv4 --> IPv6 */
472 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
473 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
474 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
475 I40E_PTT_UNUSED_ENTRY(39),
476 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
477 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
478 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
479
480 /* IPv4 --> GRE/NAT */
481 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
482
483 /* IPv4 --> GRE/NAT --> IPv4 */
484 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
485 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
486 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
487 I40E_PTT_UNUSED_ENTRY(47),
488 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
489 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
490 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
491
492 /* IPv4 --> GRE/NAT --> IPv6 */
493 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
494 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
495 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
496 I40E_PTT_UNUSED_ENTRY(54),
497 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
498 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
499 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
500
501 /* IPv4 --> GRE/NAT --> MAC */
502 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
503
504 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
505 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
506 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
507 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
508 I40E_PTT_UNUSED_ENTRY(62),
509 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
510 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
511 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
512
513 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
514 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
515 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
516 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
517 I40E_PTT_UNUSED_ENTRY(69),
518 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
519 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
520 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
521
522 /* IPv4 --> GRE/NAT --> MAC/VLAN */
523 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
524
525 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
526 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
527 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
528 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
529 I40E_PTT_UNUSED_ENTRY(77),
530 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
531 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
532 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
533
534 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
535 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
536 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
537 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
538 I40E_PTT_UNUSED_ENTRY(84),
539 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
540 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
541 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
542
543 /* Non Tunneled IPv6 */
544 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
545 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
546 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
547 I40E_PTT_UNUSED_ENTRY(91),
548 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
549 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
550 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
551
552 /* IPv6 --> IPv4 */
553 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
554 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
555 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
556 I40E_PTT_UNUSED_ENTRY(98),
557 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
558 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
559 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
560
561 /* IPv6 --> IPv6 */
562 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
563 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
564 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
565 I40E_PTT_UNUSED_ENTRY(105),
566 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
567 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
568 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
569
570 /* IPv6 --> GRE/NAT */
571 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
572
573 /* IPv6 --> GRE/NAT -> IPv4 */
574 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
575 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
576 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
577 I40E_PTT_UNUSED_ENTRY(113),
578 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
579 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
580 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
581
582 /* IPv6 --> GRE/NAT -> IPv6 */
583 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
584 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
585 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
586 I40E_PTT_UNUSED_ENTRY(120),
587 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
588 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
589 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
590
591 /* IPv6 --> GRE/NAT -> MAC */
592 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
593
594 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
595 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
596 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
597 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
598 I40E_PTT_UNUSED_ENTRY(128),
599 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
600 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
601 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
602
603 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
604 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
605 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
606 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
607 I40E_PTT_UNUSED_ENTRY(135),
608 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
609 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
610 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
611
612 /* IPv6 --> GRE/NAT -> MAC/VLAN */
613 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
614
615 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
616 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
617 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
618 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
619 I40E_PTT_UNUSED_ENTRY(143),
620 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
621 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
622 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
623
624 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
625 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
626 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
627 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
628 I40E_PTT_UNUSED_ENTRY(150),
629 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
630 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
631 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
632
633 /* unused entries */
634 [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
635};
636
637/**
638 * i40e_init_shared_code - Initialize the shared code
639 * @hw: pointer to hardware structure
640 *
641 * This assigns the MAC type and PHY code and inits the NVM.
642 * Does not touch the hardware. This function must be called prior to any
643 * other function in the shared code. The i40e_hw structure should be
644 * memset to 0 prior to calling this function. The following fields in
645 * hw structure should be filled in prior to calling this function:
646 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
647 * subsystem_vendor_id, and revision_id
648 **/
649int i40e_init_shared_code(struct i40e_hw *hw)
650{
651 u32 port, ari, func_rid;
652 int status = 0;
653
654 i40e_set_mac_type(hw);
655
656 switch (hw->mac.type) {
657 case I40E_MAC_XL710:
658 case I40E_MAC_X722:
659 break;
660 default:
661 return -ENODEV;
662 }
663
664 hw->phy.get_link_info = true;
665
666 /* Determine port number and PF number */
667 port = FIELD_GET(I40E_PFGEN_PORTNUM_PORT_NUM_MASK,
668 rd32(hw, I40E_PFGEN_PORTNUM));
669 hw->port = (u8)port;
670 ari = FIELD_GET(I40E_GLPCI_CAPSUP_ARI_EN_MASK,
671 rd32(hw, I40E_GLPCI_CAPSUP));
672 func_rid = rd32(hw, I40E_PF_FUNC_RID);
673 if (ari)
674 hw->pf_id = (u8)(func_rid & 0xff);
675 else
676 hw->pf_id = (u8)(func_rid & 0x7);
677
678 status = i40e_init_nvm(hw);
679 return status;
680}
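
/* Typical probe-time sequence (sketch; the BAR mapping and the PCI device
 * fields come from the caller's probe path and are assumed here):
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->hw_addr = hw_addr;
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *
 *	err = i40e_init_shared_code(hw);
 *
 * as required by the field list in the kernel-doc above.
 */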
681
682/**
683 * i40e_aq_mac_address_read - Retrieve the MAC addresses
684 * @hw: pointer to the hw struct
685 * @flags: a return indicator of what addresses were added to the addr store
686 * @addrs: the requestor's mac addr store
687 * @cmd_details: pointer to command details structure or NULL
688 **/
689static int
690i40e_aq_mac_address_read(struct i40e_hw *hw,
691 u16 *flags,
692 struct i40e_aqc_mac_address_read_data *addrs,
693 struct i40e_asq_cmd_details *cmd_details)
694{
695 struct i40e_aq_desc desc;
696 struct i40e_aqc_mac_address_read *cmd_data =
697 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
698 int status;
699
700 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
701 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
702
703 status = i40e_asq_send_command(hw, &desc, addrs,
704 sizeof(*addrs), cmd_details);
705 *flags = le16_to_cpu(cmd_data->command_flags);
706
707 return status;
708}
709
710/**
711 * i40e_aq_mac_address_write - Change the MAC addresses
712 * @hw: pointer to the hw struct
713 * @flags: indicates which MAC to be written
714 * @mac_addr: address to write
715 * @cmd_details: pointer to command details structure or NULL
716 **/
717int i40e_aq_mac_address_write(struct i40e_hw *hw,
718 u16 flags, u8 *mac_addr,
719 struct i40e_asq_cmd_details *cmd_details)
720{
721 struct i40e_aq_desc desc;
722 struct i40e_aqc_mac_address_write *cmd_data =
723 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
724 int status;
725
726 i40e_fill_default_direct_cmd_desc(&desc,
727 i40e_aqc_opc_mac_address_write);
728 cmd_data->command_flags = cpu_to_le16(flags);
729 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
730 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
731 ((u32)mac_addr[3] << 16) |
732 ((u32)mac_addr[4] << 8) |
733 mac_addr[5]);
734
735 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
736
737 return status;
738}
739
740/**
741 * i40e_get_mac_addr - get MAC address
742 * @hw: pointer to the HW structure
743 * @mac_addr: pointer to MAC address
744 *
745 * Reads the adapter's MAC address from register
746 **/
747int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
748{
749 struct i40e_aqc_mac_address_read_data addrs;
750 u16 flags = 0;
751 int status;
752
753 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
754
755 if (flags & I40E_AQC_LAN_ADDR_VALID)
756 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
757
758 return status;
759}
760
761/**
762 * i40e_get_port_mac_addr - get Port MAC address
763 * @hw: pointer to the HW structure
764 * @mac_addr: pointer to Port MAC address
765 *
766 * Reads the adapter's Port MAC address
767 **/
768int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
769{
770 struct i40e_aqc_mac_address_read_data addrs;
771 u16 flags = 0;
772 int status;
773
774 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
775 if (status)
776 return status;
777
778 if (flags & I40E_AQC_PORT_ADDR_VALID)
779 ether_addr_copy(mac_addr, addrs.port_mac);
780 else
781 status = -EINVAL;
782
783 return status;
784}
785
786/**
787 * i40e_pre_tx_queue_cfg - pre tx queue configure
788 * @hw: pointer to the HW structure
789 * @queue: target PF queue index
790 * @enable: state change request
791 *
792 * Handles hw requirement to indicate intention to enable
793 * or disable target queue.
794 **/
795void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
796{
797 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
798 u32 reg_block = 0;
799 u32 reg_val;
800
801 if (abs_queue_idx >= 128) {
802 reg_block = abs_queue_idx / 128;
803 abs_queue_idx %= 128;
804 }
805
806 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
807 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
808 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
809
810 if (enable)
811 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
812 else
813 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
814
815 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
816}
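
/* Worked example of the block math above: with func_caps.base_queue = 192
 * and queue = 40, abs_queue_idx = 232, which selects GLLAN_TXPRE_QDIS
 * register block 232 / 128 = 1 with in-block index 232 % 128 = 104.
 */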
817
818/**
819 * i40e_get_pba_string - Reads part number string from EEPROM
820 * @hw: pointer to hardware structure
821 *
822 * Reads the part number string from the EEPROM and stores it
823 * into newly allocated buffer and saves resulting pointer
824 * to i40e_hw->pba_id field.
825 **/
826void i40e_get_pba_string(struct i40e_hw *hw)
827{
828#define I40E_NVM_PBA_FLAGS_BLK_PRESENT 0xFAFA
829 u16 pba_word = 0;
830 u16 pba_size = 0;
831 u16 pba_ptr = 0;
832 int status;
833 char *ptr;
834 u16 i;
835
836 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
837 if (status) {
838 hw_dbg(hw, "Failed to read PBA flags.\n");
839 return;
840 }
841 if (pba_word != I40E_NVM_PBA_FLAGS_BLK_PRESENT) {
842 hw_dbg(hw, "PBA block is not present.\n");
843 return;
844 }
845
846 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
847 if (status) {
848 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
849 return;
850 }
851
852 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
853 if (status) {
854 hw_dbg(hw, "Failed to read PBA Block size.\n");
855 return;
856 }
857
858 /* Subtract one to get PBA word count (PBA Size word is included in
859 * total size) and advance pointer to first PBA word.
860 */
861 pba_size--;
862 pba_ptr++;
863 if (!pba_size) {
864 hw_dbg(hw, "PBA ID is empty.\n");
865 return;
866 }
867
868 ptr = devm_kzalloc(i40e_hw_to_dev(hw), pba_size * 2 + 1, GFP_KERNEL);
869 if (!ptr)
870 return;
871 hw->pba_id = ptr;
872
873 for (i = 0; i < pba_size; i++) {
874 status = i40e_read_nvm_word(hw, pba_ptr + i, &pba_word);
875 if (status) {
876 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
877 devm_kfree(i40e_hw_to_dev(hw), hw->pba_id);
878 hw->pba_id = NULL;
879 return;
880 }
881
882 *ptr++ = (pba_word >> 8) & 0xFF;
883 *ptr++ = pba_word & 0xFF;
884 }
885}
886
887/**
888 * i40e_get_media_type - Gets media type
889 * @hw: pointer to the hardware structure
890 **/
891static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
892{
893 enum i40e_media_type media;
894
895 switch (hw->phy.link_info.phy_type) {
896 case I40E_PHY_TYPE_10GBASE_SR:
897 case I40E_PHY_TYPE_10GBASE_LR:
898 case I40E_PHY_TYPE_1000BASE_SX:
899 case I40E_PHY_TYPE_1000BASE_LX:
900 case I40E_PHY_TYPE_40GBASE_SR4:
901 case I40E_PHY_TYPE_40GBASE_LR4:
902 case I40E_PHY_TYPE_25GBASE_LR:
903 case I40E_PHY_TYPE_25GBASE_SR:
904 media = I40E_MEDIA_TYPE_FIBER;
905 break;
906 case I40E_PHY_TYPE_100BASE_TX:
907 case I40E_PHY_TYPE_1000BASE_T:
908 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
909 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
910 case I40E_PHY_TYPE_10GBASE_T:
911 media = I40E_MEDIA_TYPE_BASET;
912 break;
913 case I40E_PHY_TYPE_10GBASE_CR1_CU:
914 case I40E_PHY_TYPE_40GBASE_CR4_CU:
915 case I40E_PHY_TYPE_10GBASE_CR1:
916 case I40E_PHY_TYPE_40GBASE_CR4:
917 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
918 case I40E_PHY_TYPE_40GBASE_AOC:
919 case I40E_PHY_TYPE_10GBASE_AOC:
920 case I40E_PHY_TYPE_25GBASE_CR:
921 case I40E_PHY_TYPE_25GBASE_AOC:
922 case I40E_PHY_TYPE_25GBASE_ACC:
923 media = I40E_MEDIA_TYPE_DA;
924 break;
925 case I40E_PHY_TYPE_1000BASE_KX:
926 case I40E_PHY_TYPE_10GBASE_KX4:
927 case I40E_PHY_TYPE_10GBASE_KR:
928 case I40E_PHY_TYPE_40GBASE_KR4:
929 case I40E_PHY_TYPE_20GBASE_KR2:
930 case I40E_PHY_TYPE_25GBASE_KR:
931 media = I40E_MEDIA_TYPE_BACKPLANE;
932 break;
933 case I40E_PHY_TYPE_SGMII:
934 case I40E_PHY_TYPE_XAUI:
935 case I40E_PHY_TYPE_XFI:
936 case I40E_PHY_TYPE_XLAUI:
937 case I40E_PHY_TYPE_XLPPI:
938 default:
939 media = I40E_MEDIA_TYPE_UNKNOWN;
940 break;
941 }
942
943 return media;
944}
945
946/**
947 * i40e_poll_globr - Poll for Global Reset completion
948 * @hw: pointer to the hardware structure
949 * @retry_limit: how many times to retry before failure
950 **/
951static int i40e_poll_globr(struct i40e_hw *hw,
952 u32 retry_limit)
953{
954 u32 cnt, reg = 0;
955
956 for (cnt = 0; cnt < retry_limit; cnt++) {
957 reg = rd32(hw, I40E_GLGEN_RSTAT);
958 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
959 return 0;
960 msleep(100);
961 }
962
963 hw_dbg(hw, "Global reset failed.\n");
964 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
965
966 return -EIO;
967}
968
969#define I40E_PF_RESET_WAIT_COUNT_A0 200
970#define I40E_PF_RESET_WAIT_COUNT 200
971/**
972 * i40e_pf_reset - Reset the PF
973 * @hw: pointer to the hardware structure
974 *
975 * Assuming someone else has triggered a global reset,
976 * assure the global reset is complete and then reset the PF
977 **/
978int i40e_pf_reset(struct i40e_hw *hw)
979{
980 u32 cnt = 0;
981 u32 cnt1 = 0;
982 u32 reg = 0;
983 u32 grst_del;
984
985 /* Poll for Global Reset steady state in case of recent GRST.
986 * The grst delay value is in 100ms units, and we'll wait a
987 * couple counts longer to be sure we don't just miss the end.
988 */
989 grst_del = FIELD_GET(I40E_GLGEN_RSTCTL_GRSTDEL_MASK,
990 rd32(hw, I40E_GLGEN_RSTCTL));
991
992 /* It can take up to 15 secs for GRST steady state.
993 * Bump it to 16 secs max to be safe.
994 */
995 grst_del = grst_del * 20;
996
997 for (cnt = 0; cnt < grst_del; cnt++) {
998 reg = rd32(hw, I40E_GLGEN_RSTAT);
999 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1000 break;
1001 msleep(100);
1002 }
1003 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1004 hw_dbg(hw, "Global reset polling failed to complete.\n");
1005 return -EIO;
1006 }
1007
1008 /* Now Wait for the FW to be ready */
1009 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1010 reg = rd32(hw, I40E_GLNVM_ULD);
1011 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1012 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1013 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1014 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1015 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1016 break;
1017 }
1018 usleep_range(10000, 20000);
1019 }
1020 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1021 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1022 hw_dbg(hw, "wait for FW Reset complete timed out\n");
1023 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1024 return -EIO;
1025 }
1026
1027 /* If there was a Global Reset in progress when we got here,
1028 * we don't need to do the PF Reset
1029 */
1030 if (!cnt) {
1031 u32 reg2 = 0;
1032 if (hw->revision_id == 0)
1033 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1034 else
1035 cnt = I40E_PF_RESET_WAIT_COUNT;
1036 reg = rd32(hw, I40E_PFGEN_CTRL);
1037 wr32(hw, I40E_PFGEN_CTRL,
1038 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1039 for (; cnt; cnt--) {
1040 reg = rd32(hw, I40E_PFGEN_CTRL);
1041 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1042 break;
1043 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1044 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1045 break;
1046 usleep_range(1000, 2000);
1047 }
1048 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1049 if (i40e_poll_globr(hw, grst_del))
1050 return -EIO;
1051 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1052 hw_dbg(hw, "PF reset polling failed to complete.\n");
1053 return -EIO;
1054 }
1055 }
1056
1057 i40e_clear_pxe_mode(hw);
1058
1059 return 0;
1060}
1061
1062/**
1063 * i40e_clear_hw - clear out any left over hw state
1064 * @hw: pointer to the hw struct
1065 *
1066 * Clear queues and interrupts, typically called at init time,
1067 * but after the capabilities have been found so we know how many
1068 * queues and msix vectors have been allocated.
1069 **/
1070void i40e_clear_hw(struct i40e_hw *hw)
1071{
1072 u32 num_queues, base_queue;
1073 u32 num_pf_int;
1074 u32 num_vf_int;
1075 u32 num_vfs;
1076 u32 i, j;
1077 u32 val;
1078 u32 eol = 0x7ff;
1079
1080 /* get number of interrupts, queues, and VFs */
1081 val = rd32(hw, I40E_GLPCI_CNF2);
1082 num_pf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_PF_N_MASK, val);
1083 num_vf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_VF_N_MASK, val);
1084
1085 val = rd32(hw, I40E_PFLAN_QALLOC);
1086 base_queue = FIELD_GET(I40E_PFLAN_QALLOC_FIRSTQ_MASK, val);
1087 j = FIELD_GET(I40E_PFLAN_QALLOC_LASTQ_MASK, val);
1088 if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
1089 num_queues = (j - base_queue) + 1;
1090 else
1091 num_queues = 0;
1092
1093 val = rd32(hw, I40E_PF_VT_PFALLOC);
1094 i = FIELD_GET(I40E_PF_VT_PFALLOC_FIRSTVF_MASK, val);
1095 j = FIELD_GET(I40E_PF_VT_PFALLOC_LASTVF_MASK, val);
1096 if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
1097 num_vfs = (j - i) + 1;
1098 else
1099 num_vfs = 0;
1100
1101 /* stop all the interrupts */
1102 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1103 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1104 for (i = 0; i < num_pf_int - 2; i++)
1105 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1106
1107 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1108 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1109 wr32(hw, I40E_PFINT_LNKLST0, val);
1110 for (i = 0; i < num_pf_int - 2; i++)
1111 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1112 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1113 for (i = 0; i < num_vfs; i++)
1114 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1115 for (i = 0; i < num_vf_int - 2; i++)
1116 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1117
1118 /* warn the HW of the coming Tx disables */
1119 for (i = 0; i < num_queues; i++) {
1120 u32 abs_queue_idx = base_queue + i;
1121 u32 reg_block = 0;
1122
1123 if (abs_queue_idx >= 128) {
1124 reg_block = abs_queue_idx / 128;
1125 abs_queue_idx %= 128;
1126 }
1127
1128 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1129 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1130 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1131 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1132
1133 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1134 }
1135 udelay(400);
1136
1137 /* stop all the queues */
1138 for (i = 0; i < num_queues; i++) {
1139 wr32(hw, I40E_QINT_TQCTL(i), 0);
1140 wr32(hw, I40E_QTX_ENA(i), 0);
1141 wr32(hw, I40E_QINT_RQCTL(i), 0);
1142 wr32(hw, I40E_QRX_ENA(i), 0);
1143 }
1144
1145 /* short wait for all queue disables to settle */
1146 udelay(50);
1147}
1148
1149/**
1150 * i40e_clear_pxe_mode - clear pxe operations mode
1151 * @hw: pointer to the hw struct
1152 *
1153 * Make sure all PXE mode settings are cleared, including things
1154 * like descriptor fetch/write-back mode.
1155 **/
1156void i40e_clear_pxe_mode(struct i40e_hw *hw)
1157{
1158 u32 reg;
1159
1160 if (i40e_check_asq_alive(hw))
1161 i40e_aq_clear_pxe_mode(hw, NULL);
1162
1163 /* Clear single descriptor fetch/write-back mode */
1164 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1165
1166 if (hw->revision_id == 0) {
1167 /* As a work around clear PXE_MODE instead of setting it */
1168 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1169 } else {
1170 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1171 }
1172}
1173
1174/**
1175 * i40e_led_is_mine - helper to find matching led
1176 * @hw: pointer to the hw struct
1177 * @idx: index into GPIO registers
1178 *
1179 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1180 */
1181static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1182{
1183 u32 gpio_val = 0;
1184 u32 port;
1185
1186 if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1187 !hw->func_caps.led[idx])
1188 return 0;
1189 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1190 port = FIELD_GET(I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK, gpio_val);
1191
1192 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1193 * if it is not our port then ignore
1194 */
1195 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1196 (port != hw->port))
1197 return 0;
1198
1199 return gpio_val;
1200}
1201
1202#define I40E_FW_LED BIT(4)
1203#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1204 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1205
1206#define I40E_LED0 22
1207
1208#define I40E_PIN_FUNC_SDP 0x0
1209#define I40E_PIN_FUNC_LED 0x1
1210
1211/**
1212 * i40e_led_get - return current on/off mode
1213 * @hw: pointer to the hw struct
1214 *
1215 * The value returned is the 'mode' field as defined in the
1216 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1217 * values are variations of possible behaviors relating to
1218 * blink, link, and wire.
1219 **/
1220u32 i40e_led_get(struct i40e_hw *hw)
1221{
1222 u32 mode = 0;
1223 int i;
1224
1225 /* as per the documentation GPIO 22-29 are the LED
1226 * GPIO pins named LED0..LED7
1227 */
1228 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1229 u32 gpio_val = i40e_led_is_mine(hw, i);
1230
1231 if (!gpio_val)
1232 continue;
1233
1234 mode = FIELD_GET(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK, gpio_val);
1235 break;
1236 }
1237
1238 return mode;
1239}
1240
1241/**
1242 * i40e_led_set - set new on/off mode
1243 * @hw: pointer to the hw struct
1244 * @mode: 0=off, 0xf=on (else see manual for mode details)
1245 * @blink: true if the LED should blink when on, false if steady
1246 *
1247 * If this function is used to turn on blinking, it should also be
1248 * used to turn blinking off when restoring the original state.
1249 **/
1250void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1251{
1252 int i;
1253
1254 if (mode & ~I40E_LED_MODE_VALID) {
1255 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1256 return;
1257 }
1258
1259 /* as per the documentation GPIO 22-29 are the LED
1260 * GPIO pins named LED0..LED7
1261 */
1262 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1263 u32 gpio_val = i40e_led_is_mine(hw, i);
1264
1265 if (!gpio_val)
1266 continue;
1267
1268 if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1269 u32 pin_func = 0;
1270
1271 if (mode & I40E_FW_LED)
1272 pin_func = I40E_PIN_FUNC_SDP;
1273 else
1274 pin_func = I40E_PIN_FUNC_LED;
1275
1276 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1277 gpio_val |=
1278 FIELD_PREP(I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK,
1279 pin_func);
1280 }
1281 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1282 /* this & is a bit of paranoia, but serves as a range check */
1283 gpio_val |= FIELD_PREP(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK,
1284 mode);
1285
1286 if (blink)
1287 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1288 else
1289 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1290
1291 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1292 break;
1293 }
1294}
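
/* Illustrative identify-port sequence (the ethtool-style caller context is
 * assumed; "orig_mode" is a placeholder):
 *
 *	orig_mode = i40e_led_get(hw);
 *	i40e_led_set(hw, 0xf, true);		(solid-on mode, blinking)
 *	...
 *	i40e_led_set(hw, orig_mode, false);	(restore original state)
 */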
1295
1296/* Admin command wrappers */
1297
1298/**
1299 * i40e_aq_get_phy_capabilities
1300 * @hw: pointer to the hw struct
1301 * @abilities: structure for PHY capabilities to be filled
1302 * @qualified_modules: report Qualified Modules
1303 * @report_init: report init capabilities (active are default)
1304 * @cmd_details: pointer to command details structure or NULL
1305 *
1306 * Returns the various PHY abilities supported on the Port.
1307 **/
1308int
1309i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1310 bool qualified_modules, bool report_init,
1311 struct i40e_aq_get_phy_abilities_resp *abilities,
1312 struct i40e_asq_cmd_details *cmd_details)
1313{
1314 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1315 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1316 struct i40e_aq_desc desc;
1317 int status;
1318
1319 if (!abilities)
1320 return -EINVAL;
1321
1322 do {
1323 i40e_fill_default_direct_cmd_desc(&desc,
1324 i40e_aqc_opc_get_phy_abilities);
1325
1326 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1327 if (abilities_size > I40E_AQ_LARGE_BUF)
1328 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1329
1330 if (qualified_modules)
1331 desc.params.external.param0 |=
1332 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1333
1334 if (report_init)
1335 desc.params.external.param0 |=
1336 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1337
1338 status = i40e_asq_send_command(hw, &desc, abilities,
1339 abilities_size, cmd_details);
1340
1341 switch (hw->aq.asq_last_status) {
1342 case I40E_AQ_RC_EIO:
1343 status = -EIO;
1344 break;
1345 case I40E_AQ_RC_EAGAIN:
1346 usleep_range(1000, 2000);
1347 total_delay++;
1348 status = -EIO;
1349 break;
1350 /* also covers I40E_AQ_RC_OK */
1351 default:
1352 break;
1353 }
1354
1355 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1356 (total_delay < max_delay));
1357
1358 if (status)
1359 return status;
1360
1361 if (report_init) {
1362 if (hw->mac.type == I40E_MAC_XL710 &&
1363 i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
1364 I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
1365 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1366 } else {
1367 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1368 hw->phy.phy_types |=
1369 ((u64)abilities->phy_type_ext << 32);
1370 }
1371 }
1372
1373 return status;
1374}
1375
1376/**
1377 * i40e_aq_set_phy_config
1378 * @hw: pointer to the hw struct
1379 * @config: structure with PHY configuration to be set
1380 * @cmd_details: pointer to command details structure or NULL
1381 *
1382 * Set the various PHY configuration parameters
1383 * supported on the Port. One or more of the Set PHY config parameters may be
1384 * ignored in an MFP mode as the PF may not have the privilege to set some
1385 * of the PHY Config parameters. This status will be indicated by the
1386 * command response.
1387 **/
1388int i40e_aq_set_phy_config(struct i40e_hw *hw,
1389 struct i40e_aq_set_phy_config *config,
1390 struct i40e_asq_cmd_details *cmd_details)
1391{
1392 struct i40e_aq_desc desc;
1393 struct i40e_aq_set_phy_config *cmd =
1394 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1395 int status;
1396
1397 if (!config)
1398 return -EINVAL;
1399
1400 i40e_fill_default_direct_cmd_desc(&desc,
1401 i40e_aqc_opc_set_phy_config);
1402
1403 *cmd = *config;
1404
1405 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1406
1407 return status;
1408}
1409
1410static noinline_for_stack int
1411i40e_set_fc_status(struct i40e_hw *hw,
1412 struct i40e_aq_get_phy_abilities_resp *abilities,
1413 bool atomic_restart)
1414{
1415 struct i40e_aq_set_phy_config config;
1416 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1417 u8 pause_mask = 0x0;
1418
1419 switch (fc_mode) {
1420 case I40E_FC_FULL:
1421 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1422 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1423 break;
1424 case I40E_FC_RX_PAUSE:
1425 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1426 break;
1427 case I40E_FC_TX_PAUSE:
1428 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1429 break;
1430 default:
1431 break;
1432 }
1433
1434 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1435 /* clear the old pause settings */
1436 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1437 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1438 /* set the new abilities */
1439 config.abilities |= pause_mask;
1440 /* If the abilities have changed, then set the new config */
1441 if (config.abilities == abilities->abilities)
1442 return 0;
1443
1444 /* Auto restart link so settings take effect */
1445 if (atomic_restart)
1446 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1447 /* Copy over all the old settings */
1448 config.phy_type = abilities->phy_type;
1449 config.phy_type_ext = abilities->phy_type_ext;
1450 config.link_speed = abilities->link_speed;
1451 config.eee_capability = abilities->eee_capability;
1452 config.eeer = abilities->eeer_val;
1453 config.low_power_ctrl = abilities->d3_lpan;
1454 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1455 I40E_AQ_PHY_FEC_CONFIG_MASK;
1456
1457 return i40e_aq_set_phy_config(hw, &config, NULL);
1458}
1459
1460/**
1461 * i40e_set_fc
1462 * @hw: pointer to the hw struct
1463 * @aq_failures: buffer to return AdminQ failure information
1464 * @atomic_restart: whether to enable atomic link restart
1465 *
1466 * Set the requested flow control mode using set_phy_config.
1467 **/
1468int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1469 bool atomic_restart)
1470{
1471 struct i40e_aq_get_phy_abilities_resp abilities;
1472 int status;
1473
1474 *aq_failures = 0x0;
1475
1476 /* Get the current phy config */
1477 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1478 NULL);
1479 if (status) {
1480 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1481 return status;
1482 }
1483
1484 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1485 if (status)
1486 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1487
1488 /* Update the link info */
1489 status = i40e_update_link_info(hw);
1490 if (status) {
1491 /* Wait a little bit (on 40G cards it sometimes takes a really
1492 * long time for link to come back from the atomic reset)
1493 * and try once more
1494 */
1495 msleep(1000);
1496 status = i40e_update_link_info(hw);
1497 }
1498 if (status)
1499 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1500
1501 return status;
1502}
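
/* Minimal usage sketch (caller-side, assumed context): request full flow
 * control and restart the link atomically so the change takes effect.
 *
 *	u8 aq_failures;
 *	int err;
 *
 *	hw->fc.requested_mode = I40E_FC_FULL;
 *	err = i40e_set_fc(hw, &aq_failures, true);
 *
 * On failure, the aq_failures bits (I40E_SET_FC_AQ_FAIL_GET/_SET/_UPDATE)
 * identify which of the three steps in the function above reported the
 * error.
 */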
1503
1504/**
1505 * i40e_aq_clear_pxe_mode
1506 * @hw: pointer to the hw struct
1507 * @cmd_details: pointer to command details structure or NULL
1508 *
1509 * Tell the firmware that the driver is taking over from PXE
1510 **/
1511int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1512 struct i40e_asq_cmd_details *cmd_details)
1513{
1514 struct i40e_aq_desc desc;
1515 struct i40e_aqc_clear_pxe *cmd =
1516 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1517 int status;
1518
1519 i40e_fill_default_direct_cmd_desc(&desc,
1520 i40e_aqc_opc_clear_pxe_mode);
1521
1522 cmd->rx_cnt = 0x2;
1523
1524 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1525
1526 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1527
1528 return status;
1529}
1530
1531/**
1532 * i40e_aq_set_link_restart_an
1533 * @hw: pointer to the hw struct
1534 * @enable_link: if true: enable link, if false: disable link
1535 * @cmd_details: pointer to command details structure or NULL
1536 *
1537 * Sets up the link and restarts the Auto-Negotiation over the link.
1538 **/
1539int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1540 bool enable_link,
1541 struct i40e_asq_cmd_details *cmd_details)
1542{
1543 struct i40e_aq_desc desc;
1544 struct i40e_aqc_set_link_restart_an *cmd =
1545 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1546 int status;
1547
1548 i40e_fill_default_direct_cmd_desc(&desc,
1549 i40e_aqc_opc_set_link_restart_an);
1550
1551 cmd->command = I40E_AQ_PHY_RESTART_AN;
1552 if (enable_link)
1553 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1554 else
1555 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1556
1557 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1558
1559 return status;
1560}
1561
1562/**
1563 * i40e_aq_get_link_info
1564 * @hw: pointer to the hw struct
1565 * @enable_lse: enable/disable LinkStatusEvent reporting
1566 * @link: pointer to link status structure - optional
1567 * @cmd_details: pointer to command details structure or NULL
1568 *
1569 * Returns the link status of the adapter.
1570 **/
1571int i40e_aq_get_link_info(struct i40e_hw *hw,
1572 bool enable_lse, struct i40e_link_status *link,
1573 struct i40e_asq_cmd_details *cmd_details)
1574{
1575 struct i40e_aq_desc desc;
1576 struct i40e_aqc_get_link_status *resp =
1577 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1578 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1579 bool tx_pause, rx_pause;
1580 u16 command_flags;
1581 int status;
1582
1583 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1584
1585 if (enable_lse)
1586 command_flags = I40E_AQ_LSE_ENABLE;
1587 else
1588 command_flags = I40E_AQ_LSE_DISABLE;
1589 resp->command_flags = cpu_to_le16(command_flags);
1590
1591 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1592
1593 if (status)
1594 goto aq_get_link_info_exit;
1595
1596 /* save off old link status information */
1597 hw->phy.link_info_old = *hw_link_info;
1598
1599 /* update link status */
1600 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1601 hw->phy.media_type = i40e_get_media_type(hw);
1602 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1603 hw_link_info->link_info = resp->link_info;
1604 hw_link_info->an_info = resp->an_info;
1605 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1606 I40E_AQ_CONFIG_FEC_RS_ENA);
1607 hw_link_info->ext_info = resp->ext_info;
1608 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1609 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1610 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1611
1612 /* update fc info */
1613 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1614 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1615 if (tx_pause & rx_pause)
1616 hw->fc.current_mode = I40E_FC_FULL;
1617 else if (tx_pause)
1618 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1619 else if (rx_pause)
1620 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1621 else
1622 hw->fc.current_mode = I40E_FC_NONE;
1623
1624 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1625 hw_link_info->crc_enable = true;
1626 else
1627 hw_link_info->crc_enable = false;
1628
1629 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1630 hw_link_info->lse_enable = true;
1631 else
1632 hw_link_info->lse_enable = false;
1633
1634 if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) &&
1635 hw_link_info->phy_type == 0xE)
1636 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1637
1638 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) &&
1639 hw->mac.type != I40E_MAC_X722) {
1640 __le32 tmp;
1641
1642 memcpy(&tmp, resp->link_type, sizeof(tmp));
1643 hw->phy.phy_types = le32_to_cpu(tmp);
1644 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1645 }
1646
1647 /* save link status information */
1648 if (link)
1649 *link = *hw_link_info;
1650
1651 /* flag cleared so helper functions don't call AQ again */
1652 hw->phy.get_link_info = false;
1653
1654aq_get_link_info_exit:
1655 return status;
1656}
1657
1658/**
1659 * i40e_aq_set_phy_int_mask
1660 * @hw: pointer to the hw struct
1661 * @mask: interrupt mask to be set
1662 * @cmd_details: pointer to command details structure or NULL
1663 *
1664 * Set link interrupt mask.
1665 **/
1666int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1667 u16 mask,
1668 struct i40e_asq_cmd_details *cmd_details)
1669{
1670 struct i40e_aq_desc desc;
1671 struct i40e_aqc_set_phy_int_mask *cmd =
1672 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1673 int status;
1674
1675 i40e_fill_default_direct_cmd_desc(&desc,
1676 i40e_aqc_opc_set_phy_int_mask);
1677
1678 cmd->event_mask = cpu_to_le16(mask);
1679
1680 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1681
1682 return status;
1683}
1684
1685/**
1686 * i40e_aq_set_mac_loopback
1687 * @hw: pointer to the HW struct
1688 * @ena_lpbk: Enable or Disable loopback
1689 * @cmd_details: pointer to command details structure or NULL
1690 *
1691 * Enable/disable loopback on a given port
1692 */
1693int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
1694 struct i40e_asq_cmd_details *cmd_details)
1695{
1696 struct i40e_aq_desc desc;
1697 struct i40e_aqc_set_lb_mode *cmd =
1698 (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
1699
1700 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
1701 if (ena_lpbk) {
1702 if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
1703 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
1704 else
1705 cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
1706 }
1707
1708 return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1709}
1710
1711/**
1712 * i40e_aq_set_phy_debug
1713 * @hw: pointer to the hw struct
1714 * @cmd_flags: debug command flags
1715 * @cmd_details: pointer to command details structure or NULL
1716 *
1717 * Set PHY debug command flags, e.g. to reset the external PHY.
1718 **/
1719int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1720 struct i40e_asq_cmd_details *cmd_details)
1721{
1722 struct i40e_aq_desc desc;
1723 struct i40e_aqc_set_phy_debug *cmd =
1724 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1725 int status;
1726
1727 i40e_fill_default_direct_cmd_desc(&desc,
1728 i40e_aqc_opc_set_phy_debug);
1729
1730 cmd->command_flags = cmd_flags;
1731
1732 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1733
1734 return status;
1735}
1736
1737/**
1738 * i40e_aq_add_vsi
1739 * @hw: pointer to the hw struct
1740 * @vsi_ctx: pointer to a vsi context struct
1741 * @cmd_details: pointer to command details structure or NULL
1742 *
1743 * Add a VSI context to the hardware.
1744 **/
1745int i40e_aq_add_vsi(struct i40e_hw *hw,
1746 struct i40e_vsi_context *vsi_ctx,
1747 struct i40e_asq_cmd_details *cmd_details)
1748{
1749 struct i40e_aq_desc desc;
1750 struct i40e_aqc_add_get_update_vsi *cmd =
1751 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1752 struct i40e_aqc_add_get_update_vsi_completion *resp =
1753 (struct i40e_aqc_add_get_update_vsi_completion *)
1754 &desc.params.raw;
1755 int status;
1756
1757 i40e_fill_default_direct_cmd_desc(&desc,
1758 i40e_aqc_opc_add_vsi);
1759
1760 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1761 cmd->connection_type = vsi_ctx->connection_type;
1762 cmd->vf_id = vsi_ctx->vf_num;
1763 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1764
1765 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1766
1767 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
1768 sizeof(vsi_ctx->info),
1769 cmd_details, true);
1770
1771 if (status)
1772 goto aq_add_vsi_exit;
1773
1774 vsi_ctx->seid = le16_to_cpu(resp->seid);
1775 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1776 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1777 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1778
1779aq_add_vsi_exit:
1780 return status;
1781}
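
/*
 * Usage sketch (illustrative only; the SEID, connection type and flags
 * shown are hypothetical and depend on the caller's switch topology):
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.uplink_seid = uplink_seid;
 *	ctx.connection_type = 0x1;
 *	ctx.flags = I40E_AQ_VSI_TYPE_PF;
 *	err = i40e_aq_add_vsi(hw, &ctx, NULL);
 *	if (!err)
 *		new_vsi_seid = ctx.seid;
 *
 * On success firmware fills in ctx.seid and ctx.vsi_number, which later
 * calls such as i40e_aq_update_vsi_params() expect.
 */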
1782
1783/**
1784 * i40e_aq_set_default_vsi
1785 * @hw: pointer to the hw struct
1786 * @seid: vsi number
1787 * @cmd_details: pointer to command details structure or NULL
1788 **/
1789int i40e_aq_set_default_vsi(struct i40e_hw *hw,
1790 u16 seid,
1791 struct i40e_asq_cmd_details *cmd_details)
1792{
1793 struct i40e_aq_desc desc;
1794 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1795 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1796 &desc.params.raw;
1797 int status;
1798
1799 i40e_fill_default_direct_cmd_desc(&desc,
1800 i40e_aqc_opc_set_vsi_promiscuous_modes);
1801
1802 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1803 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1804 cmd->seid = cpu_to_le16(seid);
1805
1806 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1807
1808 return status;
1809}
1810
1811/**
1812 * i40e_aq_clear_default_vsi
1813 * @hw: pointer to the hw struct
1814 * @seid: vsi number
1815 * @cmd_details: pointer to command details structure or NULL
1816 **/
1817int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
1818 u16 seid,
1819 struct i40e_asq_cmd_details *cmd_details)
1820{
1821 struct i40e_aq_desc desc;
1822 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1823 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1824 &desc.params.raw;
1825 int status;
1826
1827 i40e_fill_default_direct_cmd_desc(&desc,
1828 i40e_aqc_opc_set_vsi_promiscuous_modes);
1829
1830 cmd->promiscuous_flags = cpu_to_le16(0);
1831 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1832 cmd->seid = cpu_to_le16(seid);
1833
1834 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1835
1836 return status;
1837}
1838
1839/**
1840 * i40e_aq_set_vsi_unicast_promiscuous
1841 * @hw: pointer to the hw struct
1842 * @seid: vsi number
1843 * @set: set unicast promiscuous enable/disable
1844 * @cmd_details: pointer to command details structure or NULL
1845 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
1846 **/
1847int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1848 u16 seid, bool set,
1849 struct i40e_asq_cmd_details *cmd_details,
1850 bool rx_only_promisc)
1851{
1852 struct i40e_aq_desc desc;
1853 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1854 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1855 u16 flags = 0;
1856 int status;
1857
1858 i40e_fill_default_direct_cmd_desc(&desc,
1859 i40e_aqc_opc_set_vsi_promiscuous_modes);
1860
1861 if (set) {
1862 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1863 if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
1864 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1865 }
1866
1867 cmd->promiscuous_flags = cpu_to_le16(flags);
1868
1869 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1870 if (i40e_is_aq_api_ver_ge(hw, 1, 5))
1871 cmd->valid_flags |=
1872 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1873
1874 cmd->seid = cpu_to_le16(seid);
1875 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1876
1877 return status;
1878}
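
/*
 * Usage sketch (illustrative only): enabling receive-only unicast
 * promiscuous mode on a VSI whose SEID was returned by i40e_aq_add_vsi():
 *
 *	err = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi_seid, true,
 *						  NULL, true);
 *
 * With rx_only_promisc set and an AQ API version of 1.5 or newer, the
 * VSI's own egress traffic is not mirrored back to it; on older firmware
 * the RX-only flag is simply not sent.
 */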
1879
1880/**
1881 * i40e_aq_set_vsi_multicast_promiscuous
1882 * @hw: pointer to the hw struct
1883 * @seid: vsi number
1884 * @set: set multicast promiscuous enable/disable
1885 * @cmd_details: pointer to command details structure or NULL
1886 **/
1887int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
1888 u16 seid, bool set,
1889 struct i40e_asq_cmd_details *cmd_details)
1890{
1891 struct i40e_aq_desc desc;
1892 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1893 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1894 u16 flags = 0;
1895 int status;
1896
1897 i40e_fill_default_direct_cmd_desc(&desc,
1898 i40e_aqc_opc_set_vsi_promiscuous_modes);
1899
1900 if (set)
1901 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1902
1903 cmd->promiscuous_flags = cpu_to_le16(flags);
1904
1905 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1906
1907 cmd->seid = cpu_to_le16(seid);
1908 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1909
1910 return status;
1911}
1912
1913/**
1914 * i40e_aq_set_vsi_mc_promisc_on_vlan
1915 * @hw: pointer to the hw struct
1916 * @seid: vsi number
1917 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
1918 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
1919 * @cmd_details: pointer to command details structure or NULL
1920 **/
1921int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
1922 u16 seid, bool enable,
1923 u16 vid,
1924 struct i40e_asq_cmd_details *cmd_details)
1925{
1926 struct i40e_aq_desc desc;
1927 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1928 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1929 u16 flags = 0;
1930 int status;
1931
1932 i40e_fill_default_direct_cmd_desc(&desc,
1933 i40e_aqc_opc_set_vsi_promiscuous_modes);
1934
1935 if (enable)
1936 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
1937
1938 cmd->promiscuous_flags = cpu_to_le16(flags);
1939 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
1940 cmd->seid = cpu_to_le16(seid);
1941 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1942
1943 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
1944 cmd_details, true);
1945
1946 return status;
1947}
1948
1949/**
1950 * i40e_aq_set_vsi_uc_promisc_on_vlan
1951 * @hw: pointer to the hw struct
1952 * @seid: vsi number
1953 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
1954 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
1955 * @cmd_details: pointer to command details structure or NULL
1956 **/
1957int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
1958 u16 seid, bool enable,
1959 u16 vid,
1960 struct i40e_asq_cmd_details *cmd_details)
1961{
1962 struct i40e_aq_desc desc;
1963 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1964 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1965 u16 flags = 0;
1966 int status;
1967
1968 i40e_fill_default_direct_cmd_desc(&desc,
1969 i40e_aqc_opc_set_vsi_promiscuous_modes);
1970
1971 if (enable) {
1972 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1973 if (i40e_is_aq_api_ver_ge(hw, 1, 5))
1974 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1975 }
1976
1977 cmd->promiscuous_flags = cpu_to_le16(flags);
1978 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1979 if (i40e_is_aq_api_ver_ge(hw, 1, 5))
1980 cmd->valid_flags |=
1981 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1982 cmd->seid = cpu_to_le16(seid);
1983 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1984
1985 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
1986 cmd_details, true);
1987
1988 return status;
1989}
1990
1991/**
1992 * i40e_aq_set_vsi_bc_promisc_on_vlan
1993 * @hw: pointer to the hw struct
1994 * @seid: vsi number
1995 * @enable: set broadcast promiscuous enable/disable for a given VLAN
1996 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
1997 * @cmd_details: pointer to command details structure or NULL
1998 **/
1999int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2000 u16 seid, bool enable, u16 vid,
2001 struct i40e_asq_cmd_details *cmd_details)
2002{
2003 struct i40e_aq_desc desc;
2004 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2005 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2006 u16 flags = 0;
2007 int status;
2008
2009 i40e_fill_default_direct_cmd_desc(&desc,
2010 i40e_aqc_opc_set_vsi_promiscuous_modes);
2011
2012 if (enable)
2013 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2014
2015 cmd->promiscuous_flags = cpu_to_le16(flags);
2016 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2017 cmd->seid = cpu_to_le16(seid);
2018 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2019
2020 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2021
2022 return status;
2023}
2024
2025/**
2026 * i40e_aq_set_vsi_broadcast
2027 * @hw: pointer to the hw struct
2028 * @seid: vsi number
2029 * @set_filter: true to set filter, false to clear filter
2030 * @cmd_details: pointer to command details structure or NULL
2031 *
2032 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2033 **/
2034int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2035 u16 seid, bool set_filter,
2036 struct i40e_asq_cmd_details *cmd_details)
2037{
2038 struct i40e_aq_desc desc;
2039 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2040 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2041 int status;
2042
2043 i40e_fill_default_direct_cmd_desc(&desc,
2044 i40e_aqc_opc_set_vsi_promiscuous_modes);
2045
2046 if (set_filter)
2047 cmd->promiscuous_flags
2048 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2049 else
2050 cmd->promiscuous_flags
2051 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2052
2053 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2054 cmd->seid = cpu_to_le16(seid);
2055 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2056
2057 return status;
2058}
2059
2060/**
2061 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2062 * @hw: pointer to the hw struct
2063 * @seid: vsi number
2064 * @enable: set VLAN promiscuous enable/disable for the VSI
2065 * @cmd_details: pointer to command details structure or NULL
2066 **/
2067int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2068 u16 seid, bool enable,
2069 struct i40e_asq_cmd_details *cmd_details)
2070{
2071 struct i40e_aq_desc desc;
2072 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2073 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2074 u16 flags = 0;
2075 int status;
2076
2077 i40e_fill_default_direct_cmd_desc(&desc,
2078 i40e_aqc_opc_set_vsi_promiscuous_modes);
2079 if (enable)
2080 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2081
2082 cmd->promiscuous_flags = cpu_to_le16(flags);
2083 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2084 cmd->seid = cpu_to_le16(seid);
2085
2086 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2087
2088 return status;
2089}
2090
2091/**
2092 * i40e_aq_get_vsi_params - get VSI configuration info
2093 * @hw: pointer to the hw struct
2094 * @vsi_ctx: pointer to a vsi context struct
2095 * @cmd_details: pointer to command details structure or NULL
2096 **/
2097int i40e_aq_get_vsi_params(struct i40e_hw *hw,
2098 struct i40e_vsi_context *vsi_ctx,
2099 struct i40e_asq_cmd_details *cmd_details)
2100{
2101 struct i40e_aq_desc desc;
2102 struct i40e_aqc_add_get_update_vsi *cmd =
2103 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2104 struct i40e_aqc_add_get_update_vsi_completion *resp =
2105 (struct i40e_aqc_add_get_update_vsi_completion *)
2106 &desc.params.raw;
2107 int status;
2108
2109 i40e_fill_default_direct_cmd_desc(&desc,
2110 i40e_aqc_opc_get_vsi_parameters);
2111
2112 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2113
2114 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2115
2116 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2117 sizeof(vsi_ctx->info), NULL);
2118
2119 if (status)
2120 goto aq_get_vsi_params_exit;
2121
2122 vsi_ctx->seid = le16_to_cpu(resp->seid);
2123 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2124 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2125 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2126
2127aq_get_vsi_params_exit:
2128 return status;
2129}
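
/*
 * Usage sketch (illustrative only): reading back an existing VSI's
 * configuration only requires the SEID to be filled in beforehand:
 *
 *	struct i40e_vsi_context ctx = {};
 *
 *	ctx.seid = vsi_seid;
 *	err = i40e_aq_get_vsi_params(hw, &ctx, NULL);
 *
 * On success ctx.info holds the VSI properties and ctx.vsi_number the
 * absolute VSI index assigned by firmware.
 */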
2130
2131/**
2132 * i40e_aq_update_vsi_params
2133 * @hw: pointer to the hw struct
2134 * @vsi_ctx: pointer to a vsi context struct
2135 * @cmd_details: pointer to command details structure or NULL
2136 *
2137 * Update a VSI context.
2138 **/
2139int i40e_aq_update_vsi_params(struct i40e_hw *hw,
2140 struct i40e_vsi_context *vsi_ctx,
2141 struct i40e_asq_cmd_details *cmd_details)
2142{
2143 struct i40e_aq_desc desc;
2144 struct i40e_aqc_add_get_update_vsi *cmd =
2145 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2146 struct i40e_aqc_add_get_update_vsi_completion *resp =
2147 (struct i40e_aqc_add_get_update_vsi_completion *)
2148 &desc.params.raw;
2149 int status;
2150
2151 i40e_fill_default_direct_cmd_desc(&desc,
2152 i40e_aqc_opc_update_vsi_parameters);
2153 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2154
2155 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2156
2157 status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
2158 sizeof(vsi_ctx->info),
2159 cmd_details, true);
2160
2161 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2162 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2163
2164 return status;
2165}
2166
2167/**
2168 * i40e_aq_get_switch_config
2169 * @hw: pointer to the hardware structure
2170 * @buf: pointer to the result buffer
2171 * @buf_size: length of the caller-supplied result buffer
2172 * @start_seid: seid to start for the report, 0 == beginning
2173 * @cmd_details: pointer to command details structure or NULL
2174 *
2175 * Fill the buf with switch configuration returned from AdminQ command
2176 **/
2177int i40e_aq_get_switch_config(struct i40e_hw *hw,
2178 struct i40e_aqc_get_switch_config_resp *buf,
2179 u16 buf_size, u16 *start_seid,
2180 struct i40e_asq_cmd_details *cmd_details)
2181{
2182 struct i40e_aq_desc desc;
2183 struct i40e_aqc_switch_seid *scfg =
2184 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2185 int status;
2186
2187 i40e_fill_default_direct_cmd_desc(&desc,
2188 i40e_aqc_opc_get_switch_config);
2189 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2190 if (buf_size > I40E_AQ_LARGE_BUF)
2191 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2192 scfg->seid = cpu_to_le16(*start_seid);
2193
2194 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2195 *start_seid = le16_to_cpu(scfg->seid);
2196
2197 return status;
2198}
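
/*
 * Usage sketch (illustrative only): the switch configuration may not fit
 * into a single buffer, so callers usually loop, feeding the returned
 * start_seid back in until it comes back as 0 (buf/buf_size are a
 * caller-provided buffer and its length):
 *
 *	u16 next_seid = 0;
 *
 *	do {
 *		err = i40e_aq_get_switch_config(hw, buf, buf_size,
 *						&next_seid, NULL);
 *		if (err)
 *			break;
 *		(walk the returned switch elements in buf here)
 *	} while (next_seid != 0);
 */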
2199
2200/**
2201 * i40e_aq_set_switch_config
2202 * @hw: pointer to the hardware structure
2203 * @flags: bit flag values to set
2204 * @valid_flags: which bit flags to set
2205 * @mode: cloud filter mode
2207 * @cmd_details: pointer to command details structure or NULL
2208 *
2209 * Set switch configuration bits
2210 **/
2211int i40e_aq_set_switch_config(struct i40e_hw *hw,
2212 u16 flags,
2213 u16 valid_flags, u8 mode,
2214 struct i40e_asq_cmd_details *cmd_details)
2215{
2216 struct i40e_aq_desc desc;
2217 struct i40e_aqc_set_switch_config *scfg =
2218 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2219 int status;
2220
2221 i40e_fill_default_direct_cmd_desc(&desc,
2222 i40e_aqc_opc_set_switch_config);
2223 scfg->flags = cpu_to_le16(flags);
2224 scfg->valid_flags = cpu_to_le16(valid_flags);
2225 scfg->mode = mode;
2226 if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) {
2227 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2228 scfg->first_tag = cpu_to_le16(hw->first_tag);
2229 scfg->second_tag = cpu_to_le16(hw->second_tag);
2230 }
2231 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2232
2233 return status;
2234}
2235
2236/**
2237 * i40e_aq_get_firmware_version
2238 * @hw: pointer to the hw struct
2239 * @fw_major_version: firmware major version
2240 * @fw_minor_version: firmware minor version
2241 * @fw_build: firmware build number
2242 * @api_major_version: admin queue API major version
2243 * @api_minor_version: admin queue API minor version
2244 * @cmd_details: pointer to command details structure or NULL
2245 *
2246 * Get the firmware version from the admin queue commands
2247 **/
2248int i40e_aq_get_firmware_version(struct i40e_hw *hw,
2249 u16 *fw_major_version, u16 *fw_minor_version,
2250 u32 *fw_build,
2251 u16 *api_major_version, u16 *api_minor_version,
2252 struct i40e_asq_cmd_details *cmd_details)
2253{
2254 struct i40e_aq_desc desc;
2255 struct i40e_aqc_get_version *resp =
2256 (struct i40e_aqc_get_version *)&desc.params.raw;
2257 int status;
2258
2259 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2260
2261 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2262
2263 if (!status) {
2264 if (fw_major_version)
2265 *fw_major_version = le16_to_cpu(resp->fw_major);
2266 if (fw_minor_version)
2267 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2268 if (fw_build)
2269 *fw_build = le32_to_cpu(resp->fw_build);
2270 if (api_major_version)
2271 *api_major_version = le16_to_cpu(resp->api_major);
2272 if (api_minor_version)
2273 *api_minor_version = le16_to_cpu(resp->api_minor);
2274 }
2275
2276 return status;
2277}
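
/*
 * Usage sketch (illustrative only): every output pointer may be NULL if
 * the caller does not need that field, e.g. to query only the AQ API
 * version:
 *
 *	u16 api_major, api_minor;
 *
 *	err = i40e_aq_get_firmware_version(hw, NULL, NULL, NULL,
 *					   &api_major, &api_minor, NULL);
 */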
2278
2279/**
2280 * i40e_aq_send_driver_version
2281 * @hw: pointer to the hw struct
2282 * @dv: driver's major, minor version
2283 * @cmd_details: pointer to command details structure or NULL
2284 *
2285 * Send the driver version to the firmware
2286 **/
2287int i40e_aq_send_driver_version(struct i40e_hw *hw,
2288 struct i40e_driver_version *dv,
2289 struct i40e_asq_cmd_details *cmd_details)
2290{
2291 struct i40e_aq_desc desc;
2292 struct i40e_aqc_driver_version *cmd =
2293 (struct i40e_aqc_driver_version *)&desc.params.raw;
2294 int status;
2295 u16 len;
2296
2297 if (dv == NULL)
2298 return -EINVAL;
2299
2300 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2301
2302 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2303 cmd->driver_major_ver = dv->major_version;
2304 cmd->driver_minor_ver = dv->minor_version;
2305 cmd->driver_build_ver = dv->build_version;
2306 cmd->driver_subbuild_ver = dv->subbuild_version;
2307
2308 len = 0;
2309 while (len < sizeof(dv->driver_string) &&
2310 (dv->driver_string[len] < 0x80) &&
2311 dv->driver_string[len])
2312 len++;
2313 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2314 len, cmd_details);
2315
2316 return status;
2317}
2318
2319/**
2320 * i40e_get_link_status - get status of the HW network link
2321 * @hw: pointer to the hw struct
2322 * @link_up: pointer to bool (true/false = linkup/linkdown)
2323 *
2324 * Sets *link_up to true if the link is up and false if it is down.
2325 * The value of *link_up is only valid when the returned status is 0.
2326 *
2327 * Side effect: LinkStatusEvent reporting becomes enabled
2328 **/
2329int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2330{
2331 int status = 0;
2332
2333 if (hw->phy.get_link_info) {
2334 status = i40e_update_link_info(hw);
2335
2336 if (status)
2337 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2338 status);
2339 }
2340
2341 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2342
2343 return status;
2344}
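
/*
 * Usage sketch (illustrative only):
 *
 *	bool link_up = false;
 *
 *	err = i40e_get_link_status(hw, &link_up);
 *	if (!err && link_up)
 *		(link is up; hw->phy.link_info now reflects the current state)
 *
 * Remember that *link_up is only meaningful when the return value is 0.
 */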
2345
2346/**
2347 * i40e_update_link_info - update status of the HW network link
2348 * @hw: pointer to the hw struct
2349 **/
2350noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
2351{
2352 struct i40e_aq_get_phy_abilities_resp abilities;
2353 int status = 0;
2354
2355 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2356 if (status)
2357 return status;
2358
2359 /* extra checking needed to ensure link info to user is timely */
2360 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2361 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2362 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2363 status = i40e_aq_get_phy_capabilities(hw, false, false,
2364 &abilities, NULL);
2365 if (status)
2366 return status;
2367
2368 if (abilities.fec_cfg_curr_mod_ext_info &
2369 I40E_AQ_ENABLE_FEC_AUTO)
2370 hw->phy.link_info.req_fec_info =
2371 (I40E_AQ_REQUEST_FEC_KR |
2372 I40E_AQ_REQUEST_FEC_RS);
2373 else
2374 hw->phy.link_info.req_fec_info =
2375 abilities.fec_cfg_curr_mod_ext_info &
2376 (I40E_AQ_REQUEST_FEC_KR |
2377 I40E_AQ_REQUEST_FEC_RS);
2378
2379 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2380 sizeof(hw->phy.link_info.module_type));
2381 }
2382
2383 return status;
2384}
2385
2386/**
2387 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2388 * @hw: pointer to the hw struct
2389 * @uplink_seid: the MAC or other gizmo SEID
2390 * @downlink_seid: the VSI SEID
2391 * @enabled_tc: bitmap of TCs to be enabled
2392 * @default_port: true for default port VSI, false for control port
2393 * @veb_seid: pointer to where to put the resulting VEB SEID
2394 * @enable_stats: true to turn on VEB stats
2395 * @cmd_details: pointer to command details structure or NULL
2396 *
2397 * This asks the FW to add a VEB between the uplink and downlink
2398 * elements. If the uplink SEID is 0, this will be a floating VEB.
2399 **/
2400int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2401 u16 downlink_seid, u8 enabled_tc,
2402 bool default_port, u16 *veb_seid,
2403 bool enable_stats,
2404 struct i40e_asq_cmd_details *cmd_details)
2405{
2406 struct i40e_aq_desc desc;
2407 struct i40e_aqc_add_veb *cmd =
2408 (struct i40e_aqc_add_veb *)&desc.params.raw;
2409 struct i40e_aqc_add_veb_completion *resp =
2410 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2411 u16 veb_flags = 0;
2412 int status;
2413
2414 /* SEIDs need to either both be set or both be 0 for floating VEB */
2415 if (!!uplink_seid != !!downlink_seid)
2416 return -EINVAL;
2417
2418 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2419
2420 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2421 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2422 cmd->enable_tcs = enabled_tc;
2423 if (!uplink_seid)
2424 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2425 if (default_port)
2426 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2427 else
2428 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2429
2430 /* reverse logic here: set the bitflag to disable the stats */
2431 if (!enable_stats)
2432 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2433
2434 cmd->veb_flags = cpu_to_le16(veb_flags);
2435
2436 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2437
2438 if (!status && veb_seid)
2439 *veb_seid = le16_to_cpu(resp->veb_seid);
2440
2441 return status;
2442}
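
/*
 * Usage sketch (illustrative only; the SEIDs are hypothetical and come
 * from the switch configuration): adding a statistics-enabled data VEB
 * between the MAC uplink and a VSI:
 *
 *	u16 veb_seid = 0;
 *
 *	err = i40e_aq_add_veb(hw, mac_seid, vsi_seid, enabled_tc,
 *			      false, &veb_seid, true, NULL);
 *
 * Passing 0 for both uplink_seid and downlink_seid creates a floating
 * VEB instead.
 */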
2443
2444/**
2445 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2446 * @hw: pointer to the hw struct
2447 * @veb_seid: the SEID of the VEB to query
2448 * @switch_id: the uplink switch id
2449 * @floating: set to true if the VEB is floating
2450 * @statistic_index: index of the stats counter block for this VEB
2451 * @vebs_used: number of VEBs used by this function
2452 * @vebs_free: total number of VEBs not reserved by any function
2453 * @cmd_details: pointer to command details structure or NULL
2454 *
2455 * This retrieves the parameters for a particular VEB, specified by
2456 * veb_seid, and returns them to the caller.
2457 **/
2458int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2459 u16 veb_seid, u16 *switch_id,
2460 bool *floating, u16 *statistic_index,
2461 u16 *vebs_used, u16 *vebs_free,
2462 struct i40e_asq_cmd_details *cmd_details)
2463{
2464 struct i40e_aq_desc desc;
2465 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2466 (struct i40e_aqc_get_veb_parameters_completion *)
2467 &desc.params.raw;
2468 int status;
2469
2470 if (veb_seid == 0)
2471 return -EINVAL;
2472
2473 i40e_fill_default_direct_cmd_desc(&desc,
2474 i40e_aqc_opc_get_veb_parameters);
2475 cmd_resp->seid = cpu_to_le16(veb_seid);
2476
2477 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2478 if (status)
2479 goto get_veb_exit;
2480
2481 if (switch_id)
2482 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2483 if (statistic_index)
2484 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2485 if (vebs_used)
2486 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2487 if (vebs_free)
2488 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2489 if (floating) {
2490 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2491
2492 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2493 *floating = true;
2494 else
2495 *floating = false;
2496 }
2497
2498get_veb_exit:
2499 return status;
2500}
2501
2502/**
2503 * i40e_prepare_add_macvlan
2504 * @mv_list: list of macvlans to be added
2505 * @desc: pointer to AQ descriptor structure
2506 * @count: length of the list
2507 * @seid: VSI for the mac address
2508 *
2509 * Internal helper function that prepares the add macvlan request
2510 * and returns the buffer size.
2511 **/
2512static u16
2513i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
2514 struct i40e_aq_desc *desc, u16 count, u16 seid)
2515{
2516 struct i40e_aqc_macvlan *cmd =
2517 (struct i40e_aqc_macvlan *)&desc->params.raw;
2518 u16 buf_size;
2519 int i;
2520
2521 buf_size = count * sizeof(*mv_list);
2522
2523 /* prep the rest of the request */
2524 i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
2525 cmd->num_addresses = cpu_to_le16(count);
2526 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2527 cmd->seid[1] = 0;
2528 cmd->seid[2] = 0;
2529
2530 for (i = 0; i < count; i++)
2531 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2532 mv_list[i].flags |=
2533 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2534
2535 desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2536 if (buf_size > I40E_AQ_LARGE_BUF)
2537 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2538
2539 return buf_size;
2540}
2541
2542/**
2543 * i40e_aq_add_macvlan
2544 * @hw: pointer to the hw struct
2545 * @seid: VSI for the mac address
2546 * @mv_list: list of macvlans to be added
2547 * @count: length of the list
2548 * @cmd_details: pointer to command details structure or NULL
2549 *
2550 * Add MAC/VLAN addresses to the HW filtering
2551 **/
2552int
2553i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2554 struct i40e_aqc_add_macvlan_element_data *mv_list,
2555 u16 count, struct i40e_asq_cmd_details *cmd_details)
2556{
2557 struct i40e_aq_desc desc;
2558 u16 buf_size;
2559
2560 if (count == 0 || !mv_list || !hw)
2561 return -EINVAL;
2562
2563 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2564
2565 return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2566 cmd_details, true);
2567}
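
/*
 * Usage sketch (illustrative only; mac and vsi_seid are hypothetical,
 * flag names assumed from i40e_adminq_cmd.h): adding one perfect-match
 * MAC filter that ignores the VLAN tag:
 *
 *	struct i40e_aqc_add_macvlan_element_data elem = {};
 *
 *	ether_addr_copy(elem.mac_addr, mac);
 *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
 *				 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
 *	err = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
 */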
2568
2569/**
2570 * i40e_aq_add_macvlan_v2
2571 * @hw: pointer to the hw struct
2572 * @seid: VSI for the mac address
2573 * @mv_list: list of macvlans to be added
2574 * @count: length of the list
2575 * @cmd_details: pointer to command details structure or NULL
2576 * @aq_status: pointer to Admin Queue status return value
2577 *
2578 * Add MAC/VLAN addresses to the HW filtering.
2579 * The _v2 version returns the last Admin Queue status in aq_status
2580 * to avoid race conditions in access to hw->aq.asq_last_status.
2581 * It also calls _v2 versions of asq_send_command functions to
2582 * get the aq_status on the stack.
2583 **/
2584int
2585i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
2586 struct i40e_aqc_add_macvlan_element_data *mv_list,
2587 u16 count, struct i40e_asq_cmd_details *cmd_details,
2588 enum i40e_admin_queue_err *aq_status)
2589{
2590 struct i40e_aq_desc desc;
2591 u16 buf_size;
2592
2593 if (count == 0 || !mv_list || !hw)
2594 return -EINVAL;
2595
2596 buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
2597
2598 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2599 cmd_details, true, aq_status);
2600}
2601
2602/**
2603 * i40e_aq_remove_macvlan
2604 * @hw: pointer to the hw struct
2605 * @seid: VSI for the mac address
2606 * @mv_list: list of macvlans to be removed
2607 * @count: length of the list
2608 * @cmd_details: pointer to command details structure or NULL
2609 *
2610 * Remove MAC/VLAN addresses from the HW filtering
2611 **/
2612int
2613i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2614 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2615 u16 count, struct i40e_asq_cmd_details *cmd_details)
2616{
2617 struct i40e_aq_desc desc;
2618 struct i40e_aqc_macvlan *cmd =
2619 (struct i40e_aqc_macvlan *)&desc.params.raw;
2620 u16 buf_size;
2621 int status;
2622
2623 if (count == 0 || !mv_list || !hw)
2624 return -EINVAL;
2625
2626 buf_size = count * sizeof(*mv_list);
2627
2628 /* prep the rest of the request */
2629 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2630 cmd->num_addresses = cpu_to_le16(count);
2631 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2632 cmd->seid[1] = 0;
2633 cmd->seid[2] = 0;
2634
2635 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2636 if (buf_size > I40E_AQ_LARGE_BUF)
2637 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2638
2639 status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
2640 cmd_details, true);
2641
2642 return status;
2643}
2644
2645/**
2646 * i40e_aq_remove_macvlan_v2
2647 * @hw: pointer to the hw struct
2648 * @seid: VSI for the mac address
2649 * @mv_list: list of macvlans to be removed
2650 * @count: length of the list
2651 * @cmd_details: pointer to command details structure or NULL
2652 * @aq_status: pointer to Admin Queue status return value
2653 *
2654 * Remove MAC/VLAN addresses from the HW filtering.
2655 * The _v2 version returns the last Admin Queue status in aq_status
2656 * to avoid race conditions in access to hw->aq.asq_last_status.
2657 * It also calls _v2 versions of asq_send_command functions to
2658 * get the aq_status on the stack.
2659 **/
2660int
2661i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
2662 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2663 u16 count, struct i40e_asq_cmd_details *cmd_details,
2664 enum i40e_admin_queue_err *aq_status)
2665{
2666 struct i40e_aqc_macvlan *cmd;
2667 struct i40e_aq_desc desc;
2668 u16 buf_size;
2669
2670 if (count == 0 || !mv_list || !hw)
2671 return -EINVAL;
2672
2673 buf_size = count * sizeof(*mv_list);
2674
2675 /* prep the rest of the request */
2676 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2677 cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
2678 cmd->num_addresses = cpu_to_le16(count);
2679 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2680 cmd->seid[1] = 0;
2681 cmd->seid[2] = 0;
2682
2683 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2684 if (buf_size > I40E_AQ_LARGE_BUF)
2685 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2686
2687 return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
2688 cmd_details, true, aq_status);
2689}
2690
2691/**
2692 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2693 * @hw: pointer to the hw struct
2694 * @opcode: AQ opcode for add or delete mirror rule
2695 * @sw_seid: Switch SEID (to which rule refers)
2696 * @rule_type: Rule Type (ingress/egress/VLAN)
2697 * @id: Destination VSI SEID or Rule ID
2698 * @count: length of the list
2699 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2700 * @cmd_details: pointer to command details structure or NULL
2701 * @rule_id: Rule ID returned from FW
2702 * @rules_used: Number of rules used in internal switch
2703 * @rules_free: Number of rules free in internal switch
2704 *
2705 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2706 * VEBs/VEPA elements only
2707 **/
2708static int i40e_mirrorrule_op(struct i40e_hw *hw,
2709 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2710 u16 count, __le16 *mr_list,
2711 struct i40e_asq_cmd_details *cmd_details,
2712 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2713{
2714 struct i40e_aq_desc desc;
2715 struct i40e_aqc_add_delete_mirror_rule *cmd =
2716 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2717 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2718 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2719 u16 buf_size;
2720 int status;
2721
2722 buf_size = count * sizeof(*mr_list);
2723
2724 /* prep the rest of the request */
2725 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2726 cmd->seid = cpu_to_le16(sw_seid);
2727 cmd->rule_type = cpu_to_le16(rule_type &
2728 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2729 cmd->num_entries = cpu_to_le16(count);
2730 /* Dest VSI for add, rule_id for delete */
2731 cmd->destination = cpu_to_le16(id);
2732 if (mr_list) {
2733 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2734 I40E_AQ_FLAG_RD));
2735 if (buf_size > I40E_AQ_LARGE_BUF)
2736 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2737 }
2738
2739 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2740 cmd_details);
2741 if (!status ||
2742 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2743 if (rule_id)
2744 *rule_id = le16_to_cpu(resp->rule_id);
2745 if (rules_used)
2746 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2747 if (rules_free)
2748 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2749 }
2750 return status;
2751}
2752
2753/**
2754 * i40e_aq_add_mirrorrule - add a mirror rule
2755 * @hw: pointer to the hw struct
2756 * @sw_seid: Switch SEID (to which rule refers)
2757 * @rule_type: Rule Type (ingress/egress/VLAN)
2758 * @dest_vsi: SEID of VSI to which packets will be mirrored
2759 * @count: length of the list
2760 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2761 * @cmd_details: pointer to command details structure or NULL
2762 * @rule_id: Rule ID returned from FW
2763 * @rules_used: Number of rules used in internal switch
2764 * @rules_free: Number of rules free in internal switch
2765 *
2766 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2767 **/
2768int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2769 u16 rule_type, u16 dest_vsi, u16 count,
2770 __le16 *mr_list,
2771 struct i40e_asq_cmd_details *cmd_details,
2772 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2773{
2774 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2775 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2776 if (count == 0 || !mr_list)
2777 return -EINVAL;
2778 }
2779
2780 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2781 rule_type, dest_vsi, count, mr_list,
2782 cmd_details, rule_id, rules_used, rules_free);
2783}
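
/*
 * Usage sketch (illustrative only): mirroring all ingress traffic of a
 * VEB (sw_seid) to a destination VSI; no SEID/VLAN list is required for
 * the ALL_INGRESS rule type:
 *
 *	u16 rule_id, rules_used, rules_free;
 *
 *	err = i40e_aq_add_mirrorrule(hw, sw_seid,
 *				     I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
 *				     dest_vsi_seid, 0, NULL, NULL,
 *				     &rule_id, &rules_used, &rules_free);
 *
 * The returned rule_id is what i40e_aq_delete_mirrorrule() later expects.
 */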
2784
2785/**
2786 * i40e_aq_delete_mirrorrule - delete a mirror rule
2787 * @hw: pointer to the hw struct
2788 * @sw_seid: Switch SEID (to which rule refers)
2789 * @rule_type: Rule Type (ingress/egress/VLAN)
2790 * @count: length of the list
2791 * @rule_id: Rule ID that is returned in the receive desc as part of
2792 * add_mirrorrule.
2793 * @mr_list: list of mirrored VLAN IDs to be removed
2794 * @cmd_details: pointer to command details structure or NULL
2795 * @rules_used: Number of rules used in internal switch
2796 * @rules_free: Number of rules free in internal switch
2797 *
2798 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2799 **/
2800int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2801 u16 rule_type, u16 rule_id, u16 count,
2802 __le16 *mr_list,
2803 struct i40e_asq_cmd_details *cmd_details,
2804 u16 *rules_used, u16 *rules_free)
2805{
2806 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2807 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2808 /* count and mr_list shall be valid for rule_type INGRESS VLAN
2809 * mirroring. For other rule_type values, count and mr_list do
2810 * not matter.
2811 */
2812 if (count == 0 || !mr_list)
2813 return -EINVAL;
2814 }
2815
2816 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2817 rule_type, rule_id, count, mr_list,
2818 cmd_details, NULL, rules_used, rules_free);
2819}
2820
2821/**
2822 * i40e_aq_send_msg_to_vf
2823 * @hw: pointer to the hardware structure
2824 * @vfid: VF id to send msg
2825 * @v_opcode: opcodes for VF-PF communication
2826 * @v_retval: return error code
2827 * @msg: pointer to the msg buffer
2828 * @msglen: msg length
2829 * @cmd_details: pointer to command details
2830 *
2831 * Send a message to the specified VF over the admin queue.
2832 **/
2833int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2834 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2835 struct i40e_asq_cmd_details *cmd_details)
2836{
2837 struct i40e_aq_desc desc;
2838 struct i40e_aqc_pf_vf_message *cmd =
2839 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2840 int status;
2841
2842 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2843 cmd->id = cpu_to_le32(vfid);
2844 desc.cookie_high = cpu_to_le32(v_opcode);
2845 desc.cookie_low = cpu_to_le32(v_retval);
2846 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2847 if (msglen) {
2848 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2849 I40E_AQ_FLAG_RD));
2850 if (msglen > I40E_AQ_LARGE_BUF)
2851 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2852 desc.datalen = cpu_to_le16(msglen);
2853 }
2854 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2855
2856 return status;
2857}
2858
2859/**
2860 * i40e_aq_debug_read_register
2861 * @hw: pointer to the hw struct
2862 * @reg_addr: register address
2863 * @reg_val: register value
2864 * @cmd_details: pointer to command details structure or NULL
2865 *
2866 * Read the register using the admin queue commands
2867 **/
2868int i40e_aq_debug_read_register(struct i40e_hw *hw,
2869 u32 reg_addr, u64 *reg_val,
2870 struct i40e_asq_cmd_details *cmd_details)
2871{
2872 struct i40e_aq_desc desc;
2873 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2874 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2875 int status;
2876
2877 if (reg_val == NULL)
2878 return -EINVAL;
2879
2880 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2881
2882 cmd_resp->address = cpu_to_le32(reg_addr);
2883
2884 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2885
2886 if (!status) {
2887 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2888 (u64)le32_to_cpu(cmd_resp->value_low);
2889 }
2890
2891 return status;
2892}
2893
2894/**
2895 * i40e_aq_debug_write_register
2896 * @hw: pointer to the hw struct
2897 * @reg_addr: register address
2898 * @reg_val: register value
2899 * @cmd_details: pointer to command details structure or NULL
2900 *
2901 * Write to a register using the admin queue commands
2902 **/
2903int i40e_aq_debug_write_register(struct i40e_hw *hw,
2904 u32 reg_addr, u64 reg_val,
2905 struct i40e_asq_cmd_details *cmd_details)
2906{
2907 struct i40e_aq_desc desc;
2908 struct i40e_aqc_debug_reg_read_write *cmd =
2909 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2910 int status;
2911
2912 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2913
2914 cmd->address = cpu_to_le32(reg_addr);
2915 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2916 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2917
2918 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2919
2920 return status;
2921}
2922
2923/**
2924 * i40e_aq_request_resource
2925 * @hw: pointer to the hw struct
2926 * @resource: resource id
2927 * @access: access type
2928 * @sdp_number: resource number
2929 * @timeout: the maximum time in ms that the driver may hold the resource
2930 * @cmd_details: pointer to command details structure or NULL
2931 *
2932 * requests common resource using the admin queue commands
2933 **/
2934int i40e_aq_request_resource(struct i40e_hw *hw,
2935 enum i40e_aq_resources_ids resource,
2936 enum i40e_aq_resource_access_type access,
2937 u8 sdp_number, u64 *timeout,
2938 struct i40e_asq_cmd_details *cmd_details)
2939{
2940 struct i40e_aq_desc desc;
2941 struct i40e_aqc_request_resource *cmd_resp =
2942 (struct i40e_aqc_request_resource *)&desc.params.raw;
2943 int status;
2944
2945 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
2946
2947 cmd_resp->resource_id = cpu_to_le16(resource);
2948 cmd_resp->access_type = cpu_to_le16(access);
2949 cmd_resp->resource_number = cpu_to_le32(sdp_number);
2950
2951 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2952 /* The completion specifies the maximum time in ms that the driver
2953 * may hold the resource in the Timeout field.
2954 * If the resource is held by someone else, the command completes with
2955 * busy return value and the timeout field indicates the maximum time
2956 * the current owner of the resource has to free it.
2957 */
2958 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
2959 *timeout = le32_to_cpu(cmd_resp->timeout);
2960
2961 return status;
2962}
2963
2964/**
2965 * i40e_aq_release_resource
2966 * @hw: pointer to the hw struct
2967 * @resource: resource id
2968 * @sdp_number: resource number
2969 * @cmd_details: pointer to command details structure or NULL
2970 *
2971 * release common resource using the admin queue commands
2972 **/
2973int i40e_aq_release_resource(struct i40e_hw *hw,
2974 enum i40e_aq_resources_ids resource,
2975 u8 sdp_number,
2976 struct i40e_asq_cmd_details *cmd_details)
2977{
2978 struct i40e_aq_desc desc;
2979 struct i40e_aqc_request_resource *cmd =
2980 (struct i40e_aqc_request_resource *)&desc.params.raw;
2981 int status;
2982
2983 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
2984
2985 cmd->resource_id = cpu_to_le16(resource);
2986 cmd->resource_number = cpu_to_le32(sdp_number);
2987
2988 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2989
2990 return status;
2991}
2992
2993/**
2994 * i40e_aq_read_nvm
2995 * @hw: pointer to the hw struct
2996 * @module_pointer: module pointer location in words from the NVM beginning
2997 * @offset: byte offset from the module beginning
2998 * @length: length of the section to be read (in bytes from the offset)
2999 * @data: command buffer (size [bytes] = length)
3000 * @last_command: tells if this is the last command in a series
3001 * @cmd_details: pointer to command details structure or NULL
3002 *
3003 * Read the NVM using the admin queue commands
3004 **/
3005int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3006 u32 offset, u16 length, void *data,
3007 bool last_command,
3008 struct i40e_asq_cmd_details *cmd_details)
3009{
3010 struct i40e_aq_desc desc;
3011 struct i40e_aqc_nvm_update *cmd =
3012 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3013 int status;
3014
3015 /* The highest byte of the offset must be zero. */
3016 if (offset & 0xFF000000) {
3017 status = -EINVAL;
3018 goto i40e_aq_read_nvm_exit;
3019 }
3020
3021 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3022
3023 /* If this is the last command in a series, set the proper flag. */
3024 if (last_command)
3025 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3026 cmd->module_pointer = module_pointer;
3027 cmd->offset = cpu_to_le32(offset);
3028 cmd->length = cpu_to_le16(length);
3029
3030 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3031 if (length > I40E_AQ_LARGE_BUF)
3032 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3033
3034 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3035
3036i40e_aq_read_nvm_exit:
3037 return status;
3038}
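
/*
 * Usage sketch (illustrative only): NVM reads are normally bracketed by
 * ownership of the NVM resource, as done for the OCP config word later
 * in this file:
 *
 *	u16 word;
 *
 *	if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
 *		err = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
 *				       2 * I40E_SR_OCP_CFG_WORD0,
 *				       sizeof(word), &word, true, NULL);
 *		i40e_release_nvm(hw);
 *	}
 */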
3039
3040/**
3041 * i40e_aq_erase_nvm
3042 * @hw: pointer to the hw struct
3043 * @module_pointer: module pointer location in words from the NVM beginning
3044 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3045 * @length: length of the section to be erased (expressed in 4 KB)
3046 * @last_command: tells if this is the last command in a series
3047 * @cmd_details: pointer to command details structure or NULL
3048 *
3049 * Erase the NVM sector using the admin queue commands
3050 **/
3051int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3052 u32 offset, u16 length, bool last_command,
3053 struct i40e_asq_cmd_details *cmd_details)
3054{
3055 struct i40e_aq_desc desc;
3056 struct i40e_aqc_nvm_update *cmd =
3057 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3058 int status;
3059
3060 /* The highest byte of the offset must be zero. */
3061 if (offset & 0xFF000000) {
3062 status = -EINVAL;
3063 goto i40e_aq_erase_nvm_exit;
3064 }
3065
3066 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3067
3068 /* If this is the last command in a series, set the proper flag. */
3069 if (last_command)
3070 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3071 cmd->module_pointer = module_pointer;
3072 cmd->offset = cpu_to_le32(offset);
3073 cmd->length = cpu_to_le16(length);
3074
3075 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3076
3077i40e_aq_erase_nvm_exit:
3078 return status;
3079}
3080
3081/**
3082 * i40e_parse_discover_capabilities
3083 * @hw: pointer to the hw struct
3084 * @buff: pointer to a buffer containing device/function capability records
3085 * @cap_count: number of capability records in the list
3086 * @list_type_opc: type of capabilities list to parse
3087 *
3088 * Parse the device/function capabilities list.
3089 **/
3090static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3091 u32 cap_count,
3092 enum i40e_admin_queue_opc list_type_opc)
3093{
3094 struct i40e_aqc_list_capabilities_element_resp *cap;
3095 u32 valid_functions, num_functions;
3096 u32 number, logical_id, phys_id;
3097 struct i40e_hw_capabilities *p;
3098 u16 id, ocp_cfg_word0;
3099 u8 major_rev;
3100 int status;
3101 u32 i = 0;
3102
3103 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3104
3105 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3106 p = &hw->dev_caps;
3107 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3108 p = &hw->func_caps;
3109 else
3110 return;
3111
3112 for (i = 0; i < cap_count; i++, cap++) {
3113 id = le16_to_cpu(cap->id);
3114 number = le32_to_cpu(cap->number);
3115 logical_id = le32_to_cpu(cap->logical_id);
3116 phys_id = le32_to_cpu(cap->phys_id);
3117 major_rev = cap->major_rev;
3118
3119 switch (id) {
3120 case I40E_AQ_CAP_ID_SWITCH_MODE:
3121 p->switch_mode = number;
3122 break;
3123 case I40E_AQ_CAP_ID_MNG_MODE:
3124 p->management_mode = number;
3125 if (major_rev > 1) {
3126 p->mng_protocols_over_mctp = logical_id;
3127 i40e_debug(hw, I40E_DEBUG_INIT,
3128 "HW Capability: Protocols over MCTP = %d\n",
3129 p->mng_protocols_over_mctp);
3130 } else {
3131 p->mng_protocols_over_mctp = 0;
3132 }
3133 break;
3134 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3135 p->npar_enable = number;
3136 break;
3137 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3138 p->os2bmc = number;
3139 break;
3140 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3141 p->valid_functions = number;
3142 break;
3143 case I40E_AQ_CAP_ID_SRIOV:
3144 if (number == 1)
3145 p->sr_iov_1_1 = true;
3146 break;
3147 case I40E_AQ_CAP_ID_VF:
3148 p->num_vfs = number;
3149 p->vf_base_id = logical_id;
3150 break;
3151 case I40E_AQ_CAP_ID_VMDQ:
3152 if (number == 1)
3153 p->vmdq = true;
3154 break;
3155 case I40E_AQ_CAP_ID_8021QBG:
3156 if (number == 1)
3157 p->evb_802_1_qbg = true;
3158 break;
3159 case I40E_AQ_CAP_ID_8021QBR:
3160 if (number == 1)
3161 p->evb_802_1_qbh = true;
3162 break;
3163 case I40E_AQ_CAP_ID_VSI:
3164 p->num_vsis = number;
3165 break;
3166 case I40E_AQ_CAP_ID_DCB:
3167 if (number == 1) {
3168 p->dcb = true;
3169 p->enabled_tcmap = logical_id;
3170 p->maxtc = phys_id;
3171 }
3172 break;
3173 case I40E_AQ_CAP_ID_FCOE:
3174 if (number == 1)
3175 p->fcoe = true;
3176 break;
3177 case I40E_AQ_CAP_ID_ISCSI:
3178 if (number == 1)
3179 p->iscsi = true;
3180 break;
3181 case I40E_AQ_CAP_ID_RSS:
3182 p->rss = true;
3183 p->rss_table_size = number;
3184 p->rss_table_entry_width = logical_id;
3185 break;
3186 case I40E_AQ_CAP_ID_RXQ:
3187 p->num_rx_qp = number;
3188 p->base_queue = phys_id;
3189 break;
3190 case I40E_AQ_CAP_ID_TXQ:
3191 p->num_tx_qp = number;
3192 p->base_queue = phys_id;
3193 break;
3194 case I40E_AQ_CAP_ID_MSIX:
3195 p->num_msix_vectors = number;
3196 i40e_debug(hw, I40E_DEBUG_INIT,
3197 "HW Capability: MSIX vector count = %d\n",
3198 p->num_msix_vectors);
3199 break;
3200 case I40E_AQ_CAP_ID_VF_MSIX:
3201 p->num_msix_vectors_vf = number;
3202 break;
3203 case I40E_AQ_CAP_ID_FLEX10:
3204 if (major_rev == 1) {
3205 if (number == 1) {
3206 p->flex10_enable = true;
3207 p->flex10_capable = true;
3208 }
3209 } else {
3210 /* Capability revision >= 2 */
3211 if (number & 1)
3212 p->flex10_enable = true;
3213 if (number & 2)
3214 p->flex10_capable = true;
3215 }
3216 p->flex10_mode = logical_id;
3217 p->flex10_status = phys_id;
3218 break;
3219 case I40E_AQ_CAP_ID_CEM:
3220 if (number == 1)
3221 p->mgmt_cem = true;
3222 break;
3223 case I40E_AQ_CAP_ID_IWARP:
3224 if (number == 1)
3225 p->iwarp = true;
3226 break;
3227 case I40E_AQ_CAP_ID_LED:
3228 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3229 p->led[phys_id] = true;
3230 break;
3231 case I40E_AQ_CAP_ID_SDP:
3232 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3233 p->sdp[phys_id] = true;
3234 break;
3235 case I40E_AQ_CAP_ID_MDIO:
3236 if (number == 1) {
3237 p->mdio_port_num = phys_id;
3238 p->mdio_port_mode = logical_id;
3239 }
3240 break;
3241 case I40E_AQ_CAP_ID_1588:
3242 if (number == 1)
3243 p->ieee_1588 = true;
3244 break;
3245 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3246 p->fd = true;
3247 p->fd_filters_guaranteed = number;
3248 p->fd_filters_best_effort = logical_id;
3249 break;
3250 case I40E_AQ_CAP_ID_WSR_PROT:
3251 p->wr_csr_prot = (u64)number;
3252 p->wr_csr_prot |= (u64)logical_id << 32;
3253 break;
3254 case I40E_AQ_CAP_ID_NVM_MGMT:
3255 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3256 p->sec_rev_disabled = true;
3257 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3258 p->update_disabled = true;
3259 break;
3260 default:
3261 break;
3262 }
3263 }
3264
3265 if (p->fcoe)
3266 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3267
3268 /* Software override ensuring FCoE is disabled if npar or mfp
3269 * mode is enabled, because it is not supported in these modes.
3270 */
3271 if (p->npar_enable || p->flex10_enable)
3272 p->fcoe = false;
3273
3274 /* count the enabled ports (aka the "not disabled" ports) */
3275 hw->num_ports = 0;
3276 for (i = 0; i < 4; i++) {
3277 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3278 u64 port_cfg = 0;
3279
3280 /* use AQ read to get the physical register offset instead
3281 * of the port relative offset
3282 */
3283 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3284 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3285 hw->num_ports++;
3286 }
3287
3288 /* OCP cards case: if a mezz is removed the Ethernet port is at
3289 * disabled state in PRTGEN_CNF register. Additional NVM read is
3290 * needed in order to check if we are dealing with OCP card.
3291 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
3292 * physical ports results in wrong partition id calculation and thus
3293 * not supporting WoL.
3294 */
3295 if (hw->mac.type == I40E_MAC_X722) {
3296 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3297 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3298 2 * I40E_SR_OCP_CFG_WORD0,
3299 sizeof(ocp_cfg_word0),
3300 &ocp_cfg_word0, true, NULL);
3301 if (!status &&
3302 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3303 hw->num_ports = 4;
3304 i40e_release_nvm(hw);
3305 }
3306 }
3307
3308 valid_functions = p->valid_functions;
3309 num_functions = 0;
3310 while (valid_functions) {
3311 if (valid_functions & 1)
3312 num_functions++;
3313 valid_functions >>= 1;
3314 }
3315
3316 /* partition id is 1-based, and functions are evenly spread
3317 * across the ports as partitions
3318 */
3319 if (hw->num_ports != 0) {
3320 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3321 hw->num_partitions = num_functions / hw->num_ports;
3322 }
3323
3324 /* additional HW specific goodies that might
3325 * someday be HW version specific
3326 */
3327 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3328}
3329
3330/**
3331 * i40e_aq_discover_capabilities
3332 * @hw: pointer to the hw struct
3333 * @buff: a virtual buffer to hold the capabilities
3334 * @buff_size: Size of the virtual buffer
3335 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3336 * @list_type_opc: capabilities type to discover - pass in the command opcode
3337 * @cmd_details: pointer to command details structure or NULL
3338 *
3339 * Get the device capabilities descriptions from the firmware
3340 **/
3341int i40e_aq_discover_capabilities(struct i40e_hw *hw,
3342 void *buff, u16 buff_size, u16 *data_size,
3343 enum i40e_admin_queue_opc list_type_opc,
3344 struct i40e_asq_cmd_details *cmd_details)
3345{
3346 struct i40e_aqc_list_capabilites *cmd;
3347 struct i40e_aq_desc desc;
3348 int status = 0;
3349
3350 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3351
3352 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3353 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3354 status = -EINVAL;
3355 goto exit;
3356 }
3357
3358 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3359
3360 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3361 if (buff_size > I40E_AQ_LARGE_BUF)
3362 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3363
3364 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3365 *data_size = le16_to_cpu(desc.datalen);
3366
3367 if (status)
3368 goto exit;
3369
3370 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3371 list_type_opc);
3372
3373exit:
3374 return status;
3375}
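
/*
 * Usage sketch (illustrative only): callers normally retry with a larger
 * buffer when firmware answers ENOMEM, using the needed size reported in
 * data_size:
 *
 *	u16 needed = 0;
 *
 *	err = i40e_aq_discover_capabilities(hw, buf, buf_size, &needed,
 *					    i40e_aqc_opc_list_func_capabilities,
 *					    NULL);
 *	if (err && hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
 *		(reallocate buf with at least "needed" bytes and retry)
 */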
3376
3377/**
3378 * i40e_aq_update_nvm
3379 * @hw: pointer to the hw struct
3380 * @module_pointer: module pointer location in words from the NVM beginning
3381 * @offset: byte offset from the module beginning
3382 * @length: length of the section to be written (in bytes from the offset)
3383 * @data: command buffer (size [bytes] = length)
3384 * @last_command: tells if this is the last command in a series
3385 * @preservation_flags: Preservation mode flags
3386 * @cmd_details: pointer to command details structure or NULL
3387 *
3388 * Update the NVM using the admin queue commands
3389 **/
3390int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3391 u32 offset, u16 length, void *data,
3392 bool last_command, u8 preservation_flags,
3393 struct i40e_asq_cmd_details *cmd_details)
3394{
3395 struct i40e_aq_desc desc;
3396 struct i40e_aqc_nvm_update *cmd =
3397 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3398 int status;
3399
3400 /* The highest byte of the offset must be zero. */
3401 if (offset & 0xFF000000) {
3402 status = -EINVAL;
3403 goto i40e_aq_update_nvm_exit;
3404 }
3405
3406 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3407
3408 /* If this is the last command in a series, set the proper flag. */
3409 if (last_command)
3410 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3411 if (hw->mac.type == I40E_MAC_X722) {
3412 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3413 cmd->command_flags |=
3414 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3415 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3416 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3417 cmd->command_flags |=
3418 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3419 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3420 }
3421 cmd->module_pointer = module_pointer;
3422 cmd->offset = cpu_to_le32(offset);
3423 cmd->length = cpu_to_le16(length);
3424
3425 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3426 if (length > I40E_AQ_LARGE_BUF)
3427 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3428
3429 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3430
3431i40e_aq_update_nvm_exit:
3432 return status;
3433}
3434
3435/**
3436 * i40e_aq_rearrange_nvm
3437 * @hw: pointer to the hw struct
3438 * @rearrange_nvm: defines direction of rearrangement
3439 * @cmd_details: pointer to command details structure or NULL
3440 *
3441 * Rearrange NVM structure, available only for transition FW
3442 **/
3443int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3444 u8 rearrange_nvm,
3445 struct i40e_asq_cmd_details *cmd_details)
3446{
3447 struct i40e_aqc_nvm_update *cmd;
3448 struct i40e_aq_desc desc;
3449 int status;
3450
3451 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3452
3453 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3454
3455 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3456 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3457
3458 if (!rearrange_nvm) {
3459 status = -EINVAL;
3460 goto i40e_aq_rearrange_nvm_exit;
3461 }
3462
3463 cmd->command_flags |= rearrange_nvm;
3464 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3465
3466i40e_aq_rearrange_nvm_exit:
3467 return status;
3468}
3469
3470/**
3471 * i40e_aq_get_lldp_mib
3472 * @hw: pointer to the hw struct
3473 * @bridge_type: type of bridge requested
3474 * @mib_type: Local, Remote or both Local and Remote MIBs
3475 * @buff: pointer to a user supplied buffer to store the MIB block
3476 * @buff_size: size of the buffer (in bytes)
3477 * @local_len: length of the returned Local LLDP MIB
3478 * @remote_len: length of the returned Remote LLDP MIB
3479 * @cmd_details: pointer to command details structure or NULL
3480 *
3481 * Requests the complete LLDP MIB (entire packet).
3482 **/
3483int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3484 u8 mib_type, void *buff, u16 buff_size,
3485 u16 *local_len, u16 *remote_len,
3486 struct i40e_asq_cmd_details *cmd_details)
3487{
3488 struct i40e_aq_desc desc;
3489 struct i40e_aqc_lldp_get_mib *cmd =
3490 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3491 struct i40e_aqc_lldp_get_mib *resp =
3492 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3493 int status;
3494
3495 if (buff_size == 0 || !buff)
3496 return -EINVAL;
3497
3498 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3499 /* Indirect Command */
3500 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3501
3502 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3503 cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
3504
3505 desc.datalen = cpu_to_le16(buff_size);
3506
3507 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3508 if (buff_size > I40E_AQ_LARGE_BUF)
3509 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3510
3511 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3512 if (!status) {
3513 if (local_len != NULL)
3514 *local_len = le16_to_cpu(resp->local_len);
3515 if (remote_len != NULL)
3516 *remote_len = le16_to_cpu(resp->remote_len);
3517 }
3518
3519 return status;
3520}
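
/*
 * Illustrative usage sketch (editorial, not part of the driver): fetching the
 * local MIB into a caller-provided buffer. I40E_AQ_LLDP_MIB_LOCAL is assumed
 * to be the matching mib_type value from i40e_adminq_cmd.h; "buf" is
 * hypothetical:
 *
 *	u16 local_len = 0, remote_len = 0;
 *
 *	status = i40e_aq_get_lldp_mib(hw, 0, I40E_AQ_LLDP_MIB_LOCAL,
 *				      buf, sizeof(buf),
 *				      &local_len, &remote_len, NULL);
 */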
3521
3522/**
3523 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3524 * @hw: pointer to the hw struct
3525 * @mib_type: Local, Remote or both Local and Remote MIBs
3526 * @buff: pointer to a user supplied buffer to store the MIB block
3527 * @buff_size: size of the buffer (in bytes)
3528 * @cmd_details: pointer to command details structure or NULL
3529 *
3530 * Set the LLDP MIB.
3531 **/
3532int
3533i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3534 u8 mib_type, void *buff, u16 buff_size,
3535 struct i40e_asq_cmd_details *cmd_details)
3536{
3537 struct i40e_aqc_lldp_set_local_mib *cmd;
3538 struct i40e_aq_desc desc;
3539 int status;
3540
3541 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3542 if (buff_size == 0 || !buff)
3543 return -EINVAL;
3544
3545 i40e_fill_default_direct_cmd_desc(&desc,
3546 i40e_aqc_opc_lldp_set_local_mib);
3547 /* Indirect Command */
3548 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3549 if (buff_size > I40E_AQ_LARGE_BUF)
3550 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3551 desc.datalen = cpu_to_le16(buff_size);
3552
3553 cmd->type = mib_type;
3554 cmd->length = cpu_to_le16(buff_size);
3555 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3556 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3557
3558 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3559 return status;
3560}
3561
3562/**
3563 * i40e_aq_cfg_lldp_mib_change_event
3564 * @hw: pointer to the hw struct
3565 * @enable_update: Enable or Disable event posting
3566 * @cmd_details: pointer to command details structure or NULL
3567 *
3568 * Enable or Disable posting of an event on ARQ when LLDP MIB
3569 * associated with the interface changes
3570 **/
3571int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3572 bool enable_update,
3573 struct i40e_asq_cmd_details *cmd_details)
3574{
3575 struct i40e_aq_desc desc;
3576 struct i40e_aqc_lldp_update_mib *cmd =
3577 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3578 int status;
3579
3580 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3581
3582 if (!enable_update)
3583 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3584
3585 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3586
3587 return status;
3588}
3589
3590/**
3591 * i40e_aq_restore_lldp
3592 * @hw: pointer to the hw struct
3593 * @setting: pointer to factory setting variable or NULL
3594 * @restore: True if factory settings should be restored
3595 * @cmd_details: pointer to command details structure or NULL
3596 *
3597 * Restore LLDP Agent factory settings if @restore is set to true; otherwise
3598 * only return the factory setting in the AQ response.
3599 **/
3600int
3601i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3602 struct i40e_asq_cmd_details *cmd_details)
3603{
3604 struct i40e_aq_desc desc;
3605 struct i40e_aqc_lldp_restore *cmd =
3606 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3607 int status;
3608
3609 if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
3610 i40e_debug(hw, I40E_DEBUG_ALL,
3611 "Restore LLDP not supported by current FW version.\n");
3612 return -ENODEV;
3613 }
3614
3615 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3616
3617 if (restore)
3618 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3619
3620 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3621
3622 if (setting)
3623 *setting = cmd->command & 1;
3624
3625 return status;
3626}
3627
3628/**
3629 * i40e_aq_stop_lldp
3630 * @hw: pointer to the hw struct
3631 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3632 * @persist: True if stop of LLDP should be persistent across power cycles
3633 * @cmd_details: pointer to command details structure or NULL
3634 *
3635 * Stop or Shutdown the embedded LLDP Agent
3636 **/
3637int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3638 bool persist,
3639 struct i40e_asq_cmd_details *cmd_details)
3640{
3641 struct i40e_aq_desc desc;
3642 struct i40e_aqc_lldp_stop *cmd =
3643 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3644 int status;
3645
3646 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3647
3648 if (shutdown_agent)
3649 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3650
3651 if (persist) {
3652 if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
3653 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3654 else
3655 i40e_debug(hw, I40E_DEBUG_ALL,
3656 "Persistent Stop LLDP not supported by current FW version.\n");
3657 }
3658
3659 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3660
3661 return status;
3662}
3663
3664/**
3665 * i40e_aq_start_lldp
3666 * @hw: pointer to the hw struct
3667 * @persist: True if start of LLDP should be persistent across power cycles
3668 * @cmd_details: pointer to command details structure or NULL
3669 *
3670 * Start the embedded LLDP Agent on all ports.
3671 **/
3672int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3673 struct i40e_asq_cmd_details *cmd_details)
3674{
3675 struct i40e_aq_desc desc;
3676 struct i40e_aqc_lldp_start *cmd =
3677 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3678 int status;
3679
3680 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3681
3682 cmd->command = I40E_AQ_LLDP_AGENT_START;
3683
3684 if (persist) {
3685 if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
3686 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3687 else
3688 i40e_debug(hw, I40E_DEBUG_ALL,
3689 "Persistent Start LLDP not supported by current FW version.\n");
3690 }
3691
3692 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3693
3694 return status;
3695}
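
/*
 * Editorial note: i40e_aq_stop_lldp() and i40e_aq_start_lldp() are normally
 * used as a pair. Passing persist=true makes the agent state survive power
 * cycles, but only on firmware advertising I40E_HW_CAP_FW_LLDP_PERSISTENT;
 * otherwise the request silently degrades to a non-persistent stop/start, as
 * the debug messages above indicate.
 */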
3696
3697/**
3698 * i40e_aq_set_dcb_parameters
3699 * @hw: pointer to the hw struct
3700 * @cmd_details: pointer to command details structure or NULL
3701 * @dcb_enable: True if DCB configuration needs to be applied
3702 * Apply the DCB configuration in firmware when @dcb_enable is true.
3703 **/
3704int
3705i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3706 struct i40e_asq_cmd_details *cmd_details)
3707{
3708 struct i40e_aq_desc desc;
3709 struct i40e_aqc_set_dcb_parameters *cmd =
3710 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3711 int status;
3712
3713 if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
3714 return -ENODEV;
3715
3716 i40e_fill_default_direct_cmd_desc(&desc,
3717 i40e_aqc_opc_set_dcb_parameters);
3718
3719 if (dcb_enable) {
3720 cmd->valid_flags = I40E_DCB_VALID;
3721 cmd->command = I40E_AQ_DCB_SET_AGENT;
3722 }
3723 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3724
3725 return status;
3726}
3727
3728/**
3729 * i40e_aq_get_cee_dcb_config
3730 * @hw: pointer to the hw struct
3731 * @buff: response buffer that stores CEE operational configuration
3732 * @buff_size: size of the buffer passed
3733 * @cmd_details: pointer to command details structure or NULL
3734 *
3735 * Get CEE DCBX mode operational configuration from firmware
3736 **/
3737int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3738 void *buff, u16 buff_size,
3739 struct i40e_asq_cmd_details *cmd_details)
3740{
3741 struct i40e_aq_desc desc;
3742 int status;
3743
3744 if (buff_size == 0 || !buff)
3745 return -EINVAL;
3746
3747 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3748
3749 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3750 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3751 cmd_details);
3752
3753 return status;
3754}
3755
3756/**
3757 * i40e_aq_add_udp_tunnel
3758 * @hw: pointer to the hw struct
3759 * @udp_port: the UDP port to add in Host byte order
3760 * @protocol_index: protocol index type
3761 * @filter_index: pointer to filter index
3762 * @cmd_details: pointer to command details structure or NULL
3763 *
3764 * Note: Firmware expects the udp_port value to be in Little Endian format,
3765 * and this function will call cpu_to_le16 to convert from Host byte order to
3766 * Little Endian order.
3767 **/
3768int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3769 u16 udp_port, u8 protocol_index,
3770 u8 *filter_index,
3771 struct i40e_asq_cmd_details *cmd_details)
3772{
3773 struct i40e_aq_desc desc;
3774 struct i40e_aqc_add_udp_tunnel *cmd =
3775 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3776 struct i40e_aqc_del_udp_tunnel_completion *resp =
3777 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3778 int status;
3779
3780 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3781
3782 cmd->udp_port = cpu_to_le16(udp_port);
3783 cmd->protocol_type = protocol_index;
3784
3785 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3786
3787 if (!status && filter_index)
3788 *filter_index = resp->index;
3789
3790 return status;
3791}
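
/*
 * Illustrative usage sketch (editorial, not part of the driver): offloading
 * the IANA VXLAN port. The port is given in host byte order, and
 * I40E_AQC_TUNNEL_TYPE_VXLAN is assumed to be the matching protocol index
 * from i40e_adminq_cmd.h:
 *
 *	u8 filter_index;
 *
 *	status = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *					&filter_index, NULL);
 */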
3792
3793/**
3794 * i40e_aq_del_udp_tunnel
3795 * @hw: pointer to the hw struct
3796 * @index: filter index
3797 * @cmd_details: pointer to command details structure or NULL
3798 **/
3799int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3800 struct i40e_asq_cmd_details *cmd_details)
3801{
3802 struct i40e_aq_desc desc;
3803 struct i40e_aqc_remove_udp_tunnel *cmd =
3804 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3805 int status;
3806
3807 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3808
3809 cmd->index = index;
3810
3811 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3812
3813 return status;
3814}
3815
3816/**
3817 * i40e_aq_delete_element - Delete switch element
3818 * @hw: pointer to the hw struct
3819 * @seid: the SEID to delete from the switch
3820 * @cmd_details: pointer to command details structure or NULL
3821 *
3822 * This deletes a switch element from the switch.
3823 **/
3824int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3825 struct i40e_asq_cmd_details *cmd_details)
3826{
3827 struct i40e_aq_desc desc;
3828 struct i40e_aqc_switch_seid *cmd =
3829 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3830 int status;
3831
3832 if (seid == 0)
3833 return -EINVAL;
3834
3835 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3836
3837 cmd->seid = cpu_to_le16(seid);
3838
3839 status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
3840 cmd_details, true);
3841
3842 return status;
3843}
3844
3845/**
3846 * i40e_aq_dcb_updated - DCB Updated Command
3847 * @hw: pointer to the hw struct
3848 * @cmd_details: pointer to command details structure or NULL
3849 *
3850 * EMP will return when the shared RPB settings have been
3851 * recomputed and modified. The retval field in the descriptor
3852 * will be set to 0 when RPB is modified.
3853 **/
3854int i40e_aq_dcb_updated(struct i40e_hw *hw,
3855 struct i40e_asq_cmd_details *cmd_details)
3856{
3857 struct i40e_aq_desc desc;
3858 int status;
3859
3860 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3861
3862 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3863
3864 return status;
3865}
3866
3867/**
3868 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3869 * @hw: pointer to the hw struct
3870 * @seid: seid for the physical port/switching component/vsi
3871 * @buff: Indirect buffer to hold data parameters and response
3872 * @buff_size: Indirect buffer size
3873 * @opcode: Tx scheduler AQ command opcode
3874 * @cmd_details: pointer to command details structure or NULL
3875 *
3876 * Generic command handler for Tx scheduler AQ commands
3877 **/
3878static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3879 void *buff, u16 buff_size,
3880 enum i40e_admin_queue_opc opcode,
3881 struct i40e_asq_cmd_details *cmd_details)
3882{
3883 struct i40e_aq_desc desc;
3884 struct i40e_aqc_tx_sched_ind *cmd =
3885 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3886 int status;
3887 bool cmd_param_flag = false;
3888
3889 switch (opcode) {
3890 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3891 case i40e_aqc_opc_configure_vsi_tc_bw:
3892 case i40e_aqc_opc_enable_switching_comp_ets:
3893 case i40e_aqc_opc_modify_switching_comp_ets:
3894 case i40e_aqc_opc_disable_switching_comp_ets:
3895 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3896 case i40e_aqc_opc_configure_switching_comp_bw_config:
3897 cmd_param_flag = true;
3898 break;
3899 case i40e_aqc_opc_query_vsi_bw_config:
3900 case i40e_aqc_opc_query_vsi_ets_sla_config:
3901 case i40e_aqc_opc_query_switching_comp_ets_config:
3902 case i40e_aqc_opc_query_port_ets_config:
3903 case i40e_aqc_opc_query_switching_comp_bw_config:
3904 cmd_param_flag = false;
3905 break;
3906 default:
3907 return -EINVAL;
3908 }
3909
3910 i40e_fill_default_direct_cmd_desc(&desc, opcode);
3911
3912 /* Indirect command */
3913 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3914 if (cmd_param_flag)
3915 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3916 if (buff_size > I40E_AQ_LARGE_BUF)
3917 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3918
3919 desc.datalen = cpu_to_le16(buff_size);
3920
3921 cmd->vsi_seid = cpu_to_le16(seid);
3922
3923 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3924
3925 return status;
3926}
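
/*
 * Editorial note: the i40e_aq_config_*() and i40e_aq_query_*() helpers below
 * all funnel through i40e_aq_tx_sched_cmd(); the opcode switch above decides
 * whether the buffer carries command parameters (I40E_AQ_FLAG_RD set) or is
 * only used to receive the query response.
 */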
3927
3928/**
3929 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3930 * @hw: pointer to the hw struct
3931 * @seid: VSI seid
3932 * @credit: BW limit credits (0 = disabled)
3933 * @max_credit: Max BW limit credits
3934 * @cmd_details: pointer to command details structure or NULL
3935 **/
3936int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3937 u16 seid, u16 credit, u8 max_credit,
3938 struct i40e_asq_cmd_details *cmd_details)
3939{
3940 struct i40e_aq_desc desc;
3941 struct i40e_aqc_configure_vsi_bw_limit *cmd =
3942 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
3943 int status;
3944
3945 i40e_fill_default_direct_cmd_desc(&desc,
3946 i40e_aqc_opc_configure_vsi_bw_limit);
3947
3948 cmd->vsi_seid = cpu_to_le16(seid);
3949 cmd->credit = cpu_to_le16(credit);
3950 cmd->max_credit = max_credit;
3951
3952 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3953
3954 return status;
3955}
3956
3957/**
3958 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
3959 * @hw: pointer to the hw struct
3960 * @seid: VSI seid
3961 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
3962 * @cmd_details: pointer to command details structure or NULL
3963 **/
3964int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
3965 u16 seid,
3966 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
3967 struct i40e_asq_cmd_details *cmd_details)
3968{
3969 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3970 i40e_aqc_opc_configure_vsi_tc_bw,
3971 cmd_details);
3972}
3973
3974/**
3975 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
3976 * @hw: pointer to the hw struct
3977 * @seid: seid of the switching component connected to Physical Port
3978 * @ets_data: Buffer holding ETS parameters
3979 * @opcode: Tx scheduler AQ command opcode
3980 * @cmd_details: pointer to command details structure or NULL
3981 **/
3982int
3983i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
3984 u16 seid,
3985 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
3986 enum i40e_admin_queue_opc opcode,
3987 struct i40e_asq_cmd_details *cmd_details)
3988{
3989 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
3990 sizeof(*ets_data), opcode, cmd_details);
3991}
3992
3993/**
3994 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
3995 * @hw: pointer to the hw struct
3996 * @seid: seid of the switching component
3997 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
3998 * @cmd_details: pointer to command details structure or NULL
3999 **/
4000int
4001i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4002 u16 seid,
4003 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4004 struct i40e_asq_cmd_details *cmd_details)
4005{
4006 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4007 i40e_aqc_opc_configure_switching_comp_bw_config,
4008 cmd_details);
4009}
4010
4011/**
4012 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4013 * @hw: pointer to the hw struct
4014 * @seid: seid of the VSI
4015 * @bw_data: Buffer to hold VSI BW configuration
4016 * @cmd_details: pointer to command details structure or NULL
4017 **/
4018int
4019i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4020 u16 seid,
4021 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4022 struct i40e_asq_cmd_details *cmd_details)
4023{
4024 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4025 i40e_aqc_opc_query_vsi_bw_config,
4026 cmd_details);
4027}
4028
4029/**
4030 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4031 * @hw: pointer to the hw struct
4032 * @seid: seid of the VSI
4033 * @bw_data: Buffer to hold VSI BW configuration per TC
4034 * @cmd_details: pointer to command details structure or NULL
4035 **/
4036int
4037i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4038 u16 seid,
4039 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4040 struct i40e_asq_cmd_details *cmd_details)
4041{
4042 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4043 i40e_aqc_opc_query_vsi_ets_sla_config,
4044 cmd_details);
4045}
4046
4047/**
4048 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4049 * @hw: pointer to the hw struct
4050 * @seid: seid of the switching component
4051 * @bw_data: Buffer to hold switching component's per TC BW config
4052 * @cmd_details: pointer to command details structure or NULL
4053 **/
4054int
4055i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4056 u16 seid,
4057 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4058 struct i40e_asq_cmd_details *cmd_details)
4059{
4060 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4061 i40e_aqc_opc_query_switching_comp_ets_config,
4062 cmd_details);
4063}
4064
4065/**
4066 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4067 * @hw: pointer to the hw struct
4068 * @seid: seid of the VSI or switching component connected to Physical Port
4069 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4070 * @cmd_details: pointer to command details structure or NULL
4071 **/
4072int
4073i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4074 u16 seid,
4075 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4076 struct i40e_asq_cmd_details *cmd_details)
4077{
4078 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4079 i40e_aqc_opc_query_port_ets_config,
4080 cmd_details);
4081}
4082
4083/**
4084 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4085 * @hw: pointer to the hw struct
4086 * @seid: seid of the switching component
4087 * @bw_data: Buffer to hold switching component's BW configuration
4088 * @cmd_details: pointer to command details structure or NULL
4089 **/
4090int
4091i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4092 u16 seid,
4093 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4094 struct i40e_asq_cmd_details *cmd_details)
4095{
4096 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4097 i40e_aqc_opc_query_switching_comp_bw_config,
4098 cmd_details);
4099}
4100
4101/**
4102 * i40e_validate_filter_settings
4103 * @hw: pointer to the hardware structure
4104 * @settings: Filter control settings
4105 *
4106 * Check and validate the filter control settings passed.
4107 * The function checks for the valid filter/context sizes being
4108 * passed for FCoE and PE.
4109 *
4110 * Returns 0 if the values passed are valid and within
4111 * range else returns an error.
4112 **/
4113static int
4114i40e_validate_filter_settings(struct i40e_hw *hw,
4115 struct i40e_filter_control_settings *settings)
4116{
4117 u32 fcoe_cntx_size, fcoe_filt_size;
4118 u32 fcoe_fmax;
4119 u32 val;
4120
4121 /* Validate FCoE settings passed */
4122 switch (settings->fcoe_filt_num) {
4123 case I40E_HASH_FILTER_SIZE_1K:
4124 case I40E_HASH_FILTER_SIZE_2K:
4125 case I40E_HASH_FILTER_SIZE_4K:
4126 case I40E_HASH_FILTER_SIZE_8K:
4127 case I40E_HASH_FILTER_SIZE_16K:
4128 case I40E_HASH_FILTER_SIZE_32K:
4129 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4130 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4131 break;
4132 default:
4133 return -EINVAL;
4134 }
4135
4136 switch (settings->fcoe_cntx_num) {
4137 case I40E_DMA_CNTX_SIZE_512:
4138 case I40E_DMA_CNTX_SIZE_1K:
4139 case I40E_DMA_CNTX_SIZE_2K:
4140 case I40E_DMA_CNTX_SIZE_4K:
4141 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4142 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4143 break;
4144 default:
4145 return -EINVAL;
4146 }
4147
4148 /* Validate PE settings passed */
4149 switch (settings->pe_filt_num) {
4150 case I40E_HASH_FILTER_SIZE_1K:
4151 case I40E_HASH_FILTER_SIZE_2K:
4152 case I40E_HASH_FILTER_SIZE_4K:
4153 case I40E_HASH_FILTER_SIZE_8K:
4154 case I40E_HASH_FILTER_SIZE_16K:
4155 case I40E_HASH_FILTER_SIZE_32K:
4156 case I40E_HASH_FILTER_SIZE_64K:
4157 case I40E_HASH_FILTER_SIZE_128K:
4158 case I40E_HASH_FILTER_SIZE_256K:
4159 case I40E_HASH_FILTER_SIZE_512K:
4160 case I40E_HASH_FILTER_SIZE_1M:
4161 break;
4162 default:
4163 return -EINVAL;
4164 }
4165
4166 switch (settings->pe_cntx_num) {
4167 case I40E_DMA_CNTX_SIZE_512:
4168 case I40E_DMA_CNTX_SIZE_1K:
4169 case I40E_DMA_CNTX_SIZE_2K:
4170 case I40E_DMA_CNTX_SIZE_4K:
4171 case I40E_DMA_CNTX_SIZE_8K:
4172 case I40E_DMA_CNTX_SIZE_16K:
4173 case I40E_DMA_CNTX_SIZE_32K:
4174 case I40E_DMA_CNTX_SIZE_64K:
4175 case I40E_DMA_CNTX_SIZE_128K:
4176 case I40E_DMA_CNTX_SIZE_256K:
4177 break;
4178 default:
4179 return -EINVAL;
4180 }
4181
4182 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4183 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4184 fcoe_fmax = FIELD_GET(I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK, val);
4185 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4186 return -EINVAL;
4187
4188 return 0;
4189}
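
/*
 * Worked example (editorial): the size enums above are shift counts applied
 * to a base, so e.g. fcoe_filt_num == I40E_HASH_FILTER_SIZE_8K yields
 * I40E_HASH_FILTER_BASE_SIZE << I40E_HASH_FILTER_SIZE_8K filters, and the
 * DMA context sizes scale the same way; the resulting FCoE filter size plus
 * context size must not exceed the PMFCOEFMAX value read above.
 */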
4190
4191/**
4192 * i40e_set_filter_control
4193 * @hw: pointer to the hardware structure
4194 * @settings: Filter control settings
4195 *
4196 * Set the Queue Filters for PE/FCoE and enable filters required
4197 * for a single PF. It is expected that these settings are programmed
4198 * at the driver initialization time.
4199 **/
4200int i40e_set_filter_control(struct i40e_hw *hw,
4201 struct i40e_filter_control_settings *settings)
4202{
4203 u32 hash_lut_size = 0;
4204 int ret = 0;
4205 u32 val;
4206
4207 if (!settings)
4208 return -EINVAL;
4209
4210 /* Validate the input settings */
4211 ret = i40e_validate_filter_settings(hw, settings);
4212 if (ret)
4213 return ret;
4214
4215 /* Read the PF Queue Filter control register */
4216 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4217
4218 /* Program required PE hash buckets for the PF */
4219 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4220 val |= FIELD_PREP(I40E_PFQF_CTL_0_PEHSIZE_MASK, settings->pe_filt_num);
4221 /* Program required PE contexts for the PF */
4222 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4223 val |= FIELD_PREP(I40E_PFQF_CTL_0_PEDSIZE_MASK, settings->pe_cntx_num);
4224
4225 /* Program required FCoE hash buckets for the PF */
4226 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4227 val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCHSIZE_MASK,
4228 settings->fcoe_filt_num);
4229 /* Program required FCoE DDP contexts for the PF */
4230 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4231 val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCDSIZE_MASK,
4232 settings->fcoe_cntx_num);
4233
4234 /* Program Hash LUT size for the PF */
4235 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4236 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4237 hash_lut_size = 1;
4238 val |= FIELD_PREP(I40E_PFQF_CTL_0_HASHLUTSIZE_MASK, hash_lut_size);
4239
4240 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4241 if (settings->enable_fdir)
4242 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4243 if (settings->enable_ethtype)
4244 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4245 if (settings->enable_macvlan)
4246 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4247
4248 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4249
4250 return 0;
4251}
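
/*
 * Illustrative usage sketch (editorial, not part of the driver): a minimal
 * PF-init style call. Only the fields shown are filled in; the FCoE/PE hash
 * and context size fields must also be set to values accepted by
 * i40e_validate_filter_settings() and are omitted here for brevity:
 *
 *	struct i40e_filter_control_settings settings = {
 *		.hash_lut_size = I40E_HASH_LUT_SIZE_512,
 *		.enable_fdir = true,
 *		.enable_ethtype = true,
 *		.enable_macvlan = true,
 *	};
 *
 *	ret = i40e_set_filter_control(hw, &settings);
 */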
4252
4253/**
4254 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4255 * @hw: pointer to the hw struct
4256 * @mac_addr: MAC address to use in the filter
4257 * @ethtype: Ethertype to use in the filter
4258 * @flags: Flags that needs to be applied to the filter
4259 * @vsi_seid: seid of the control VSI
4260 * @queue: VSI queue number to send the packet to
4261 * @is_add: Add control packet filter if True else remove
4262 * @stats: Structure to hold information on control filter counts
4263 * @cmd_details: pointer to command details structure or NULL
4264 *
4265 * This command will Add or Remove control packet filter for a control VSI.
4266 * In return it will update the total number of perfect filter count in
4267 * the stats member.
4268 **/
4269int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4270 u8 *mac_addr, u16 ethtype, u16 flags,
4271 u16 vsi_seid, u16 queue, bool is_add,
4272 struct i40e_control_filter_stats *stats,
4273 struct i40e_asq_cmd_details *cmd_details)
4274{
4275 struct i40e_aq_desc desc;
4276 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4277 (struct i40e_aqc_add_remove_control_packet_filter *)
4278 &desc.params.raw;
4279 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4280 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4281 &desc.params.raw;
4282 int status;
4283
4284 if (vsi_seid == 0)
4285 return -EINVAL;
4286
4287 if (is_add) {
4288 i40e_fill_default_direct_cmd_desc(&desc,
4289 i40e_aqc_opc_add_control_packet_filter);
4290 cmd->queue = cpu_to_le16(queue);
4291 } else {
4292 i40e_fill_default_direct_cmd_desc(&desc,
4293 i40e_aqc_opc_remove_control_packet_filter);
4294 }
4295
4296 if (mac_addr)
4297 ether_addr_copy(cmd->mac, mac_addr);
4298
4299 cmd->etype = cpu_to_le16(ethtype);
4300 cmd->flags = cpu_to_le16(flags);
4301 cmd->seid = cpu_to_le16(vsi_seid);
4302
4303 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4304
4305 if (!status && stats) {
4306 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4307 stats->etype_used = le16_to_cpu(resp->etype_used);
4308 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4309 stats->etype_free = le16_to_cpu(resp->etype_free);
4310 }
4311
4312 return status;
4313}
4314
4315/**
4316 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4317 * @hw: pointer to the hw struct
4318 * @seid: VSI seid to add the ethertype filter to
4319 **/
4320void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4321 u16 seid)
4322{
4323#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
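 /* 0x8808 is the IEEE 802.3 MAC Control Ethertype carried by PAUSE frames */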
4324 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4325 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4326 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4327 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4328 int status;
4329
4330 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4331 seid, 0, true, NULL,
4332 NULL);
4333 if (status)
4334 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4335}
4336
4337/**
4338 * i40e_aq_alternate_read
4339 * @hw: pointer to the hardware structure
4340 * @reg_addr0: address of first dword to be read
4341 * @reg_val0: pointer for data read from 'reg_addr0'
4342 * @reg_addr1: address of second dword to be read
4343 * @reg_val1: pointer for data read from 'reg_addr1'
4344 *
4345 * Read one or two dwords from the alternate structure. Fields are indicated
4346 * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
4347 * pointer is not passed, only the register at 'reg_addr0' is read.
4348 *
4349 **/
4350static int i40e_aq_alternate_read(struct i40e_hw *hw,
4351 u32 reg_addr0, u32 *reg_val0,
4352 u32 reg_addr1, u32 *reg_val1)
4353{
4354 struct i40e_aq_desc desc;
4355 struct i40e_aqc_alternate_write *cmd_resp =
4356 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4357 int status;
4358
4359 if (!reg_val0)
4360 return -EINVAL;
4361
4362 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4363 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4364 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4365
4366 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4367
4368 if (!status) {
4369 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4370
4371 if (reg_val1)
4372 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4373 }
4374
4375 return status;
4376}
4377
4378/**
4379 * i40e_aq_suspend_port_tx
4380 * @hw: pointer to the hardware structure
4381 * @seid: port seid
4382 * @cmd_details: pointer to command details structure or NULL
4383 *
4384 * Suspend port's Tx traffic
4385 **/
4386int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4387 struct i40e_asq_cmd_details *cmd_details)
4388{
4389 struct i40e_aqc_tx_sched_ind *cmd;
4390 struct i40e_aq_desc desc;
4391 int status;
4392
4393 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4394 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4395 cmd->vsi_seid = cpu_to_le16(seid);
4396 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4397
4398 return status;
4399}
4400
4401/**
4402 * i40e_aq_resume_port_tx
4403 * @hw: pointer to the hardware structure
4404 * @cmd_details: pointer to command details structure or NULL
4405 *
4406 * Resume port's Tx traffic
4407 **/
4408int i40e_aq_resume_port_tx(struct i40e_hw *hw,
4409 struct i40e_asq_cmd_details *cmd_details)
4410{
4411 struct i40e_aq_desc desc;
4412 int status;
4413
4414 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4415
4416 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4417
4418 return status;
4419}
4420
4421/**
4422 * i40e_set_pci_config_data - store PCI bus info
4423 * @hw: pointer to hardware structure
4424 * @link_status: the link status word from PCI config space
4425 *
4426 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4427 **/
4428void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4429{
4430 hw->bus.type = i40e_bus_type_pci_express;
4431
4432 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4433 case PCI_EXP_LNKSTA_NLW_X1:
4434 hw->bus.width = i40e_bus_width_pcie_x1;
4435 break;
4436 case PCI_EXP_LNKSTA_NLW_X2:
4437 hw->bus.width = i40e_bus_width_pcie_x2;
4438 break;
4439 case PCI_EXP_LNKSTA_NLW_X4:
4440 hw->bus.width = i40e_bus_width_pcie_x4;
4441 break;
4442 case PCI_EXP_LNKSTA_NLW_X8:
4443 hw->bus.width = i40e_bus_width_pcie_x8;
4444 break;
4445 default:
4446 hw->bus.width = i40e_bus_width_unknown;
4447 break;
4448 }
4449
4450 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4451 case PCI_EXP_LNKSTA_CLS_2_5GB:
4452 hw->bus.speed = i40e_bus_speed_2500;
4453 break;
4454 case PCI_EXP_LNKSTA_CLS_5_0GB:
4455 hw->bus.speed = i40e_bus_speed_5000;
4456 break;
4457 case PCI_EXP_LNKSTA_CLS_8_0GB:
4458 hw->bus.speed = i40e_bus_speed_8000;
4459 break;
4460 default:
4461 hw->bus.speed = i40e_bus_speed_unknown;
4462 break;
4463 }
4464}
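
/*
 * Editorial note: the decode above maps PCIe current link speed to
 * generation (2.5 GT/s = Gen1, 5 GT/s = Gen2, 8 GT/s = Gen3); any width or
 * speed outside the listed cases is reported as unknown.
 */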
4465
4466/**
4467 * i40e_aq_debug_dump
4468 * @hw: pointer to the hardware structure
4469 * @cluster_id: specific cluster to dump
4470 * @table_id: table id within cluster
4471 * @start_index: index of line in the block to read
4472 * @buff_size: dump buffer size
4473 * @buff: dump buffer
4474 * @ret_buff_size: actual buffer size returned
4475 * @ret_next_table: next block to read
4476 * @ret_next_index: next index to read
4477 * @cmd_details: pointer to command details structure or NULL
4478 *
4479 * Dump internal FW/HW data for debug purposes.
4480 *
4481 **/
4482int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4483 u8 table_id, u32 start_index, u16 buff_size,
4484 void *buff, u16 *ret_buff_size,
4485 u8 *ret_next_table, u32 *ret_next_index,
4486 struct i40e_asq_cmd_details *cmd_details)
4487{
4488 struct i40e_aq_desc desc;
4489 struct i40e_aqc_debug_dump_internals *cmd =
4490 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4491 struct i40e_aqc_debug_dump_internals *resp =
4492 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4493 int status;
4494
4495 if (buff_size == 0 || !buff)
4496 return -EINVAL;
4497
4498 i40e_fill_default_direct_cmd_desc(&desc,
4499 i40e_aqc_opc_debug_dump_internals);
4500 /* Indirect Command */
4501 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4502 if (buff_size > I40E_AQ_LARGE_BUF)
4503 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4504
4505 cmd->cluster_id = cluster_id;
4506 cmd->table_id = table_id;
4507 cmd->idx = cpu_to_le32(start_index);
4508
4509 desc.datalen = cpu_to_le16(buff_size);
4510
4511 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4512 if (!status) {
4513 if (ret_buff_size)
4514 *ret_buff_size = le16_to_cpu(desc.datalen);
4515 if (ret_next_table)
4516 *ret_next_table = resp->table_id;
4517 if (ret_next_index)
4518 *ret_next_index = le32_to_cpu(resp->idx);
4519 }
4520
4521 return status;
4522}
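
/*
 * Illustrative usage sketch (editorial, not part of the driver): walking one
 * dump cluster by feeding the returned "next" cookies back in. The "cluster"
 * and "buf" names are hypothetical and the termination condition is only
 * indicative:
 *
 *	u8 table = 0;
 *	u32 idx = 0;
 *	u16 rlen = 0;
 *
 *	do {
 *		status = i40e_aq_debug_dump(hw, cluster, table, idx,
 *					    sizeof(buf), buf, &rlen,
 *					    &table, &idx, NULL);
 *	} while (!status && rlen);
 */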
4523
4524/**
4525 * i40e_read_bw_from_alt_ram
4526 * @hw: pointer to the hardware structure
4527 * @max_bw: pointer for max_bw read
4528 * @min_bw: pointer for min_bw read
4529 * @min_valid: pointer for bool that is true if min_bw is a valid value
4530 * @max_valid: pointer for bool that is true if max_bw is a valid value
4531 *
4532 * Read bw from the alternate ram for the given pf
4533 **/
4534int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4535 u32 *max_bw, u32 *min_bw,
4536 bool *min_valid, bool *max_valid)
4537{
4538 u32 max_bw_addr, min_bw_addr;
4539 int status;
4540
4541 /* Calculate the address of the min/max bw registers */
4542 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4543 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4544 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4545 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4546 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4547 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4548
4549 /* Read the bandwidths from alt ram */
4550 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4551 min_bw_addr, min_bw);
4552
4553 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4554 *min_valid = true;
4555 else
4556 *min_valid = false;
4557
4558 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4559 *max_valid = true;
4560 else
4561 *max_valid = false;
4562
4563 return status;
4564}
4565
4566/**
4567 * i40e_aq_configure_partition_bw
4568 * @hw: pointer to the hardware structure
4569 * @bw_data: Buffer holding valid pfs and bw limits
4570 * @cmd_details: pointer to command details
4571 *
4572 * Configure partitions guaranteed/max bw
4573 **/
4574int
4575i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4576 struct i40e_aqc_configure_partition_bw_data *bw_data,
4577 struct i40e_asq_cmd_details *cmd_details)
4578{
4579 u16 bwd_size = sizeof(*bw_data);
4580 struct i40e_aq_desc desc;
4581 int status;
4582
4583 i40e_fill_default_direct_cmd_desc(&desc,
4584 i40e_aqc_opc_configure_partition_bw);
4585
4586 /* Indirect command */
4587 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4588 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4589
4590 if (bwd_size > I40E_AQ_LARGE_BUF)
4591 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4592
4593 desc.datalen = cpu_to_le16(bwd_size);
4594
4595 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4596 cmd_details);
4597
4598 return status;
4599}
4600
4601/**
4602 * i40e_read_phy_register_clause22
4603 * @hw: pointer to the HW structure
4604 * @reg: register address in the page
4605 * @phy_addr: PHY address on MDIO interface
4606 * @value: PHY register value
4607 *
4608 * Reads specified PHY register value
4609 **/
4610int i40e_read_phy_register_clause22(struct i40e_hw *hw,
4611 u16 reg, u8 phy_addr, u16 *value)
4612{
4613 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4614 int status = -EIO;
4615 u32 command = 0;
4616 u16 retry = 1000;
4617
4618 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4619 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4620 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4621 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4622 (I40E_GLGEN_MSCA_MDICMD_MASK);
4623 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
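 /* Poll for MDIO command completion: up to 1000 iterations of 10 us each,
 * i.e. roughly a 10 ms budget before giving up with -EIO.
 */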
4624 do {
4625 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4626 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4627 status = 0;
4628 break;
4629 }
4630 udelay(10);
4631 retry--;
4632 } while (retry);
4633
4634 if (status) {
4635 i40e_debug(hw, I40E_DEBUG_PHY,
4636 "PHY: Can't write command to external PHY.\n");
4637 } else {
4638 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4639 *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
4640 }
4641
4642 return status;
4643}
4644
4645/**
4646 * i40e_write_phy_register_clause22
4647 * @hw: pointer to the HW structure
4648 * @reg: register address in the page
4649 * @phy_addr: PHY address on MDIO interface
4650 * @value: PHY register value
4651 *
4652 * Writes specified PHY register value
4653 **/
4654int i40e_write_phy_register_clause22(struct i40e_hw *hw,
4655 u16 reg, u8 phy_addr, u16 value)
4656{
4657 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4658 int status = -EIO;
4659 u32 command = 0;
4660 u16 retry = 1000;
4661
4662 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4663 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4664
4665 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4666 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4667 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4668 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4669 (I40E_GLGEN_MSCA_MDICMD_MASK);
4670
4671 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4672 do {
4673 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4674 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4675 status = 0;
4676 break;
4677 }
4678 udelay(10);
4679 retry--;
4680 } while (retry);
4681
4682 return status;
4683}
4684
4685/**
4686 * i40e_read_phy_register_clause45
4687 * @hw: pointer to the HW structure
4688 * @page: registers page number
4689 * @reg: register address in the page
4690 * @phy_addr: PHY address on MDIO interface
4691 * @value: PHY register value
4692 *
4693 * Reads specified PHY register value
4694 **/
4695int i40e_read_phy_register_clause45(struct i40e_hw *hw,
4696 u8 page, u16 reg, u8 phy_addr, u16 *value)
4697{
4698 u8 port_num = hw->func_caps.mdio_port_num;
4699 int status = -EIO;
4700 u32 command = 0;
4701 u16 retry = 1000;
4702
4703 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4704 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4705 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4706 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4707 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4708 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4709 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4710 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4711 do {
4712 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4713 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4714 status = 0;
4715 break;
4716 }
4717 usleep_range(10, 20);
4718 retry--;
4719 } while (retry);
4720
4721 if (status) {
4722 i40e_debug(hw, I40E_DEBUG_PHY,
4723 "PHY: Can't write command to external PHY.\n");
4724 goto phy_read_end;
4725 }
4726
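 /* Clause 45 access is two-phased: the address cycle above latched the
 * register address, now issue the read cycle for the same device/page.
 */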
4727 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4728 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4729 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4730 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4731 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4732 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4733 status = -EIO;
4734 retry = 1000;
4735 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4736 do {
4737 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4738 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4739 status = 0;
4740 break;
4741 }
4742 usleep_range(10, 20);
4743 retry--;
4744 } while (retry);
4745
4746 if (!status) {
4747 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4748 *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
4749 } else {
4750 i40e_debug(hw, I40E_DEBUG_PHY,
4751 "PHY: Can't read register value from external PHY.\n");
4752 }
4753
4754phy_read_end:
4755 return status;
4756}
4757
4758/**
4759 * i40e_write_phy_register_clause45
4760 * @hw: pointer to the HW structure
4761 * @page: registers page number
4762 * @reg: register address in the page
4763 * @phy_addr: PHY address on MDIO interface
4764 * @value: PHY register value
4765 *
4766 * Writes value to specified PHY register
4767 **/
4768int i40e_write_phy_register_clause45(struct i40e_hw *hw,
4769 u8 page, u16 reg, u8 phy_addr, u16 value)
4770{
4771 u8 port_num = hw->func_caps.mdio_port_num;
4772 int status = -EIO;
4773 u16 retry = 1000;
4774 u32 command = 0;
4775
4776 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4777 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4778 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4779 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4780 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4781 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4782 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4783 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4784 do {
4785 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4786 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4787 status = 0;
4788 break;
4789 }
4790 usleep_range(10, 20);
4791 retry--;
4792 } while (retry);
4793 if (status) {
4794 i40e_debug(hw, I40E_DEBUG_PHY,
4795 "PHY: Can't write command to external PHY.\n");
4796 goto phy_write_end;
4797 }
4798
4799 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4800 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4801
4802 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4803 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4804 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4805 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4806 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4807 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4808 status = -EIO;
4809 retry = 1000;
4810 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4811 do {
4812 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4813 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4814 status = 0;
4815 break;
4816 }
4817 usleep_range(10, 20);
4818 retry--;
4819 } while (retry);
4820
4821phy_write_end:
4822 return status;
4823}
4824
4825/**
4826 * i40e_write_phy_register
4827 * @hw: pointer to the HW structure
4828 * @page: registers page number
4829 * @reg: register address in the page
4830 * @phy_addr: PHY address on MDIO interface
4831 * @value: PHY register value
4832 *
4833 * Writes value to specified PHY register
4834 **/
4835int i40e_write_phy_register(struct i40e_hw *hw,
4836 u8 page, u16 reg, u8 phy_addr, u16 value)
4837{
4838 int status;
4839
4840 switch (hw->device_id) {
4841 case I40E_DEV_ID_1G_BASE_T_X722:
4842 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4843 value);
4844 break;
4845 case I40E_DEV_ID_1G_BASE_T_BC:
4846 case I40E_DEV_ID_5G_BASE_T_BC:
4847 case I40E_DEV_ID_10G_BASE_T:
4848 case I40E_DEV_ID_10G_BASE_T4:
4849 case I40E_DEV_ID_10G_BASE_T_BC:
4850 case I40E_DEV_ID_10G_BASE_T_X722:
4851 case I40E_DEV_ID_25G_B:
4852 case I40E_DEV_ID_25G_SFP28:
4853 status = i40e_write_phy_register_clause45(hw, page, reg,
4854 phy_addr, value);
4855 break;
4856 default:
4857 status = -EIO;
4858 break;
4859 }
4860
4861 return status;
4862}
4863
4864/**
4865 * i40e_read_phy_register
4866 * @hw: pointer to the HW structure
4867 * @page: registers page number
4868 * @reg: register address in the page
4869 * @phy_addr: PHY address on MDIO interface
4870 * @value: PHY register value
4871 *
4872 * Reads specified PHY register value
4873 **/
4874int i40e_read_phy_register(struct i40e_hw *hw,
4875 u8 page, u16 reg, u8 phy_addr, u16 *value)
4876{
4877 int status;
4878
4879 switch (hw->device_id) {
4880 case I40E_DEV_ID_1G_BASE_T_X722:
4881 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4882 value);
4883 break;
4884 case I40E_DEV_ID_1G_BASE_T_BC:
4885 case I40E_DEV_ID_5G_BASE_T_BC:
4886 case I40E_DEV_ID_10G_BASE_T:
4887 case I40E_DEV_ID_10G_BASE_T4:
4888 case I40E_DEV_ID_10G_BASE_T_BC:
4889 case I40E_DEV_ID_10G_BASE_T_X722:
4890 case I40E_DEV_ID_25G_B:
4891 case I40E_DEV_ID_25G_SFP28:
4892 status = i40e_read_phy_register_clause45(hw, page, reg,
4893 phy_addr, value);
4894 break;
4895 default:
4896 status = -EIO;
4897 break;
4898 }
4899
4900 return status;
4901}
4902
4903/**
4904 * i40e_get_phy_address
4905 * @hw: pointer to the HW structure
4906 * @dev_num: PHY port number whose address is requested
4907 *
4908 * Gets PHY address for current port
4909 **/
4910u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4911{
4912 u8 port_num = hw->func_caps.mdio_port_num;
4913 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4914
4915 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4916}
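
/*
 * Editorial note: I40E_GLGEN_MDIO_I2C_SEL packs one 5-bit PHY address per
 * device number, so the shift by (dev_num + 1) * 5 followed by the 0x1f mask
 * simply extracts the field for the requested port.
 */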
4917
4918/**
4919 * i40e_blink_phy_link_led
4920 * @hw: pointer to the HW structure
4921 * @time: how long the LED should blink, in seconds
4922 * @interval: gap between LED on and off, in msecs
4923 *
4924 * Blinks PHY link LED
4925 **/
4926int i40e_blink_phy_link_led(struct i40e_hw *hw,
4927 u32 time, u32 interval)
4928{
4929 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4930 u16 gpio_led_port;
4931 u8 phy_addr = 0;
4932 int status = 0;
4933 u16 led_ctl;
4934 u8 port_num;
4935 u16 led_reg;
4936 u32 i;
4937
4938 i = rd32(hw, I40E_PFGEN_PORTNUM);
4939 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4940 phy_addr = i40e_get_phy_address(hw, port_num);
4941
4942 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4943 led_addr++) {
4944 status = i40e_read_phy_register_clause45(hw,
4945 I40E_PHY_COM_REG_PAGE,
4946 led_addr, phy_addr,
4947 &led_reg);
4948 if (status)
4949 goto phy_blinking_end;
4950 led_ctl = led_reg;
4951 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4952 led_reg = 0;
4953 status = i40e_write_phy_register_clause45(hw,
4954 I40E_PHY_COM_REG_PAGE,
4955 led_addr, phy_addr,
4956 led_reg);
4957 if (status)
4958 goto phy_blinking_end;
4959 break;
4960 }
4961 }
4962
4963 if (time > 0 && interval > 0) {
4964 for (i = 0; i < time * 1000; i += interval) {
4965 status = i40e_read_phy_register_clause45(hw,
4966 I40E_PHY_COM_REG_PAGE,
4967 led_addr, phy_addr, &led_reg);
4968 if (status)
4969 goto restore_config;
4970 if (led_reg & I40E_PHY_LED_MANUAL_ON)
4971 led_reg = 0;
4972 else
4973 led_reg = I40E_PHY_LED_MANUAL_ON;
4974 status = i40e_write_phy_register_clause45(hw,
4975 I40E_PHY_COM_REG_PAGE,
4976 led_addr, phy_addr, led_reg);
4977 if (status)
4978 goto restore_config;
4979 msleep(interval);
4980 }
4981 }
4982
4983restore_config:
4984 status = i40e_write_phy_register_clause45(hw,
4985 I40E_PHY_COM_REG_PAGE,
4986 led_addr, phy_addr, led_ctl);
4987
4988phy_blinking_end:
4989 return status;
4990}
4991
4992/**
4993 * i40e_led_get_reg - read LED register
4994 * @hw: pointer to the HW structure
4995 * @led_addr: LED register address
4996 * @reg_val: read register value
4997 **/
4998static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
4999 u32 *reg_val)
5000{
5001 u8 phy_addr = 0;
5002 u8 port_num;
5003 int status;
5004 u32 i;
5005
5006 *reg_val = 0;
5007 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
5008 status =
5009 i40e_aq_get_phy_register(hw,
5010 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5011 I40E_PHY_COM_REG_PAGE, true,
5012 I40E_PHY_LED_PROV_REG_1,
5013 reg_val, NULL);
5014 } else {
5015 i = rd32(hw, I40E_PFGEN_PORTNUM);
5016 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5017 phy_addr = i40e_get_phy_address(hw, port_num);
5018 status = i40e_read_phy_register_clause45(hw,
5019 I40E_PHY_COM_REG_PAGE,
5020 led_addr, phy_addr,
5021 (u16 *)reg_val);
5022 }
5023 return status;
5024}
5025
5026/**
5027 * i40e_led_set_reg - write LED register
5028 * @hw: pointer to the HW structure
5029 * @led_addr: LED register address
5030 * @reg_val: register value to write
5031 **/
5032static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5033 u32 reg_val)
5034{
5035 u8 phy_addr = 0;
5036 u8 port_num;
5037 int status;
5038 u32 i;
5039
5040 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
5041 status =
5042 i40e_aq_set_phy_register(hw,
5043 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5044 I40E_PHY_COM_REG_PAGE, true,
5045 I40E_PHY_LED_PROV_REG_1,
5046 reg_val, NULL);
5047 } else {
5048 i = rd32(hw, I40E_PFGEN_PORTNUM);
5049 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5050 phy_addr = i40e_get_phy_address(hw, port_num);
5051 status = i40e_write_phy_register_clause45(hw,
5052 I40E_PHY_COM_REG_PAGE,
5053 led_addr, phy_addr,
5054 (u16)reg_val);
5055 }
5056
5057 return status;
5058}
5059
5060/**
5061 * i40e_led_get_phy - return current on/off mode
5062 * @hw: pointer to the hw struct
5063 * @led_addr: address of led register to use
5064 * @val: original value of register to use
5065 *
5066 **/
5067int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5068 u16 *val)
5069{
5070 u16 gpio_led_port;
5071 u8 phy_addr = 0;
5072 u32 reg_val_aq;
5073 int status = 0;
5074 u16 temp_addr;
5075 u16 reg_val;
5076 u8 port_num;
5077 u32 i;
5078
5079 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
5080 status =
5081 i40e_aq_get_phy_register(hw,
5082 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5083 I40E_PHY_COM_REG_PAGE, true,
5084 I40E_PHY_LED_PROV_REG_1,
5085 &reg_val_aq, NULL);
5086 if (status == 0)
5087 *val = (u16)reg_val_aq;
5088 return status;
5089 }
5090 temp_addr = I40E_PHY_LED_PROV_REG_1;
5091 i = rd32(hw, I40E_PFGEN_PORTNUM);
5092 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5093 phy_addr = i40e_get_phy_address(hw, port_num);
5094
5095 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5096 temp_addr++) {
5097 status = i40e_read_phy_register_clause45(hw,
5098 I40E_PHY_COM_REG_PAGE,
5099 temp_addr, phy_addr,
5100 &reg_val);
5101 if (status)
5102 return status;
5103 *val = reg_val;
5104 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5105 *led_addr = temp_addr;
5106 break;
5107 }
5108 }
5109 return status;
5110}
5111
5112/**
5113 * i40e_led_set_phy
5114 * @hw: pointer to the HW structure
5115 * @on: true to turn the LED on, false to turn it off
5116 * @led_addr: address of led register to use
5117 * @mode: original register value plus a flag bit selecting whether to restore it
5118 *
5119 * Set led's on or off when controlled by the PHY
5120 *
5121 **/
5122int i40e_led_set_phy(struct i40e_hw *hw, bool on,
5123 u16 led_addr, u32 mode)
5124{
5125 u32 led_ctl = 0;
5126 u32 led_reg = 0;
5127 int status = 0;
5128
5129 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5130 if (status)
5131 return status;
5132 led_ctl = led_reg;
5133 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5134 led_reg = 0;
5135 status = i40e_led_set_reg(hw, led_addr, led_reg);
5136 if (status)
5137 return status;
5138 }
5139 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5140 if (status)
5141 goto restore_config;
5142 if (on)
5143 led_reg = I40E_PHY_LED_MANUAL_ON;
5144 else
5145 led_reg = 0;
5146
5147 status = i40e_led_set_reg(hw, led_addr, led_reg);
5148 if (status)
5149 goto restore_config;
5150 if (mode & I40E_PHY_LED_MODE_ORIG) {
5151 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5152 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5153 }
5154 return status;
5155
5156restore_config:
5157 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5158 return status;
5159}
5160
5161/**
5162 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5163 * @hw: pointer to the hw struct
5164 * @reg_addr: register address
5165 * @reg_val: ptr to register value
5166 * @cmd_details: pointer to command details structure or NULL
5167 *
5168 * Use the firmware to read the Rx control register,
5169 * especially useful if the Rx unit is under heavy pressure
5170 **/
5171int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5172 u32 reg_addr, u32 *reg_val,
5173 struct i40e_asq_cmd_details *cmd_details)
5174{
5175 struct i40e_aq_desc desc;
5176 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5177 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5178 int status;
5179
5180 if (!reg_val)
5181 return -EINVAL;
5182
5183 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5184
5185 cmd_resp->address = cpu_to_le32(reg_addr);
5186
5187 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5188
5189 if (status == 0)
5190 *reg_val = le32_to_cpu(cmd_resp->value);
5191
5192 return status;
5193}
5194
5195/**
5196 * i40e_read_rx_ctl - read from an Rx control register
5197 * @hw: pointer to the hw struct
5198 * @reg_addr: register address
5199 **/
5200u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5201{
5202 bool use_register = false;
5203 int status = 0;
5204 int retry = 5;
5205 u32 val = 0;
5206
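 /* The firmware-mediated Rx-ctl read is only attempted when the AQ API is
 * at least 1.5 and the MAC is not X722; on failure (or when skipped) the
 * value is read directly from the register below.
 */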
5207 if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
5208 use_register = true;
5209
5210 if (!use_register) {
5211do_retry:
5212 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5213 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5214 usleep_range(1000, 2000);
5215 retry--;
5216 goto do_retry;
5217 }
5218 }
5219
5220 /* if the AQ access failed, try the old-fashioned way */
5221 if (status || use_register)
5222 val = rd32(hw, reg_addr);
5223
5224 return val;
5225}
5226
5227/**
5228 * i40e_aq_rx_ctl_write_register
5229 * @hw: pointer to the hw struct
5230 * @reg_addr: register address
5231 * @reg_val: register value
5232 * @cmd_details: pointer to command details structure or NULL
5233 *
5234 * Use the firmware to write to an Rx control register,
5235 * especially useful if the Rx unit is under heavy pressure
5236 **/
5237int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5238 u32 reg_addr, u32 reg_val,
5239 struct i40e_asq_cmd_details *cmd_details)
5240{
5241 struct i40e_aq_desc desc;
5242 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5243 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5244 int status;
5245
5246 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5247
5248 cmd->address = cpu_to_le32(reg_addr);
5249 cmd->value = cpu_to_le32(reg_val);
5250
5251 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5252
5253 return status;
5254}
5255
5256/**
5257 * i40e_write_rx_ctl - write to an Rx control register
5258 * @hw: pointer to the hw struct
5259 * @reg_addr: register address
5260 * @reg_val: register value
5261 **/
5262void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5263{
5264 bool use_register = false;
5265 int status = 0;
5266 int retry = 5;
5267
5268 if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
5269 use_register = true;
5270
5271 if (!use_register) {
5272do_retry:
5273 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5274 reg_val, NULL);
5275 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5276 usleep_range(1000, 2000);
5277 retry--;
5278 goto do_retry;
5279 }
5280 }
5281
5282 /* if the AQ access failed, try the old-fashioned way */
5283 if (status || use_register)
5284 wr32(hw, reg_addr, reg_val);
5285}
5286
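/* Illustrative sketch, not part of the upstream driver: a caller doing a
 * read-modify-write on an Rx control register through the wrappers above,
 * so the AQ path is used on firmware that supports it and the plain
 * register access fallback is used otherwise.  The helper name and the
 * mask/val parameters are hypothetical.
 */
static inline void i40e_rx_ctl_update_bits_sketch(struct i40e_hw *hw,
						  u32 reg_addr, u32 mask,
						  u32 val)
{
	u32 reg;

	/* i40e_read_rx_ctl() retries on EAGAIN and falls back to rd32() */
	reg = i40e_read_rx_ctl(hw, reg_addr);
	reg = (reg & ~mask) | (val & mask);
	/* i40e_write_rx_ctl() applies the same retry/fallback policy */
	i40e_write_rx_ctl(hw, reg_addr, reg);
}
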
5287/**
5288 * i40e_mdio_if_number_selection - MDIO I/F number selection
5289 * @hw: pointer to the hw struct
5290 * @set_mdio: use MDIO I/F number specified by mdio_num
5291 * @mdio_num: MDIO I/F number
5292 * @cmd: pointer to PHY Register command structure
5293 **/
5294static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5295 u8 mdio_num,
5296 struct i40e_aqc_phy_register_access *cmd)
5297{
5298 if (!set_mdio ||
5299 cmd->phy_interface != I40E_AQ_PHY_REG_ACCESS_EXTERNAL)
5300 return;
5301
5302 if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps)) {
5303 cmd->cmd_flags |=
5304 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5305 FIELD_PREP(I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK,
5306 mdio_num);
5307 } else {
5308 i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n");
5309 }
5310}
5311
5312/**
5313 * i40e_aq_set_phy_register_ext
5314 * @hw: pointer to the hw struct
5315 * @phy_select: select which phy should be accessed
5316 * @dev_addr: PHY device address
5317 * @page_change: flag to indicate if phy page should be updated
5318 * @set_mdio: use MDIO I/F number specified by mdio_num
5319 * @mdio_num: MDIO I/F number
5320 * @reg_addr: PHY register address
5321 * @reg_val: new register value
5322 * @cmd_details: pointer to command details structure or NULL
5323 *
5324 * Write the external PHY register.
5325 * NOTE: In common cases the MDIO I/F number should not be changed; that is
5326 * why the simple wrapper i40e_aq_set_phy_register may be used instead.
5327 **/
5328int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5329 u8 phy_select, u8 dev_addr, bool page_change,
5330 bool set_mdio, u8 mdio_num,
5331 u32 reg_addr, u32 reg_val,
5332 struct i40e_asq_cmd_details *cmd_details)
5333{
5334 struct i40e_aq_desc desc;
5335 struct i40e_aqc_phy_register_access *cmd =
5336 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5337 int status;
5338
5339 i40e_fill_default_direct_cmd_desc(&desc,
5340 i40e_aqc_opc_set_phy_register);
5341
5342 cmd->phy_interface = phy_select;
5343 cmd->dev_address = dev_addr;
5344 cmd->reg_address = cpu_to_le32(reg_addr);
5345 cmd->reg_value = cpu_to_le32(reg_val);
5346
5347 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5348
5349 if (!page_change)
5350 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5351
5352 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5353
5354 return status;
5355}
5356
5357/**
5358 * i40e_aq_get_phy_register_ext
5359 * @hw: pointer to the hw struct
5360 * @phy_select: select which phy should be accessed
5361 * @dev_addr: PHY device address
5362 * @page_change: flag to indicate if phy page should be updated
5363 * @set_mdio: use MDIO I/F number specified by mdio_num
5364 * @mdio_num: MDIO I/F number
5365 * @reg_addr: PHY register address
5366 * @reg_val: read register value
5367 * @cmd_details: pointer to command details structure or NULL
5368 *
5369 * Read the external PHY register.
5370 * NOTE: In common cases the MDIO I/F number should not be changed; that is
5371 * why the simple wrapper i40e_aq_get_phy_register may be used instead.
5372 **/
5373int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5374 u8 phy_select, u8 dev_addr, bool page_change,
5375 bool set_mdio, u8 mdio_num,
5376 u32 reg_addr, u32 *reg_val,
5377 struct i40e_asq_cmd_details *cmd_details)
5378{
5379 struct i40e_aq_desc desc;
5380 struct i40e_aqc_phy_register_access *cmd =
5381 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5382 int status;
5383
5384 i40e_fill_default_direct_cmd_desc(&desc,
5385 i40e_aqc_opc_get_phy_register);
5386
5387 cmd->phy_interface = phy_select;
5388 cmd->dev_address = dev_addr;
5389 cmd->reg_address = cpu_to_le32(reg_addr);
5390
5391 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5392
5393 if (!page_change)
5394 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5395
5396 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5397 if (!status)
5398 *reg_val = le32_to_cpu(cmd->reg_value);
5399
5400 return status;
5401}
5402
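/* Sketch only: the NOTEs above refer to simple wrappers that leave the MDIO
 * I/F number untouched.  Assuming such a wrapper is declared elsewhere, it
 * can reduce to a single call like the one below, passing set_mdio = false
 * and mdio_num = 0 so the firmware keeps its current MDIO selection.  The
 * wrapper name used here is hypothetical.
 */
static inline int i40e_set_phy_register_sketch(struct i40e_hw *hw,
					       u8 phy_select, u8 dev_addr,
					       bool page_change,
					       u32 reg_addr, u32 reg_val)
{
	return i40e_aq_set_phy_register_ext(hw, phy_select, dev_addr,
					    page_change, false, 0,
					    reg_addr, reg_val, NULL);
}
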
5403/**
5404 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5405 * @hw: pointer to the hw struct
5406 * @buff: command buffer (size in bytes = buff_size)
5407 * @buff_size: buffer size in bytes
5408 * @track_id: package tracking id
5409 * @error_offset: returns error offset
5410 * @error_info: returns error information
5411 * @cmd_details: pointer to command details structure or NULL
5412 **/
5413int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5414 u16 buff_size, u32 track_id,
5415 u32 *error_offset, u32 *error_info,
5416 struct i40e_asq_cmd_details *cmd_details)
5417{
5418 struct i40e_aq_desc desc;
5419 struct i40e_aqc_write_personalization_profile *cmd =
5420 (struct i40e_aqc_write_personalization_profile *)
5421 &desc.params.raw;
5422 struct i40e_aqc_write_ddp_resp *resp;
5423 int status;
5424
5425 i40e_fill_default_direct_cmd_desc(&desc,
5426 i40e_aqc_opc_write_personalization_profile);
5427
5428 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5429 if (buff_size > I40E_AQ_LARGE_BUF)
5430 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5431
5432 desc.datalen = cpu_to_le16(buff_size);
5433
5434 cmd->profile_track_id = cpu_to_le32(track_id);
5435
5436 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5437 if (!status) {
5438 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5439 if (error_offset)
5440 *error_offset = le32_to_cpu(resp->error_offset);
5441 if (error_info)
5442 *error_info = le32_to_cpu(resp->error_info);
5443 }
5444
5445 return status;
5446}
5447
5448/**
5449 * i40e_aq_get_ddp_list - Read the list of applied dynamic device personalization (ddp) profiles
5450 * @hw: pointer to the hw struct
5451 * @buff: command buffer (size in bytes = buff_size)
5452 * @buff_size: buffer size in bytes
5453 * @flags: AdminQ command flags
5454 * @cmd_details: pointer to command details structure or NULL
5455 **/
5456int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5457 u16 buff_size, u8 flags,
5458 struct i40e_asq_cmd_details *cmd_details)
5459{
5460 struct i40e_aq_desc desc;
5461 struct i40e_aqc_get_applied_profiles *cmd =
5462 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5463 int status;
5464
5465 i40e_fill_default_direct_cmd_desc(&desc,
5466 i40e_aqc_opc_get_personalization_profile_list);
5467
5468 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5469 if (buff_size > I40E_AQ_LARGE_BUF)
5470 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5471 desc.datalen = cpu_to_le16(buff_size);
5472
5473 cmd->flags = flags;
5474
5475 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5476
5477 return status;
5478}
5479
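/* Illustrative sketch, not driver code: querying firmware for the list of
 * currently applied DDP profiles.  The flags value of 0 and the idea that
 * the caller owns and later parses the response buffer are assumptions made
 * only for this example.
 */
static inline int i40e_query_ddp_list_sketch(struct i40e_hw *hw, void *buff,
					     u16 buff_size)
{
	/* firmware fills buff with the applied-profile list on success */
	return i40e_aq_get_ddp_list(hw, buff, buff_size, 0, NULL);
}
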
5480/**
5481 * i40e_find_segment_in_package
5482 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5483 * @pkg_hdr: pointer to the package header to be searched
5484 *
5485 * This function searches a package file for a particular segment type. On
5486 * success it returns a pointer to the segment header, otherwise it will
5487 * return NULL.
5488 **/
5489struct i40e_generic_seg_header *
5490i40e_find_segment_in_package(u32 segment_type,
5491 struct i40e_package_header *pkg_hdr)
5492{
5493 struct i40e_generic_seg_header *segment;
5494 u32 i;
5495
5496 /* Search all package segments for the requested segment type */
5497 for (i = 0; i < pkg_hdr->segment_count; i++) {
5498 segment =
5499 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5500 pkg_hdr->segment_offset[i]);
5501
5502 if (segment->type == segment_type)
5503 return segment;
5504 }
5505
5506 return NULL;
5507}
5508
5509/* Get section table in profile */
5510#define I40E_SECTION_TABLE(profile, sec_tbl) \
5511 do { \
5512 struct i40e_profile_segment *p = (profile); \
5513 u32 count; \
5514 u32 *nvm; \
5515 count = p->device_table_count; \
5516 nvm = (u32 *)&p->device_table[count]; \
5517 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5518 } while (0)
5519
5520/* Get section header in profile */
5521#define I40E_SECTION_HEADER(profile, offset) \
5522 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5523
5524/**
5525 * i40e_find_section_in_profile
5526 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5527 * @profile: pointer to the i40e segment header to be searched
5528 *
5529 * This function searches the i40e segment for a particular section type. On
5530 * success it returns a pointer to the section header, otherwise it will
5531 * return NULL.
5532 **/
5533struct i40e_profile_section_header *
5534i40e_find_section_in_profile(u32 section_type,
5535 struct i40e_profile_segment *profile)
5536{
5537 struct i40e_profile_section_header *sec;
5538 struct i40e_section_table *sec_tbl;
5539 u32 sec_off;
5540 u32 i;
5541
5542 if (profile->header.type != SEGMENT_TYPE_I40E)
5543 return NULL;
5544
5545 I40E_SECTION_TABLE(profile, sec_tbl);
5546
5547 for (i = 0; i < sec_tbl->section_count; i++) {
5548 sec_off = sec_tbl->section_offset[i];
5549 sec = I40E_SECTION_HEADER(profile, sec_off);
5550 if (sec->section.type == section_type)
5551 return sec;
5552 }
5553
5554 return NULL;
5555}
5556
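/* Illustrative flow, not part of the driver: locate the i40e profile
 * segment inside a DDP package image and then a section of interest within
 * that segment.  The package header is assumed to point at a complete
 * package already read into memory by the caller; the helper name is
 * hypothetical.
 */
static inline struct i40e_profile_section_header *
i40e_find_note_section_sketch(struct i40e_package_header *pkg_hdr)
{
	struct i40e_generic_seg_header *seg;

	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (!seg)
		return NULL;

	/* the i40e segment doubles as a profile segment for section lookup */
	return i40e_find_section_in_profile(SECTION_TYPE_NOTE,
					    (struct i40e_profile_segment *)seg);
}
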
5557/**
5558 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5559 * @hw: pointer to the hw struct
5560 * @aq: command buffer containing all data to execute AQ
5561 **/
5562static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5563 struct i40e_profile_aq_section *aq)
5564{
5565 struct i40e_aq_desc desc;
5566 u8 *msg = NULL;
5567 u16 msglen;
5568 int status;
5569
5570 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5571 desc.flags |= cpu_to_le16(aq->flags);
5572 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5573
5574 msglen = aq->datalen;
5575 if (msglen) {
5576 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5577 I40E_AQ_FLAG_RD));
5578 if (msglen > I40E_AQ_LARGE_BUF)
5579 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5580 desc.datalen = cpu_to_le16(msglen);
5581 msg = &aq->data[0];
5582 }
5583
5584 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5585
5586 if (status) {
5587 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5588 "unable to exec DDP AQ opcode %u, error %d\n",
5589 aq->opcode, status);
5590 return status;
5591 }
5592
5593 /* copy returned desc to aq_buf */
5594 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5595
5596 return 0;
5597}
5598
5599/**
5600 * i40e_validate_profile
5601 * @hw: pointer to the hardware structure
5602 * @profile: pointer to the profile segment of the package to be validated
5603 * @track_id: package tracking id
5604 * @rollback: flag indicating whether the profile is for rollback
5605 *
5606 * Validates supported devices and profile's sections.
5607 */
5608static int
5609i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5610 u32 track_id, bool rollback)
5611{
5612 struct i40e_profile_section_header *sec = NULL;
5613 struct i40e_section_table *sec_tbl;
5614 u32 vendor_dev_id;
5615 int status = 0;
5616 u32 dev_cnt;
5617 u32 sec_off;
5618 u32 i;
5619
5620 if (track_id == I40E_DDP_TRACKID_INVALID) {
5621 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5622 return -EOPNOTSUPP;
5623 }
5624
5625 dev_cnt = profile->device_table_count;
5626 for (i = 0; i < dev_cnt; i++) {
5627 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5628 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5629 hw->device_id == (vendor_dev_id & 0xFFFF))
5630 break;
5631 }
5632 if (dev_cnt && i == dev_cnt) {
5633 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5634 "Device doesn't support DDP\n");
5635 return -ENODEV;
5636 }
5637
5638 I40E_SECTION_TABLE(profile, sec_tbl);
5639
5640 /* Validate sections types */
5641 for (i = 0; i < sec_tbl->section_count; i++) {
5642 sec_off = sec_tbl->section_offset[i];
5643 sec = I40E_SECTION_HEADER(profile, sec_off);
5644 if (rollback) {
5645 if (sec->section.type == SECTION_TYPE_MMIO ||
5646 sec->section.type == SECTION_TYPE_AQ ||
5647 sec->section.type == SECTION_TYPE_RB_AQ) {
5648 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5649 "Not a roll-back package\n");
5650 return -EOPNOTSUPP;
5651 }
5652 } else {
5653 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5654 sec->section.type == SECTION_TYPE_RB_MMIO) {
5655 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5656 "Not an original package\n");
5657 return -EOPNOTSUPP;
5658 }
5659 }
5660 }
5661
5662 return status;
5663}
5664
5665/**
5666 * i40e_write_profile
5667 * @hw: pointer to the hardware structure
5668 * @profile: pointer to the profile segment of the package to be downloaded
5669 * @track_id: package tracking id
5670 *
5671 * Handles the download of a complete package.
5672 */
5673int
5674i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5675 u32 track_id)
5676{
5677 struct i40e_profile_section_header *sec = NULL;
5678 struct i40e_profile_aq_section *ddp_aq;
5679 struct i40e_section_table *sec_tbl;
5680 u32 offset = 0, info = 0;
5681 u32 section_size = 0;
5682 int status = 0;
5683 u32 sec_off;
5684 u32 i;
5685
5686 status = i40e_validate_profile(hw, profile, track_id, false);
5687 if (status)
5688 return status;
5689
5690 I40E_SECTION_TABLE(profile, sec_tbl);
5691
5692 for (i = 0; i < sec_tbl->section_count; i++) {
5693 sec_off = sec_tbl->section_offset[i];
5694 sec = I40E_SECTION_HEADER(profile, sec_off);
5695 /* Process generic admin command */
5696 if (sec->section.type == SECTION_TYPE_AQ) {
5697 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5698 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5699 if (status) {
5700 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5701 "Failed to execute aq: section %d, opcode %u\n",
5702 i, ddp_aq->opcode);
5703 break;
5704 }
5705 sec->section.type = SECTION_TYPE_RB_AQ;
5706 }
5707
5708 /* Skip any non-mmio sections */
5709 if (sec->section.type != SECTION_TYPE_MMIO)
5710 continue;
5711
5712 section_size = sec->section.size +
5713 sizeof(struct i40e_profile_section_header);
5714
5715 /* Write MMIO section */
5716 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5717 track_id, &offset, &info, NULL);
5718 if (status) {
5719 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5720 "Failed to write profile: section %d, offset %d, info %d\n",
5721 i, offset, info);
5722 break;
5723 }
5724 }
5725 return status;
5726}
5727
5728/**
5729 * i40e_rollback_profile
5730 * @hw: pointer to the hardware structure
5731 * @profile: pointer to the profile segment of the package to be removed
5732 * @track_id: package tracking id
5733 *
5734 * Rolls back previously loaded package.
5735 */
5736int
5737i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5738 u32 track_id)
5739{
5740 struct i40e_profile_section_header *sec = NULL;
5741 struct i40e_section_table *sec_tbl;
5742 u32 offset = 0, info = 0;
5743 u32 section_size = 0;
5744 int status = 0;
5745 u32 sec_off;
5746 int i;
5747
5748 status = i40e_validate_profile(hw, profile, track_id, true);
5749 if (status)
5750 return status;
5751
5752 I40E_SECTION_TABLE(profile, sec_tbl);
5753
5754 /* For rollback write sections in reverse */
5755 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5756 sec_off = sec_tbl->section_offset[i];
5757 sec = I40E_SECTION_HEADER(profile, sec_off);
5758
5759 /* Skip any non-rollback sections */
5760 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5761 continue;
5762
5763 section_size = sec->section.size +
5764 sizeof(struct i40e_profile_section_header);
5765
5766 /* Write roll-back MMIO section */
5767 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5768 track_id, &offset, &info, NULL);
5769 if (status) {
5770 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5771 "Failed to write profile: section %d, offset %d, info %d\n",
5772 i, offset, info);
5773 break;
5774 }
5775 }
5776 return status;
5777}
5778
5779/**
5780 * i40e_add_pinfo_to_list
5781 * @hw: pointer to the hardware structure
5782 * @profile: pointer to the profile segment of the package
5783 * @profile_info_sec: buffer for information section
5784 * @track_id: package tracking id
5785 *
5786 * Register a profile to the list of loaded profiles.
5787 */
5788int
5789i40e_add_pinfo_to_list(struct i40e_hw *hw,
5790 struct i40e_profile_segment *profile,
5791 u8 *profile_info_sec, u32 track_id)
5792{
5793 struct i40e_profile_section_header *sec = NULL;
5794 struct i40e_profile_info *pinfo;
5795 u32 offset = 0, info = 0;
5796 int status = 0;
5797
5798 sec = (struct i40e_profile_section_header *)profile_info_sec;
5799 sec->tbl_size = 1;
5800 sec->data_end = sizeof(struct i40e_profile_section_header) +
5801 sizeof(struct i40e_profile_info);
5802 sec->section.type = SECTION_TYPE_INFO;
5803 sec->section.offset = sizeof(struct i40e_profile_section_header);
5804 sec->section.size = sizeof(struct i40e_profile_info);
5805 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5806 sec->section.offset);
5807 pinfo->track_id = track_id;
5808 pinfo->version = profile->version;
5809 pinfo->op = I40E_DDP_ADD_TRACKID;
5810 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5811
5812 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5813 track_id, &offset, &info, NULL);
5814
5815 return status;
5816}
5817
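/* Illustrative end-to-end sketch: download a DDP profile segment and then
 * record it in the applied-profile list.  profile_info_sec is assumed to be
 * a caller-provided buffer large enough for a section header plus a
 * struct i40e_profile_info; track_id comes from the package being loaded.
 * A rollback package follows the same shape via i40e_rollback_profile().
 */
static inline int i40e_load_profile_sketch(struct i40e_hw *hw,
					   struct i40e_profile_segment *profile,
					   u8 *profile_info_sec, u32 track_id)
{
	int status;

	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	/* register the profile so it shows up in the applied-profile list */
	return i40e_add_pinfo_to_list(hw, profile, profile_info_sec, track_id);
}
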
5818/**
5819 * i40e_aq_add_cloud_filters
5820 * @hw: pointer to the hardware structure
5821 * @seid: VSI seid to add cloud filters to
5822 * @filters: Buffer which contains the filters to be added
5823 * @filter_count: number of filters contained in the buffer
5824 *
5825 * Set the cloud filters for a given VSI. The contents of the
5826 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5827 * of the function.
5828 *
5829 **/
5830int
5831i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5832 struct i40e_aqc_cloud_filters_element_data *filters,
5833 u8 filter_count)
5834{
5835 struct i40e_aq_desc desc;
5836 struct i40e_aqc_add_remove_cloud_filters *cmd =
5837 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5838 u16 buff_len;
5839 int status;
5840
5841 i40e_fill_default_direct_cmd_desc(&desc,
5842 i40e_aqc_opc_add_cloud_filters);
5843
5844 buff_len = filter_count * sizeof(*filters);
5845 desc.datalen = cpu_to_le16(buff_len);
5846 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5847 cmd->num_filters = filter_count;
5848 cmd->seid = cpu_to_le16(seid);
5849
5850 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5851
5852 return status;
5853}
5854
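/* Illustrative sketch: programming a single cloud filter on a VSI.  The
 * element is filled by the caller according to the match criteria; only the
 * call pattern is shown here, and the helper name is hypothetical.
 */
static inline int
i40e_add_one_cloud_filter_sketch(struct i40e_hw *hw, u16 seid,
				 struct i40e_aqc_cloud_filters_element_data *filter)
{
	/* i40e_aq_add_cloud_filters() takes an array; pass a single element */
	return i40e_aq_add_cloud_filters(hw, seid, filter, 1);
}
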
5855/**
5856 * i40e_aq_add_cloud_filters_bb
5857 * @hw: pointer to the hardware structure
5858 * @seid: VSI seid to add cloud filters to
5859 * @filters: Buffer which contains the filters in big buffer to be added
5860 * @filter_count: number of filters contained in the buffer
5861 *
5862 * Set the big buffer cloud filters for a given VSI. The contents of the
5863 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5864 * function.
5865 *
5866 **/
5867int
5868i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5869 struct i40e_aqc_cloud_filters_element_bb *filters,
5870 u8 filter_count)
5871{
5872 struct i40e_aq_desc desc;
5873 struct i40e_aqc_add_remove_cloud_filters *cmd =
5874 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5875 u16 buff_len;
5876 int status;
5877 int i;
5878
5879 i40e_fill_default_direct_cmd_desc(&desc,
5880 i40e_aqc_opc_add_cloud_filters);
5881
5882 buff_len = filter_count * sizeof(*filters);
5883 desc.datalen = cpu_to_le16(buff_len);
5884 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5885 cmd->num_filters = filter_count;
5886 cmd->seid = cpu_to_le16(seid);
5887 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5888
5889 for (i = 0; i < filter_count; i++) {
5890 u16 tnl_type;
5891 u32 ti;
5892
5893 tnl_type = le16_get_bits(filters[i].element.flags,
5894 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
5895
5896 /* Due to hardware eccentricities, the Geneve VNI is stored one
5897 * byte further left than the Tenant ID field used by other tunnel
5898 * types, so shift it into place here.
5899 */
5900 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5901 ti = le32_to_cpu(filters[i].element.tenant_id);
5902 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5903 }
5904 }
5905
5906 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5907
5908 return status;
5909}
5910
5911/**
5912 * i40e_aq_rem_cloud_filters
5913 * @hw: pointer to the hardware structure
5914 * @seid: VSI seid to remove cloud filters from
5915 * @filters: Buffer which contains the filters to be removed
5916 * @filter_count: number of filters contained in the buffer
5917 *
5918 * Remove the cloud filters for a given VSI. The contents of the
5919 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5920 * of the function.
5921 *
5922 **/
5923int
5924i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5925 struct i40e_aqc_cloud_filters_element_data *filters,
5926 u8 filter_count)
5927{
5928 struct i40e_aq_desc desc;
5929 struct i40e_aqc_add_remove_cloud_filters *cmd =
5930 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5931 u16 buff_len;
5932 int status;
5933
5934 i40e_fill_default_direct_cmd_desc(&desc,
5935 i40e_aqc_opc_remove_cloud_filters);
5936
5937 buff_len = filter_count * sizeof(*filters);
5938 desc.datalen = cpu_to_le16(buff_len);
5939 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5940 cmd->num_filters = filter_count;
5941 cmd->seid = cpu_to_le16(seid);
5942
5943 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5944
5945 return status;
5946}
5947
5948/**
5949 * i40e_aq_rem_cloud_filters_bb
5950 * @hw: pointer to the hardware structure
5951 * @seid: VSI seid to remove cloud filters from
5952 * @filters: Buffer which contains the filters in big buffer to be removed
5953 * @filter_count: number of filters contained in the buffer
5954 *
5955 * Remove the big buffer cloud filters for a given VSI. The contents of the
5956 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5957 * function.
5958 *
5959 **/
5960int
5961i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5962 struct i40e_aqc_cloud_filters_element_bb *filters,
5963 u8 filter_count)
5964{
5965 struct i40e_aq_desc desc;
5966 struct i40e_aqc_add_remove_cloud_filters *cmd =
5967 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5968 u16 buff_len;
5969 int status;
5970 int i;
5971
5972 i40e_fill_default_direct_cmd_desc(&desc,
5973 i40e_aqc_opc_remove_cloud_filters);
5974
5975 buff_len = filter_count * sizeof(*filters);
5976 desc.datalen = cpu_to_le16(buff_len);
5977 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5978 cmd->num_filters = filter_count;
5979 cmd->seid = cpu_to_le16(seid);
5980 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5981
5982 for (i = 0; i < filter_count; i++) {
5983 u16 tnl_type;
5984 u32 ti;
5985
5986 tnl_type = le16_get_bits(filters[i].element.flags,
5987 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
5988
5989 /* Due to hardware eccentricities, the Geneve VNI is stored one
5990 * byte further left than the Tenant ID field used by other tunnel
5991 * types, so shift it into place here.
5992 */
5993 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5994 ti = le32_to_cpu(filters[i].element.tenant_id);
5995 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5996 }
5997 }
5998
5999 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6000
6001 return status;
6002}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#include "i40e.h"
5#include "i40e_type.h"
6#include "i40e_adminq.h"
7#include "i40e_prototype.h"
8#include <linux/avf/virtchnl.h>
9
10/**
11 * i40e_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure
13 *
14 * This function sets the mac type of the adapter based on the
15 * vendor ID and device ID stored in the hw structure.
16 **/
17i40e_status i40e_set_mac_type(struct i40e_hw *hw)
18{
19 i40e_status status = 0;
20
21 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
22 switch (hw->device_id) {
23 case I40E_DEV_ID_SFP_XL710:
24 case I40E_DEV_ID_QEMU:
25 case I40E_DEV_ID_KX_B:
26 case I40E_DEV_ID_KX_C:
27 case I40E_DEV_ID_QSFP_A:
28 case I40E_DEV_ID_QSFP_B:
29 case I40E_DEV_ID_QSFP_C:
30 case I40E_DEV_ID_5G_BASE_T_BC:
31 case I40E_DEV_ID_10G_BASE_T:
32 case I40E_DEV_ID_10G_BASE_T4:
33 case I40E_DEV_ID_10G_BASE_T_BC:
34 case I40E_DEV_ID_10G_B:
35 case I40E_DEV_ID_10G_SFP:
36 case I40E_DEV_ID_20G_KR2:
37 case I40E_DEV_ID_20G_KR2_A:
38 case I40E_DEV_ID_25G_B:
39 case I40E_DEV_ID_25G_SFP28:
40 case I40E_DEV_ID_X710_N3000:
41 case I40E_DEV_ID_XXV710_N3000:
42 hw->mac.type = I40E_MAC_XL710;
43 break;
44 case I40E_DEV_ID_KX_X722:
45 case I40E_DEV_ID_QSFP_X722:
46 case I40E_DEV_ID_SFP_X722:
47 case I40E_DEV_ID_1G_BASE_T_X722:
48 case I40E_DEV_ID_10G_BASE_T_X722:
49 case I40E_DEV_ID_SFP_I_X722:
50 hw->mac.type = I40E_MAC_X722;
51 break;
52 default:
53 hw->mac.type = I40E_MAC_GENERIC;
54 break;
55 }
56 } else {
57 status = I40E_ERR_DEVICE_NOT_SUPPORTED;
58 }
59
60 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
61 hw->mac.type, status);
62 return status;
63}
64
65/**
66 * i40e_aq_str - convert AQ err code to a string
67 * @hw: pointer to the HW structure
68 * @aq_err: the AQ error code to convert
69 **/
70const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
71{
72 switch (aq_err) {
73 case I40E_AQ_RC_OK:
74 return "OK";
75 case I40E_AQ_RC_EPERM:
76 return "I40E_AQ_RC_EPERM";
77 case I40E_AQ_RC_ENOENT:
78 return "I40E_AQ_RC_ENOENT";
79 case I40E_AQ_RC_ESRCH:
80 return "I40E_AQ_RC_ESRCH";
81 case I40E_AQ_RC_EINTR:
82 return "I40E_AQ_RC_EINTR";
83 case I40E_AQ_RC_EIO:
84 return "I40E_AQ_RC_EIO";
85 case I40E_AQ_RC_ENXIO:
86 return "I40E_AQ_RC_ENXIO";
87 case I40E_AQ_RC_E2BIG:
88 return "I40E_AQ_RC_E2BIG";
89 case I40E_AQ_RC_EAGAIN:
90 return "I40E_AQ_RC_EAGAIN";
91 case I40E_AQ_RC_ENOMEM:
92 return "I40E_AQ_RC_ENOMEM";
93 case I40E_AQ_RC_EACCES:
94 return "I40E_AQ_RC_EACCES";
95 case I40E_AQ_RC_EFAULT:
96 return "I40E_AQ_RC_EFAULT";
97 case I40E_AQ_RC_EBUSY:
98 return "I40E_AQ_RC_EBUSY";
99 case I40E_AQ_RC_EEXIST:
100 return "I40E_AQ_RC_EEXIST";
101 case I40E_AQ_RC_EINVAL:
102 return "I40E_AQ_RC_EINVAL";
103 case I40E_AQ_RC_ENOTTY:
104 return "I40E_AQ_RC_ENOTTY";
105 case I40E_AQ_RC_ENOSPC:
106 return "I40E_AQ_RC_ENOSPC";
107 case I40E_AQ_RC_ENOSYS:
108 return "I40E_AQ_RC_ENOSYS";
109 case I40E_AQ_RC_ERANGE:
110 return "I40E_AQ_RC_ERANGE";
111 case I40E_AQ_RC_EFLUSHED:
112 return "I40E_AQ_RC_EFLUSHED";
113 case I40E_AQ_RC_BAD_ADDR:
114 return "I40E_AQ_RC_BAD_ADDR";
115 case I40E_AQ_RC_EMODE:
116 return "I40E_AQ_RC_EMODE";
117 case I40E_AQ_RC_EFBIG:
118 return "I40E_AQ_RC_EFBIG";
119 }
120
121 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
122 return hw->err_str;
123}
124
125/**
126 * i40e_stat_str - convert status err code to a string
127 * @hw: pointer to the HW structure
128 * @stat_err: the status error code to convert
129 **/
130const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
131{
132 switch (stat_err) {
133 case 0:
134 return "OK";
135 case I40E_ERR_NVM:
136 return "I40E_ERR_NVM";
137 case I40E_ERR_NVM_CHECKSUM:
138 return "I40E_ERR_NVM_CHECKSUM";
139 case I40E_ERR_PHY:
140 return "I40E_ERR_PHY";
141 case I40E_ERR_CONFIG:
142 return "I40E_ERR_CONFIG";
143 case I40E_ERR_PARAM:
144 return "I40E_ERR_PARAM";
145 case I40E_ERR_MAC_TYPE:
146 return "I40E_ERR_MAC_TYPE";
147 case I40E_ERR_UNKNOWN_PHY:
148 return "I40E_ERR_UNKNOWN_PHY";
149 case I40E_ERR_LINK_SETUP:
150 return "I40E_ERR_LINK_SETUP";
151 case I40E_ERR_ADAPTER_STOPPED:
152 return "I40E_ERR_ADAPTER_STOPPED";
153 case I40E_ERR_INVALID_MAC_ADDR:
154 return "I40E_ERR_INVALID_MAC_ADDR";
155 case I40E_ERR_DEVICE_NOT_SUPPORTED:
156 return "I40E_ERR_DEVICE_NOT_SUPPORTED";
157 case I40E_ERR_MASTER_REQUESTS_PENDING:
158 return "I40E_ERR_MASTER_REQUESTS_PENDING";
159 case I40E_ERR_INVALID_LINK_SETTINGS:
160 return "I40E_ERR_INVALID_LINK_SETTINGS";
161 case I40E_ERR_AUTONEG_NOT_COMPLETE:
162 return "I40E_ERR_AUTONEG_NOT_COMPLETE";
163 case I40E_ERR_RESET_FAILED:
164 return "I40E_ERR_RESET_FAILED";
165 case I40E_ERR_SWFW_SYNC:
166 return "I40E_ERR_SWFW_SYNC";
167 case I40E_ERR_NO_AVAILABLE_VSI:
168 return "I40E_ERR_NO_AVAILABLE_VSI";
169 case I40E_ERR_NO_MEMORY:
170 return "I40E_ERR_NO_MEMORY";
171 case I40E_ERR_BAD_PTR:
172 return "I40E_ERR_BAD_PTR";
173 case I40E_ERR_RING_FULL:
174 return "I40E_ERR_RING_FULL";
175 case I40E_ERR_INVALID_PD_ID:
176 return "I40E_ERR_INVALID_PD_ID";
177 case I40E_ERR_INVALID_QP_ID:
178 return "I40E_ERR_INVALID_QP_ID";
179 case I40E_ERR_INVALID_CQ_ID:
180 return "I40E_ERR_INVALID_CQ_ID";
181 case I40E_ERR_INVALID_CEQ_ID:
182 return "I40E_ERR_INVALID_CEQ_ID";
183 case I40E_ERR_INVALID_AEQ_ID:
184 return "I40E_ERR_INVALID_AEQ_ID";
185 case I40E_ERR_INVALID_SIZE:
186 return "I40E_ERR_INVALID_SIZE";
187 case I40E_ERR_INVALID_ARP_INDEX:
188 return "I40E_ERR_INVALID_ARP_INDEX";
189 case I40E_ERR_INVALID_FPM_FUNC_ID:
190 return "I40E_ERR_INVALID_FPM_FUNC_ID";
191 case I40E_ERR_QP_INVALID_MSG_SIZE:
192 return "I40E_ERR_QP_INVALID_MSG_SIZE";
193 case I40E_ERR_QP_TOOMANY_WRS_POSTED:
194 return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
195 case I40E_ERR_INVALID_FRAG_COUNT:
196 return "I40E_ERR_INVALID_FRAG_COUNT";
197 case I40E_ERR_QUEUE_EMPTY:
198 return "I40E_ERR_QUEUE_EMPTY";
199 case I40E_ERR_INVALID_ALIGNMENT:
200 return "I40E_ERR_INVALID_ALIGNMENT";
201 case I40E_ERR_FLUSHED_QUEUE:
202 return "I40E_ERR_FLUSHED_QUEUE";
203 case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
204 return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
205 case I40E_ERR_INVALID_IMM_DATA_SIZE:
206 return "I40E_ERR_INVALID_IMM_DATA_SIZE";
207 case I40E_ERR_TIMEOUT:
208 return "I40E_ERR_TIMEOUT";
209 case I40E_ERR_OPCODE_MISMATCH:
210 return "I40E_ERR_OPCODE_MISMATCH";
211 case I40E_ERR_CQP_COMPL_ERROR:
212 return "I40E_ERR_CQP_COMPL_ERROR";
213 case I40E_ERR_INVALID_VF_ID:
214 return "I40E_ERR_INVALID_VF_ID";
215 case I40E_ERR_INVALID_HMCFN_ID:
216 return "I40E_ERR_INVALID_HMCFN_ID";
217 case I40E_ERR_BACKING_PAGE_ERROR:
218 return "I40E_ERR_BACKING_PAGE_ERROR";
219 case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
220 return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
221 case I40E_ERR_INVALID_PBLE_INDEX:
222 return "I40E_ERR_INVALID_PBLE_INDEX";
223 case I40E_ERR_INVALID_SD_INDEX:
224 return "I40E_ERR_INVALID_SD_INDEX";
225 case I40E_ERR_INVALID_PAGE_DESC_INDEX:
226 return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
227 case I40E_ERR_INVALID_SD_TYPE:
228 return "I40E_ERR_INVALID_SD_TYPE";
229 case I40E_ERR_MEMCPY_FAILED:
230 return "I40E_ERR_MEMCPY_FAILED";
231 case I40E_ERR_INVALID_HMC_OBJ_INDEX:
232 return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
233 case I40E_ERR_INVALID_HMC_OBJ_COUNT:
234 return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
235 case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
236 return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
237 case I40E_ERR_SRQ_ENABLED:
238 return "I40E_ERR_SRQ_ENABLED";
239 case I40E_ERR_ADMIN_QUEUE_ERROR:
240 return "I40E_ERR_ADMIN_QUEUE_ERROR";
241 case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
242 return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
243 case I40E_ERR_BUF_TOO_SHORT:
244 return "I40E_ERR_BUF_TOO_SHORT";
245 case I40E_ERR_ADMIN_QUEUE_FULL:
246 return "I40E_ERR_ADMIN_QUEUE_FULL";
247 case I40E_ERR_ADMIN_QUEUE_NO_WORK:
248 return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
249 case I40E_ERR_BAD_IWARP_CQE:
250 return "I40E_ERR_BAD_IWARP_CQE";
251 case I40E_ERR_NVM_BLANK_MODE:
252 return "I40E_ERR_NVM_BLANK_MODE";
253 case I40E_ERR_NOT_IMPLEMENTED:
254 return "I40E_ERR_NOT_IMPLEMENTED";
255 case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
256 return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
257 case I40E_ERR_DIAG_TEST_FAILED:
258 return "I40E_ERR_DIAG_TEST_FAILED";
259 case I40E_ERR_NOT_READY:
260 return "I40E_ERR_NOT_READY";
261 case I40E_NOT_SUPPORTED:
262 return "I40E_NOT_SUPPORTED";
263 case I40E_ERR_FIRMWARE_API_VERSION:
264 return "I40E_ERR_FIRMWARE_API_VERSION";
265 case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
266 return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
267 }
268
269 snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
270 return hw->err_str;
271}
272
273/**
274 * i40e_debug_aq
275 * @hw: debug mask related to admin queue
276 * @mask: debug mask
277 * @desc: pointer to admin queue descriptor
278 * @buffer: pointer to command buffer
279 * @buf_len: max length of buffer
280 *
281 * Dumps debug log about adminq command with descriptor contents.
282 **/
283void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
284 void *buffer, u16 buf_len)
285{
286 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
287 u32 effective_mask = hw->debug_mask & mask;
288 char prefix[27];
289 u16 len;
290 u8 *buf = (u8 *)buffer;
291
292 if (!effective_mask || !desc)
293 return;
294
295 len = le16_to_cpu(aq_desc->datalen);
296
297 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
298 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
299 le16_to_cpu(aq_desc->opcode),
300 le16_to_cpu(aq_desc->flags),
301 le16_to_cpu(aq_desc->datalen),
302 le16_to_cpu(aq_desc->retval));
303 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
304 "\tcookie (h,l) 0x%08X 0x%08X\n",
305 le32_to_cpu(aq_desc->cookie_high),
306 le32_to_cpu(aq_desc->cookie_low));
307 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
308 "\tparam (0,1) 0x%08X 0x%08X\n",
309 le32_to_cpu(aq_desc->params.internal.param0),
310 le32_to_cpu(aq_desc->params.internal.param1));
311 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
312 "\taddr (h,l) 0x%08X 0x%08X\n",
313 le32_to_cpu(aq_desc->params.external.addr_high),
314 le32_to_cpu(aq_desc->params.external.addr_low));
315
316 if (buffer && buf_len != 0 && len != 0 &&
317 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
318 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
319 if (buf_len < len)
320 len = buf_len;
321
322 snprintf(prefix, sizeof(prefix),
323 "i40e %02x:%02x.%x: \t0x",
324 hw->bus.bus_id,
325 hw->bus.device,
326 hw->bus.func);
327
328 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
329 16, 1, buf, len, false);
330 }
331}
332
333/**
334 * i40e_check_asq_alive
335 * @hw: pointer to the hw struct
336 *
337 * Returns true if Queue is enabled else false.
338 **/
339bool i40e_check_asq_alive(struct i40e_hw *hw)
340{
341 if (hw->aq.asq.len)
342 return !!(rd32(hw, hw->aq.asq.len) &
343 I40E_PF_ATQLEN_ATQENABLE_MASK);
344 else
345 return false;
346}
347
348/**
349 * i40e_aq_queue_shutdown
350 * @hw: pointer to the hw struct
351 * @unloading: is the driver unloading itself
352 *
353 * Tell the Firmware that we're shutting down the AdminQ and whether
354 * or not the driver is unloading as well.
355 **/
356i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
357 bool unloading)
358{
359 struct i40e_aq_desc desc;
360 struct i40e_aqc_queue_shutdown *cmd =
361 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
362 i40e_status status;
363
364 i40e_fill_default_direct_cmd_desc(&desc,
365 i40e_aqc_opc_queue_shutdown);
366
367 if (unloading)
368 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
369 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
370
371 return status;
372}
373
374/**
375 * i40e_aq_get_set_rss_lut
376 * @hw: pointer to the hardware structure
377 * @vsi_id: vsi fw index
378 * @pf_lut: for PF table set true, for VSI table set false
379 * @lut: pointer to the lut buffer provided by the caller
380 * @lut_size: size of the lut buffer
381 * @set: set true to set the table, false to get the table
382 *
383 * Internal function to get or set RSS look up table
384 **/
385static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
386 u16 vsi_id, bool pf_lut,
387 u8 *lut, u16 lut_size,
388 bool set)
389{
390 i40e_status status;
391 struct i40e_aq_desc desc;
392 struct i40e_aqc_get_set_rss_lut *cmd_resp =
393 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
394
395 if (set)
396 i40e_fill_default_direct_cmd_desc(&desc,
397 i40e_aqc_opc_set_rss_lut);
398 else
399 i40e_fill_default_direct_cmd_desc(&desc,
400 i40e_aqc_opc_get_rss_lut);
401
402 /* Indirect command */
403 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
404 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
405
406 cmd_resp->vsi_id =
407 cpu_to_le16((u16)((vsi_id <<
408 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
409 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
410 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
411
412 if (pf_lut)
413 cmd_resp->flags |= cpu_to_le16((u16)
414 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
415 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
416 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
417 else
418 cmd_resp->flags |= cpu_to_le16((u16)
419 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
420 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
421 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
422
423 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
424
425 return status;
426}
427
428/**
429 * i40e_aq_get_rss_lut
430 * @hw: pointer to the hardware structure
431 * @vsi_id: vsi fw index
432 * @pf_lut: for PF table set true, for VSI table set false
433 * @lut: pointer to the lut buffer provided by the caller
434 * @lut_size: size of the lut buffer
435 *
436 * get the RSS lookup table, PF or VSI type
437 **/
438i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
439 bool pf_lut, u8 *lut, u16 lut_size)
440{
441 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
442 false);
443}
444
445/**
446 * i40e_aq_set_rss_lut
447 * @hw: pointer to the hardware structure
448 * @vsi_id: vsi fw index
449 * @pf_lut: for PF table set true, for VSI table set false
450 * @lut: pointer to the lut buffer provided by the caller
451 * @lut_size: size of the lut buffer
452 *
453 * set the RSS lookup table, PF or VSI type
454 **/
455i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
456 bool pf_lut, u8 *lut, u16 lut_size)
457{
458 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
459}
460
461/**
462 * i40e_aq_get_set_rss_key
463 * @hw: pointer to the hw struct
464 * @vsi_id: vsi fw index
465 * @key: pointer to key info struct
466 * @set: set true to set the key, false to get the key
467 *
468 * get the RSS key per VSI
469 **/
470static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
471 u16 vsi_id,
472 struct i40e_aqc_get_set_rss_key_data *key,
473 bool set)
474{
475 i40e_status status;
476 struct i40e_aq_desc desc;
477 struct i40e_aqc_get_set_rss_key *cmd_resp =
478 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
479 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
480
481 if (set)
482 i40e_fill_default_direct_cmd_desc(&desc,
483 i40e_aqc_opc_set_rss_key);
484 else
485 i40e_fill_default_direct_cmd_desc(&desc,
486 i40e_aqc_opc_get_rss_key);
487
488 /* Indirect command */
489 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
490 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
491
492 cmd_resp->vsi_id =
493 cpu_to_le16((u16)((vsi_id <<
494 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
495 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
496 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
497
498 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
499
500 return status;
501}
502
503/**
504 * i40e_aq_get_rss_key
505 * @hw: pointer to the hw struct
506 * @vsi_id: vsi fw index
507 * @key: pointer to key info struct
508 *
509 **/
510i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
511 u16 vsi_id,
512 struct i40e_aqc_get_set_rss_key_data *key)
513{
514 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
515}
516
517/**
518 * i40e_aq_set_rss_key
519 * @hw: pointer to the hw struct
520 * @vsi_id: vsi fw index
521 * @key: pointer to key info struct
522 *
523 * set the RSS key per VSI
524 **/
525i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
526 u16 vsi_id,
527 struct i40e_aqc_get_set_rss_key_data *key)
528{
529 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
530}
531
532/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
533 * hardware to a bit-field that can be used by SW to more easily determine the
534 * packet type.
535 *
536 * Macros are used to shorten the table lines and make this table human
537 * readable.
538 *
539 * We store the PTYPE in the top byte of the bit field - this is just so that
540 * we can check that the table doesn't have a row missing, as the index into
541 * the table should be the PTYPE.
542 *
543 * Typical work flow:
544 *
545 * IF NOT i40e_ptype_lookup[ptype].known
546 * THEN
547 * Packet is unknown
548 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
549 * Use the rest of the fields to look at the tunnels, inner protocols, etc
550 * ELSE
551 * Use the enum i40e_rx_l2_ptype to decode the packet type
552 * ENDIF
553 */
554
555/* macro to make the table lines short */
556#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
557 { PTYPE, \
558 1, \
559 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
560 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
561 I40E_RX_PTYPE_##OUTER_FRAG, \
562 I40E_RX_PTYPE_TUNNEL_##T, \
563 I40E_RX_PTYPE_TUNNEL_END_##TE, \
564 I40E_RX_PTYPE_##TEF, \
565 I40E_RX_PTYPE_INNER_PROT_##I, \
566 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
567
568#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
569 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
570
571/* shorter macros makes the table fit but are terse */
572#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
573#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
574#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
575
576/* Lookup table mapping the HW PTYPE to the bit field for decoding */
577struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
578 /* L2 Packet types */
579 I40E_PTT_UNUSED_ENTRY(0),
580 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
581 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
582 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
583 I40E_PTT_UNUSED_ENTRY(4),
584 I40E_PTT_UNUSED_ENTRY(5),
585 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
586 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
587 I40E_PTT_UNUSED_ENTRY(8),
588 I40E_PTT_UNUSED_ENTRY(9),
589 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
590 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
591 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
592 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
593 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
594 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
595 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
596 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
597 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
598 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
599 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
600 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
601
602 /* Non Tunneled IPv4 */
603 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
604 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
605 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
606 I40E_PTT_UNUSED_ENTRY(25),
607 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
608 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
609 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
610
611 /* IPv4 --> IPv4 */
612 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
613 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
614 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
615 I40E_PTT_UNUSED_ENTRY(32),
616 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
617 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
618 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
619
620 /* IPv4 --> IPv6 */
621 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
622 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
623 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
624 I40E_PTT_UNUSED_ENTRY(39),
625 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
626 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
627 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
628
629 /* IPv4 --> GRE/NAT */
630 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
631
632 /* IPv4 --> GRE/NAT --> IPv4 */
633 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
634 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
635 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
636 I40E_PTT_UNUSED_ENTRY(47),
637 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
638 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
639 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
640
641 /* IPv4 --> GRE/NAT --> IPv6 */
642 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
643 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
644 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
645 I40E_PTT_UNUSED_ENTRY(54),
646 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
647 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
648 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
649
650 /* IPv4 --> GRE/NAT --> MAC */
651 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
652
653 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
654 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
655 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
656 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
657 I40E_PTT_UNUSED_ENTRY(62),
658 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
659 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
660 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
661
662 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
663 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
664 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
665 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
666 I40E_PTT_UNUSED_ENTRY(69),
667 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
668 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
669 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
670
671 /* IPv4 --> GRE/NAT --> MAC/VLAN */
672 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
673
674 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
675 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
676 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
677 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
678 I40E_PTT_UNUSED_ENTRY(77),
679 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
680 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
681 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
682
683 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
684 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
685 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
686 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
687 I40E_PTT_UNUSED_ENTRY(84),
688 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
689 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
690 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
691
692 /* Non Tunneled IPv6 */
693 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
694 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
695 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
696 I40E_PTT_UNUSED_ENTRY(91),
697 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
698 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
699 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
700
701 /* IPv6 --> IPv4 */
702 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
703 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
704 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
705 I40E_PTT_UNUSED_ENTRY(98),
706 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
707 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
708 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
709
710 /* IPv6 --> IPv6 */
711 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
712 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
713 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
714 I40E_PTT_UNUSED_ENTRY(105),
715 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
716 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
717 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
718
719 /* IPv6 --> GRE/NAT */
720 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
721
722 /* IPv6 --> GRE/NAT -> IPv4 */
723 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
724 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
725 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
726 I40E_PTT_UNUSED_ENTRY(113),
727 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
728 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
729 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
730
731 /* IPv6 --> GRE/NAT -> IPv6 */
732 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
733 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
734 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
735 I40E_PTT_UNUSED_ENTRY(120),
736 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
737 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
738 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
739
740 /* IPv6 --> GRE/NAT -> MAC */
741 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
742
743 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
744 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
745 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
746 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
747 I40E_PTT_UNUSED_ENTRY(128),
748 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
749 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
750 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
751
752 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
753 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
754 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
755 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
756 I40E_PTT_UNUSED_ENTRY(135),
757 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
758 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
759 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
760
761 /* IPv6 --> GRE/NAT -> MAC/VLAN */
762 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
763
764 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
765 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
766 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
767 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
768 I40E_PTT_UNUSED_ENTRY(143),
769 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
770 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
771 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
772
773 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
774 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
775 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
776 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
777 I40E_PTT_UNUSED_ENTRY(150),
778 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
779 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
780 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
781
782 /* unused entries */
783 I40E_PTT_UNUSED_ENTRY(154),
784 I40E_PTT_UNUSED_ENTRY(155),
785 I40E_PTT_UNUSED_ENTRY(156),
786 I40E_PTT_UNUSED_ENTRY(157),
787 I40E_PTT_UNUSED_ENTRY(158),
788 I40E_PTT_UNUSED_ENTRY(159),
789
790 I40E_PTT_UNUSED_ENTRY(160),
791 I40E_PTT_UNUSED_ENTRY(161),
792 I40E_PTT_UNUSED_ENTRY(162),
793 I40E_PTT_UNUSED_ENTRY(163),
794 I40E_PTT_UNUSED_ENTRY(164),
795 I40E_PTT_UNUSED_ENTRY(165),
796 I40E_PTT_UNUSED_ENTRY(166),
797 I40E_PTT_UNUSED_ENTRY(167),
798 I40E_PTT_UNUSED_ENTRY(168),
799 I40E_PTT_UNUSED_ENTRY(169),
800
801 I40E_PTT_UNUSED_ENTRY(170),
802 I40E_PTT_UNUSED_ENTRY(171),
803 I40E_PTT_UNUSED_ENTRY(172),
804 I40E_PTT_UNUSED_ENTRY(173),
805 I40E_PTT_UNUSED_ENTRY(174),
806 I40E_PTT_UNUSED_ENTRY(175),
807 I40E_PTT_UNUSED_ENTRY(176),
808 I40E_PTT_UNUSED_ENTRY(177),
809 I40E_PTT_UNUSED_ENTRY(178),
810 I40E_PTT_UNUSED_ENTRY(179),
811
812 I40E_PTT_UNUSED_ENTRY(180),
813 I40E_PTT_UNUSED_ENTRY(181),
814 I40E_PTT_UNUSED_ENTRY(182),
815 I40E_PTT_UNUSED_ENTRY(183),
816 I40E_PTT_UNUSED_ENTRY(184),
817 I40E_PTT_UNUSED_ENTRY(185),
818 I40E_PTT_UNUSED_ENTRY(186),
819 I40E_PTT_UNUSED_ENTRY(187),
820 I40E_PTT_UNUSED_ENTRY(188),
821 I40E_PTT_UNUSED_ENTRY(189),
822
823 I40E_PTT_UNUSED_ENTRY(190),
824 I40E_PTT_UNUSED_ENTRY(191),
825 I40E_PTT_UNUSED_ENTRY(192),
826 I40E_PTT_UNUSED_ENTRY(193),
827 I40E_PTT_UNUSED_ENTRY(194),
828 I40E_PTT_UNUSED_ENTRY(195),
829 I40E_PTT_UNUSED_ENTRY(196),
830 I40E_PTT_UNUSED_ENTRY(197),
831 I40E_PTT_UNUSED_ENTRY(198),
832 I40E_PTT_UNUSED_ENTRY(199),
833
834 I40E_PTT_UNUSED_ENTRY(200),
835 I40E_PTT_UNUSED_ENTRY(201),
836 I40E_PTT_UNUSED_ENTRY(202),
837 I40E_PTT_UNUSED_ENTRY(203),
838 I40E_PTT_UNUSED_ENTRY(204),
839 I40E_PTT_UNUSED_ENTRY(205),
840 I40E_PTT_UNUSED_ENTRY(206),
841 I40E_PTT_UNUSED_ENTRY(207),
842 I40E_PTT_UNUSED_ENTRY(208),
843 I40E_PTT_UNUSED_ENTRY(209),
844
845 I40E_PTT_UNUSED_ENTRY(210),
846 I40E_PTT_UNUSED_ENTRY(211),
847 I40E_PTT_UNUSED_ENTRY(212),
848 I40E_PTT_UNUSED_ENTRY(213),
849 I40E_PTT_UNUSED_ENTRY(214),
850 I40E_PTT_UNUSED_ENTRY(215),
851 I40E_PTT_UNUSED_ENTRY(216),
852 I40E_PTT_UNUSED_ENTRY(217),
853 I40E_PTT_UNUSED_ENTRY(218),
854 I40E_PTT_UNUSED_ENTRY(219),
855
856 I40E_PTT_UNUSED_ENTRY(220),
857 I40E_PTT_UNUSED_ENTRY(221),
858 I40E_PTT_UNUSED_ENTRY(222),
859 I40E_PTT_UNUSED_ENTRY(223),
860 I40E_PTT_UNUSED_ENTRY(224),
861 I40E_PTT_UNUSED_ENTRY(225),
862 I40E_PTT_UNUSED_ENTRY(226),
863 I40E_PTT_UNUSED_ENTRY(227),
864 I40E_PTT_UNUSED_ENTRY(228),
865 I40E_PTT_UNUSED_ENTRY(229),
866
867 I40E_PTT_UNUSED_ENTRY(230),
868 I40E_PTT_UNUSED_ENTRY(231),
869 I40E_PTT_UNUSED_ENTRY(232),
870 I40E_PTT_UNUSED_ENTRY(233),
871 I40E_PTT_UNUSED_ENTRY(234),
872 I40E_PTT_UNUSED_ENTRY(235),
873 I40E_PTT_UNUSED_ENTRY(236),
874 I40E_PTT_UNUSED_ENTRY(237),
875 I40E_PTT_UNUSED_ENTRY(238),
876 I40E_PTT_UNUSED_ENTRY(239),
877
878 I40E_PTT_UNUSED_ENTRY(240),
879 I40E_PTT_UNUSED_ENTRY(241),
880 I40E_PTT_UNUSED_ENTRY(242),
881 I40E_PTT_UNUSED_ENTRY(243),
882 I40E_PTT_UNUSED_ENTRY(244),
883 I40E_PTT_UNUSED_ENTRY(245),
884 I40E_PTT_UNUSED_ENTRY(246),
885 I40E_PTT_UNUSED_ENTRY(247),
886 I40E_PTT_UNUSED_ENTRY(248),
887 I40E_PTT_UNUSED_ENTRY(249),
888
889 I40E_PTT_UNUSED_ENTRY(250),
890 I40E_PTT_UNUSED_ENTRY(251),
891 I40E_PTT_UNUSED_ENTRY(252),
892 I40E_PTT_UNUSED_ENTRY(253),
893 I40E_PTT_UNUSED_ENTRY(254),
894 I40E_PTT_UNUSED_ENTRY(255)
895};
896
897/**
898 * i40e_init_shared_code - Initialize the shared code
899 * @hw: pointer to hardware structure
900 *
901 * This assigns the MAC type and PHY code and inits the NVM.
902 * Does not touch the hardware. This function must be called prior to any
903 * other function in the shared code. The i40e_hw structure should be
904 * memset to 0 prior to calling this function. The following fields in
905 * hw structure should be filled in prior to calling this function:
906 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
907 * subsystem_vendor_id, and revision_id
908 **/
909i40e_status i40e_init_shared_code(struct i40e_hw *hw)
910{
911 i40e_status status = 0;
912 u32 port, ari, func_rid;
913
914 i40e_set_mac_type(hw);
915
916 switch (hw->mac.type) {
917 case I40E_MAC_XL710:
918 case I40E_MAC_X722:
919 break;
920 default:
921 return I40E_ERR_DEVICE_NOT_SUPPORTED;
922 }
923
924 hw->phy.get_link_info = true;
925
926 /* Determine port number and PF number*/
927 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
928 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
929 hw->port = (u8)port;
930 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
931 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
932 func_rid = rd32(hw, I40E_PF_FUNC_RID);
933 if (ari)
934 hw->pf_id = (u8)(func_rid & 0xff);
935 else
936 hw->pf_id = (u8)(func_rid & 0x7);
937
938 status = i40e_init_nvm(hw);
939 return status;
940}
941
942/**
943 * i40e_aq_mac_address_read - Retrieve the MAC addresses
944 * @hw: pointer to the hw struct
945 * @flags: a return indicator of what addresses were added to the addr store
946 * @addrs: the requestor's mac addr store
947 * @cmd_details: pointer to command details structure or NULL
948 **/
949static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
950 u16 *flags,
951 struct i40e_aqc_mac_address_read_data *addrs,
952 struct i40e_asq_cmd_details *cmd_details)
953{
954 struct i40e_aq_desc desc;
955 struct i40e_aqc_mac_address_read *cmd_data =
956 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
957 i40e_status status;
958
959 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
960 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
961
962 status = i40e_asq_send_command(hw, &desc, addrs,
963 sizeof(*addrs), cmd_details);
964 *flags = le16_to_cpu(cmd_data->command_flags);
965
966 return status;
967}
968
969/**
970 * i40e_aq_mac_address_write - Change the MAC addresses
971 * @hw: pointer to the hw struct
972 * @flags: indicates which MAC to be written
973 * @mac_addr: address to write
974 * @cmd_details: pointer to command details structure or NULL
975 **/
976i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
977 u16 flags, u8 *mac_addr,
978 struct i40e_asq_cmd_details *cmd_details)
979{
980 struct i40e_aq_desc desc;
981 struct i40e_aqc_mac_address_write *cmd_data =
982 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
983 i40e_status status;
984
985 i40e_fill_default_direct_cmd_desc(&desc,
986 i40e_aqc_opc_mac_address_write);
987 cmd_data->command_flags = cpu_to_le16(flags);
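	/* the MAC address is split across two descriptor fields: the two
	 * high bytes go in mac_sah and the remaining four bytes in mac_sal
	 */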
988 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
989 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
990 ((u32)mac_addr[3] << 16) |
991 ((u32)mac_addr[4] << 8) |
992 mac_addr[5]);
993
994 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
995
996 return status;
997}
998
999/**
1000 * i40e_get_mac_addr - get MAC address
1001 * @hw: pointer to the HW structure
1002 * @mac_addr: pointer to MAC address
1003 *
1004 * Reads the adapter's MAC address using the admin queue
1005 **/
1006i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1007{
1008 struct i40e_aqc_mac_address_read_data addrs;
1009 i40e_status status;
1010 u16 flags = 0;
1011
1012 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1013
1014 if (flags & I40E_AQC_LAN_ADDR_VALID)
1015 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
1016
1017 return status;
1018}
1019
1020/**
1021 * i40e_get_port_mac_addr - get Port MAC address
1022 * @hw: pointer to the HW structure
1023 * @mac_addr: pointer to Port MAC address
1024 *
1025 * Reads the adapter's Port MAC address
1026 **/
1027i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1028{
1029 struct i40e_aqc_mac_address_read_data addrs;
1030 i40e_status status;
1031 u16 flags = 0;
1032
1033 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1034 if (status)
1035 return status;
1036
1037 if (flags & I40E_AQC_PORT_ADDR_VALID)
1038 ether_addr_copy(mac_addr, addrs.port_mac);
1039 else
1040 status = I40E_ERR_INVALID_MAC_ADDR;
1041
1042 return status;
1043}
1044
1045/**
1046 * i40e_pre_tx_queue_cfg - pre tx queue configure
1047 * @hw: pointer to the HW structure
1048 * @queue: target PF queue index
1049 * @enable: state change request
1050 *
1051 * Handles hw requirement to indicate intention to enable
1052 * or disable target queue.
1053 **/
1054void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
1055{
1056 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
1057 u32 reg_block = 0;
1058 u32 reg_val;
1059
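	/* each GLLAN_TXPRE_QDIS register block covers 128 queues */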
1060 if (abs_queue_idx >= 128) {
1061 reg_block = abs_queue_idx / 128;
1062 abs_queue_idx %= 128;
1063 }
1064
1065 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1066 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1067 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1068
1069 if (enable)
1070 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
1071 else
1072 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1073
1074 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
1075}
1076
1077/**
1078 * i40e_read_pba_string - Reads part number string from EEPROM
1079 * @hw: pointer to hardware structure
1080 * @pba_num: stores the part number string from the EEPROM
1081 * @pba_num_size: part number string buffer length
1082 *
1083 * Reads the part number string from the EEPROM.
1084 **/
1085i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
1086 u32 pba_num_size)
1087{
1088 i40e_status status = 0;
1089 u16 pba_word = 0;
1090 u16 pba_size = 0;
1091 u16 pba_ptr = 0;
1092 u16 i = 0;
1093
1094 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
1095 if (status || (pba_word != 0xFAFA)) {
1096 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
1097 return status;
1098 }
1099
1100 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
1101 if (status) {
1102 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
1103 return status;
1104 }
1105
1106 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
1107 if (status) {
1108 hw_dbg(hw, "Failed to read PBA Block size.\n");
1109 return status;
1110 }
1111
1112 /* Subtract one to get PBA word count (PBA Size word is included in
1113 * total size)
1114 */
1115 pba_size--;
1116 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1117 hw_dbg(hw, "Buffer too small for PBA data.\n");
1118 return I40E_ERR_PARAM;
1119 }
1120
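	/* each NVM word holds two characters of the part number string */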
1121 for (i = 0; i < pba_size; i++) {
1122 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1123 if (status) {
1124 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1125 return status;
1126 }
1127
1128 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1129 pba_num[(i * 2) + 1] = pba_word & 0xFF;
1130 }
1131 pba_num[(pba_size * 2)] = '\0';
1132
1133 return status;
1134}
1135
1136/**
1137 * i40e_get_media_type - Gets media type
1138 * @hw: pointer to the hardware structure
1139 **/
1140static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1141{
1142 enum i40e_media_type media;
1143
1144 switch (hw->phy.link_info.phy_type) {
1145 case I40E_PHY_TYPE_10GBASE_SR:
1146 case I40E_PHY_TYPE_10GBASE_LR:
1147 case I40E_PHY_TYPE_1000BASE_SX:
1148 case I40E_PHY_TYPE_1000BASE_LX:
1149 case I40E_PHY_TYPE_40GBASE_SR4:
1150 case I40E_PHY_TYPE_40GBASE_LR4:
1151 case I40E_PHY_TYPE_25GBASE_LR:
1152 case I40E_PHY_TYPE_25GBASE_SR:
1153 media = I40E_MEDIA_TYPE_FIBER;
1154 break;
1155 case I40E_PHY_TYPE_100BASE_TX:
1156 case I40E_PHY_TYPE_1000BASE_T:
1157 case I40E_PHY_TYPE_2_5GBASE_T:
1158 case I40E_PHY_TYPE_5GBASE_T:
1159 case I40E_PHY_TYPE_10GBASE_T:
1160 media = I40E_MEDIA_TYPE_BASET;
1161 break;
1162 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1163 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1164 case I40E_PHY_TYPE_10GBASE_CR1:
1165 case I40E_PHY_TYPE_40GBASE_CR4:
1166 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1167 case I40E_PHY_TYPE_40GBASE_AOC:
1168 case I40E_PHY_TYPE_10GBASE_AOC:
1169 case I40E_PHY_TYPE_25GBASE_CR:
1170 case I40E_PHY_TYPE_25GBASE_AOC:
1171 case I40E_PHY_TYPE_25GBASE_ACC:
1172 media = I40E_MEDIA_TYPE_DA;
1173 break;
1174 case I40E_PHY_TYPE_1000BASE_KX:
1175 case I40E_PHY_TYPE_10GBASE_KX4:
1176 case I40E_PHY_TYPE_10GBASE_KR:
1177 case I40E_PHY_TYPE_40GBASE_KR4:
1178 case I40E_PHY_TYPE_20GBASE_KR2:
1179 case I40E_PHY_TYPE_25GBASE_KR:
1180 media = I40E_MEDIA_TYPE_BACKPLANE;
1181 break;
1182 case I40E_PHY_TYPE_SGMII:
1183 case I40E_PHY_TYPE_XAUI:
1184 case I40E_PHY_TYPE_XFI:
1185 case I40E_PHY_TYPE_XLAUI:
1186 case I40E_PHY_TYPE_XLPPI:
1187 default:
1188 media = I40E_MEDIA_TYPE_UNKNOWN;
1189 break;
1190 }
1191
1192 return media;
1193}
1194
1195/**
1196 * i40e_poll_globr - Poll for Global Reset completion
1197 * @hw: pointer to the hardware structure
1198 * @retry_limit: how many times to retry before failure
1199 **/
1200static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1201 u32 retry_limit)
1202{
1203 u32 cnt, reg = 0;
1204
1205 for (cnt = 0; cnt < retry_limit; cnt++) {
1206 reg = rd32(hw, I40E_GLGEN_RSTAT);
1207 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1208 return 0;
1209 msleep(100);
1210 }
1211
1212 hw_dbg(hw, "Global reset failed.\n");
1213 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1214
1215 return I40E_ERR_RESET_FAILED;
1216}
1217
1218#define I40E_PF_RESET_WAIT_COUNT_A0 200
1219#define I40E_PF_RESET_WAIT_COUNT 200
1220/**
1221 * i40e_pf_reset - Reset the PF
1222 * @hw: pointer to the hardware structure
1223 *
1224 * Assuming someone else has triggered a global reset,
1225 * assure the global reset is complete and then reset the PF
1226 **/
1227i40e_status i40e_pf_reset(struct i40e_hw *hw)
1228{
1229 u32 cnt = 0;
1230 u32 cnt1 = 0;
1231 u32 reg = 0;
1232 u32 grst_del;
1233
1234 /* Poll for Global Reset steady state in case of recent GRST.
1235 * The grst delay value is in 100ms units, and we'll wait a
1236 * couple counts longer to be sure we don't just miss the end.
1237 */
1238 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1239 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1240 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1241
1242	/* It can take up to 15 secs for GRST steady state.
1243	 * Bump it to 16 secs max to be safe.
1244 */
1245 grst_del = grst_del * 20;
1246
1247 for (cnt = 0; cnt < grst_del; cnt++) {
1248 reg = rd32(hw, I40E_GLGEN_RSTAT);
1249 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1250 break;
1251 msleep(100);
1252 }
1253 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1254 hw_dbg(hw, "Global reset polling failed to complete.\n");
1255 return I40E_ERR_RESET_FAILED;
1256 }
1257
1258 /* Now Wait for the FW to be ready */
1259 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1260 reg = rd32(hw, I40E_GLNVM_ULD);
1261 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1262 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1263 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1264 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1265 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1266 break;
1267 }
1268 usleep_range(10000, 20000);
1269 }
1270 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1271 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1272		hw_dbg(hw, "wait for FW Reset to complete timed out\n");
1273 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1274 return I40E_ERR_RESET_FAILED;
1275 }
1276
1277 /* If there was a Global Reset in progress when we got here,
1278 * we don't need to do the PF Reset
1279 */
1280 if (!cnt) {
1281 u32 reg2 = 0;
1282 if (hw->revision_id == 0)
1283 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1284 else
1285 cnt = I40E_PF_RESET_WAIT_COUNT;
1286 reg = rd32(hw, I40E_PFGEN_CTRL);
1287 wr32(hw, I40E_PFGEN_CTRL,
1288 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1289 for (; cnt; cnt--) {
1290 reg = rd32(hw, I40E_PFGEN_CTRL);
1291 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1292 break;
1293 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1294 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1295 break;
1296 usleep_range(1000, 2000);
1297 }
1298 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1299 if (i40e_poll_globr(hw, grst_del))
1300 return I40E_ERR_RESET_FAILED;
1301 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1302 hw_dbg(hw, "PF reset polling failed to complete.\n");
1303 return I40E_ERR_RESET_FAILED;
1304 }
1305 }
1306
1307 i40e_clear_pxe_mode(hw);
1308
1309 return 0;
1310}
1311
1312/**
1313 * i40e_clear_hw - clear out any left over hw state
1314 * @hw: pointer to the hw struct
1315 *
1316 * Clear queues and interrupts, typically called at init time,
1317 * but after the capabilities have been found so we know how many
1318 * queues and msix vectors have been allocated.
1319 **/
1320void i40e_clear_hw(struct i40e_hw *hw)
1321{
1322 u32 num_queues, base_queue;
1323 u32 num_pf_int;
1324 u32 num_vf_int;
1325 u32 num_vfs;
1326 u32 i, j;
1327 u32 val;
1328 u32 eol = 0x7ff;
1329
1330 /* get number of interrupts, queues, and VFs */
1331 val = rd32(hw, I40E_GLPCI_CNF2);
1332 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1333 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1334 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1335 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1336
1337 val = rd32(hw, I40E_PFLAN_QALLOC);
1338 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1339 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1340 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1341 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1342 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1343 num_queues = (j - base_queue) + 1;
1344 else
1345 num_queues = 0;
1346
1347 val = rd32(hw, I40E_PF_VT_PFALLOC);
1348 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1349 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1350 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1351 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1352 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1353 num_vfs = (j - i) + 1;
1354 else
1355 num_vfs = 0;
1356
1357 /* stop all the interrupts */
1358 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1359 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1360 for (i = 0; i < num_pf_int - 2; i++)
1361 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1362
1363 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1364 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1365 wr32(hw, I40E_PFINT_LNKLST0, val);
1366 for (i = 0; i < num_pf_int - 2; i++)
1367 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1368 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1369 for (i = 0; i < num_vfs; i++)
1370 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1371 for (i = 0; i < num_vf_int - 2; i++)
1372 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1373
1374 /* warn the HW of the coming Tx disables */
1375 for (i = 0; i < num_queues; i++) {
1376 u32 abs_queue_idx = base_queue + i;
1377 u32 reg_block = 0;
1378
1379 if (abs_queue_idx >= 128) {
1380 reg_block = abs_queue_idx / 128;
1381 abs_queue_idx %= 128;
1382 }
1383
1384 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1385 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1386 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1387 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1388
1389 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1390 }
1391 udelay(400);
1392
1393 /* stop all the queues */
1394 for (i = 0; i < num_queues; i++) {
1395 wr32(hw, I40E_QINT_TQCTL(i), 0);
1396 wr32(hw, I40E_QTX_ENA(i), 0);
1397 wr32(hw, I40E_QINT_RQCTL(i), 0);
1398 wr32(hw, I40E_QRX_ENA(i), 0);
1399 }
1400
1401 /* short wait for all queue disables to settle */
1402 udelay(50);
1403}
1404
1405/**
1406 * i40e_clear_pxe_mode - clear pxe operations mode
1407 * @hw: pointer to the hw struct
1408 *
1409 * Make sure all PXE mode settings are cleared, including things
1410 * like descriptor fetch/write-back mode.
1411 **/
1412void i40e_clear_pxe_mode(struct i40e_hw *hw)
1413{
1414 u32 reg;
1415
1416 if (i40e_check_asq_alive(hw))
1417 i40e_aq_clear_pxe_mode(hw, NULL);
1418
1419 /* Clear single descriptor fetch/write-back mode */
1420 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1421
1422 if (hw->revision_id == 0) {
1423 /* As a work around clear PXE_MODE instead of setting it */
1424 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1425 } else {
1426 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1427 }
1428}
1429
1430/**
1431 * i40e_led_is_mine - helper to find matching led
1432 * @hw: pointer to the hw struct
1433 * @idx: index into GPIO registers
1434 *
1435 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1436 */
1437static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1438{
1439 u32 gpio_val = 0;
1440 u32 port;
1441
1442 if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1443 !hw->func_caps.led[idx])
1444 return 0;
1445 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1446 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1447 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1448
1449	/* ignore this LED if PRT_NUM_NA is set (the LED is not port
1450	 * specific) or if it belongs to a different port
1451	 */
1452 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1453 (port != hw->port))
1454 return 0;
1455
1456 return gpio_val;
1457}
1458
1459#define I40E_FW_LED BIT(4)
1460#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1461 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1462
1463#define I40E_LED0 22
1464
1465#define I40E_PIN_FUNC_SDP 0x0
1466#define I40E_PIN_FUNC_LED 0x1
1467
1468/**
1469 * i40e_led_get - return current on/off mode
1470 * @hw: pointer to the hw struct
1471 *
1472 * The value returned is the 'mode' field as defined in the
1473 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1474 * values are variations of possible behaviors relating to
1475 * blink, link, and wire.
1476 **/
1477u32 i40e_led_get(struct i40e_hw *hw)
1478{
1479 u32 mode = 0;
1480 int i;
1481
1482 /* as per the documentation GPIO 22-29 are the LED
1483 * GPIO pins named LED0..LED7
1484 */
1485 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1486 u32 gpio_val = i40e_led_is_mine(hw, i);
1487
1488 if (!gpio_val)
1489 continue;
1490
1491 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1492 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1493 break;
1494 }
1495
1496 return mode;
1497}
1498
1499/**
1500 * i40e_led_set - set new on/off mode
1501 * @hw: pointer to the hw struct
1502 * @mode: 0=off, 0xf=on (else see manual for mode details)
1503 * @blink: true if the LED should blink when on, false if steady
1504 *
1505 * If this function is used to turn on the blink, it should also
1506 * be used to disable the blink when restoring the original state.
1507 **/
1508void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1509{
1510 int i;
1511
1512 if (mode & ~I40E_LED_MODE_VALID) {
1513 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1514 return;
1515 }
1516
1517 /* as per the documentation GPIO 22-29 are the LED
1518 * GPIO pins named LED0..LED7
1519 */
1520 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1521 u32 gpio_val = i40e_led_is_mine(hw, i);
1522
1523 if (!gpio_val)
1524 continue;
1525
1526 if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1527 u32 pin_func = 0;
1528
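			/* select the SDP or LED pin function depending on
			 * the requested mode
			 */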
1529 if (mode & I40E_FW_LED)
1530 pin_func = I40E_PIN_FUNC_SDP;
1531 else
1532 pin_func = I40E_PIN_FUNC_LED;
1533
1534 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1535 gpio_val |= ((pin_func <<
1536 I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1537 I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1538 }
1539 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1540 /* this & is a bit of paranoia, but serves as a range check */
1541 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1542 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1543
1544 if (blink)
1545 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1546 else
1547 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1548
1549 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1550 break;
1551 }
1552}
1553
1554/* Admin command wrappers */
1555
1556/**
1557 * i40e_aq_get_phy_capabilities
1558 * @hw: pointer to the hw struct
1559 * @abilities: structure for PHY capabilities to be filled
1560 * @qualified_modules: report Qualified Modules
1561 * @report_init: report init capabilities (active are default)
1562 * @cmd_details: pointer to command details structure or NULL
1563 *
1564 * Returns the various PHY abilities supported on the Port.
1565 **/
1566i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1567 bool qualified_modules, bool report_init,
1568 struct i40e_aq_get_phy_abilities_resp *abilities,
1569 struct i40e_asq_cmd_details *cmd_details)
1570{
1571 struct i40e_aq_desc desc;
1572 i40e_status status;
1573 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1574 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1575
1576 if (!abilities)
1577 return I40E_ERR_PARAM;
1578
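	/* the firmware may return EAGAIN while the PHY is busy; retry up
	 * to I40E_MAX_PHY_TIMEOUT times before giving up
	 */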
1579 do {
1580 i40e_fill_default_direct_cmd_desc(&desc,
1581 i40e_aqc_opc_get_phy_abilities);
1582
1583 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1584 if (abilities_size > I40E_AQ_LARGE_BUF)
1585 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1586
1587 if (qualified_modules)
1588 desc.params.external.param0 |=
1589 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1590
1591 if (report_init)
1592 desc.params.external.param0 |=
1593 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1594
1595 status = i40e_asq_send_command(hw, &desc, abilities,
1596 abilities_size, cmd_details);
1597
1598 switch (hw->aq.asq_last_status) {
1599 case I40E_AQ_RC_EIO:
1600 status = I40E_ERR_UNKNOWN_PHY;
1601 break;
1602 case I40E_AQ_RC_EAGAIN:
1603 usleep_range(1000, 2000);
1604 total_delay++;
1605 status = I40E_ERR_TIMEOUT;
1606 break;
1607 /* also covers I40E_AQ_RC_OK */
1608 default:
1609 break;
1610 }
1611
1612 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1613 (total_delay < max_delay));
1614
1615 if (status)
1616 return status;
1617
1618 if (report_init) {
1619 if (hw->mac.type == I40E_MAC_XL710 &&
1620 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1621 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1622 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1623 } else {
1624 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1625 hw->phy.phy_types |=
1626 ((u64)abilities->phy_type_ext << 32);
1627 }
1628 }
1629
1630 return status;
1631}
1632
1633/**
1634 * i40e_aq_set_phy_config
1635 * @hw: pointer to the hw struct
1636 * @config: structure with PHY configuration to be set
1637 * @cmd_details: pointer to command details structure or NULL
1638 *
1639 * Set the various PHY configuration parameters
1640 * supported on the Port. One or more of the Set PHY config parameters may be
1641 * ignored in MFP mode as the PF may not have the privilege to set some
1642 * of the PHY Config parameters. This status will be indicated by the
1643 * command response.
1644 **/
1645enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1646 struct i40e_aq_set_phy_config *config,
1647 struct i40e_asq_cmd_details *cmd_details)
1648{
1649 struct i40e_aq_desc desc;
1650 struct i40e_aq_set_phy_config *cmd =
1651 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1652 enum i40e_status_code status;
1653
1654 if (!config)
1655 return I40E_ERR_PARAM;
1656
1657 i40e_fill_default_direct_cmd_desc(&desc,
1658 i40e_aqc_opc_set_phy_config);
1659
1660 *cmd = *config;
1661
1662 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1663
1664 return status;
1665}
1666
1667static noinline_for_stack enum i40e_status_code
1668i40e_set_fc_status(struct i40e_hw *hw,
1669 struct i40e_aq_get_phy_abilities_resp *abilities,
1670 bool atomic_restart)
1671{
1672 struct i40e_aq_set_phy_config config;
1673 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1674 u8 pause_mask = 0x0;
1675
1676 switch (fc_mode) {
1677 case I40E_FC_FULL:
1678 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1679 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1680 break;
1681 case I40E_FC_RX_PAUSE:
1682 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1683 break;
1684 case I40E_FC_TX_PAUSE:
1685 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1686 break;
1687 default:
1688 break;
1689 }
1690
1691 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1692 /* clear the old pause settings */
1693 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1694 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1695 /* set the new abilities */
1696 config.abilities |= pause_mask;
1697	/* If the abilities are unchanged, there is nothing to update */
1698 if (config.abilities == abilities->abilities)
1699 return 0;
1700
1701 /* Auto restart link so settings take effect */
1702 if (atomic_restart)
1703 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1704 /* Copy over all the old settings */
1705 config.phy_type = abilities->phy_type;
1706 config.phy_type_ext = abilities->phy_type_ext;
1707 config.link_speed = abilities->link_speed;
1708 config.eee_capability = abilities->eee_capability;
1709 config.eeer = abilities->eeer_val;
1710 config.low_power_ctrl = abilities->d3_lpan;
1711 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1712 I40E_AQ_PHY_FEC_CONFIG_MASK;
1713
1714 return i40e_aq_set_phy_config(hw, &config, NULL);
1715}
1716
1717/**
1718 * i40e_set_fc
1719 * @hw: pointer to the hw struct
1720 * @aq_failures: buffer to return AdminQ failure information
1721 * @atomic_restart: whether to enable atomic link restart
1722 *
1723 * Set the requested flow control mode using set_phy_config.
1724 **/
1725enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1726 bool atomic_restart)
1727{
1728 struct i40e_aq_get_phy_abilities_resp abilities;
1729 enum i40e_status_code status;
1730
1731 *aq_failures = 0x0;
1732
1733 /* Get the current phy config */
1734 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1735 NULL);
1736 if (status) {
1737 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1738 return status;
1739 }
1740
1741 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1742 if (status)
1743 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1744
1745 /* Update the link info */
1746 status = i40e_update_link_info(hw);
1747 if (status) {
1748 /* Wait a little bit (on 40G cards it sometimes takes a really
1749 * long time for link to come back from the atomic reset)
1750 * and try once more
1751 */
1752 msleep(1000);
1753 status = i40e_update_link_info(hw);
1754 }
1755 if (status)
1756 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1757
1758 return status;
1759}
1760
1761/**
1762 * i40e_aq_clear_pxe_mode
1763 * @hw: pointer to the hw struct
1764 * @cmd_details: pointer to command details structure or NULL
1765 *
1766 * Tell the firmware that the driver is taking over from PXE
1767 **/
1768i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1769 struct i40e_asq_cmd_details *cmd_details)
1770{
1771 i40e_status status;
1772 struct i40e_aq_desc desc;
1773 struct i40e_aqc_clear_pxe *cmd =
1774 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1775
1776 i40e_fill_default_direct_cmd_desc(&desc,
1777 i40e_aqc_opc_clear_pxe_mode);
1778
1779 cmd->rx_cnt = 0x2;
1780
1781 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1782
1783 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1784
1785 return status;
1786}
1787
1788/**
1789 * i40e_aq_set_link_restart_an
1790 * @hw: pointer to the hw struct
1791 * @enable_link: if true: enable link, if false: disable link
1792 * @cmd_details: pointer to command details structure or NULL
1793 *
1794 * Sets up the link and restarts the Auto-Negotiation over the link.
1795 **/
1796i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1797 bool enable_link,
1798 struct i40e_asq_cmd_details *cmd_details)
1799{
1800 struct i40e_aq_desc desc;
1801 struct i40e_aqc_set_link_restart_an *cmd =
1802 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1803 i40e_status status;
1804
1805 i40e_fill_default_direct_cmd_desc(&desc,
1806 i40e_aqc_opc_set_link_restart_an);
1807
1808 cmd->command = I40E_AQ_PHY_RESTART_AN;
1809 if (enable_link)
1810 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1811 else
1812 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1813
1814 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1815
1816 return status;
1817}
1818
1819/**
1820 * i40e_aq_get_link_info
1821 * @hw: pointer to the hw struct
1822 * @enable_lse: enable/disable LinkStatusEvent reporting
1823 * @link: pointer to link status structure - optional
1824 * @cmd_details: pointer to command details structure or NULL
1825 *
1826 * Returns the link status of the adapter.
1827 **/
1828i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1829 bool enable_lse, struct i40e_link_status *link,
1830 struct i40e_asq_cmd_details *cmd_details)
1831{
1832 struct i40e_aq_desc desc;
1833 struct i40e_aqc_get_link_status *resp =
1834 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1835 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1836 i40e_status status;
1837 bool tx_pause, rx_pause;
1838 u16 command_flags;
1839
1840 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1841
1842 if (enable_lse)
1843 command_flags = I40E_AQ_LSE_ENABLE;
1844 else
1845 command_flags = I40E_AQ_LSE_DISABLE;
1846 resp->command_flags = cpu_to_le16(command_flags);
1847
1848 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1849
1850 if (status)
1851 goto aq_get_link_info_exit;
1852
1853 /* save off old link status information */
1854 hw->phy.link_info_old = *hw_link_info;
1855
1856 /* update link status */
1857 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1858 hw->phy.media_type = i40e_get_media_type(hw);
1859 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1860 hw_link_info->link_info = resp->link_info;
1861 hw_link_info->an_info = resp->an_info;
1862 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1863 I40E_AQ_CONFIG_FEC_RS_ENA);
1864 hw_link_info->ext_info = resp->ext_info;
1865 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1866 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1867 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1868
1869 /* update fc info */
1870 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1871 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1872	if (tx_pause && rx_pause)
1873 hw->fc.current_mode = I40E_FC_FULL;
1874 else if (tx_pause)
1875 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1876 else if (rx_pause)
1877 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1878 else
1879 hw->fc.current_mode = I40E_FC_NONE;
1880
1881 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1882 hw_link_info->crc_enable = true;
1883 else
1884 hw_link_info->crc_enable = false;
1885
1886 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1887 hw_link_info->lse_enable = true;
1888 else
1889 hw_link_info->lse_enable = false;
1890
1891 if ((hw->mac.type == I40E_MAC_XL710) &&
1892 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1893 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1894 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1895
1896 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1897 hw->mac.type != I40E_MAC_X722) {
1898 __le32 tmp;
1899
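		/* the response carries the supported PHY types as raw bytes;
		 * fold them into the 64-bit phy_types bitmap
		 */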
1900 memcpy(&tmp, resp->link_type, sizeof(tmp));
1901 hw->phy.phy_types = le32_to_cpu(tmp);
1902 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1903 }
1904
1905 /* save link status information */
1906 if (link)
1907 *link = *hw_link_info;
1908
1909 /* flag cleared so helper functions don't call AQ again */
1910 hw->phy.get_link_info = false;
1911
1912aq_get_link_info_exit:
1913 return status;
1914}
1915
1916/**
1917 * i40e_aq_set_phy_int_mask
1918 * @hw: pointer to the hw struct
1919 * @mask: interrupt mask to be set
1920 * @cmd_details: pointer to command details structure or NULL
1921 *
1922 * Set link interrupt mask.
1923 **/
1924i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1925 u16 mask,
1926 struct i40e_asq_cmd_details *cmd_details)
1927{
1928 struct i40e_aq_desc desc;
1929 struct i40e_aqc_set_phy_int_mask *cmd =
1930 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1931 i40e_status status;
1932
1933 i40e_fill_default_direct_cmd_desc(&desc,
1934 i40e_aqc_opc_set_phy_int_mask);
1935
1936 cmd->event_mask = cpu_to_le16(mask);
1937
1938 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1939
1940 return status;
1941}
1942
1943/**
1944 * i40e_aq_set_phy_debug
1945 * @hw: pointer to the hw struct
1946 * @cmd_flags: debug command flags
1947 * @cmd_details: pointer to command details structure or NULL
1948 *
1949 * Set PHY debug command flags, e.g. to reset the external PHY.
1950 **/
1951i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1952 struct i40e_asq_cmd_details *cmd_details)
1953{
1954 struct i40e_aq_desc desc;
1955 struct i40e_aqc_set_phy_debug *cmd =
1956 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1957 i40e_status status;
1958
1959 i40e_fill_default_direct_cmd_desc(&desc,
1960 i40e_aqc_opc_set_phy_debug);
1961
1962 cmd->command_flags = cmd_flags;
1963
1964 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1965
1966 return status;
1967}
1968
1969/**
1970 * i40e_is_aq_api_ver_ge
1971 * @aq: pointer to AdminQ info containing HW API version to compare
1972 * @maj: API major value
1973 * @min: API minor value
1974 *
1975 * Return true if the current HW API version is greater than or equal to the one provided.
1976 **/
1977static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1978 u16 min)
1979{
1980 return (aq->api_maj_ver > maj ||
1981 (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1982}
1983
1984/**
1985 * i40e_aq_add_vsi
1986 * @hw: pointer to the hw struct
1987 * @vsi_ctx: pointer to a vsi context struct
1988 * @cmd_details: pointer to command details structure or NULL
1989 *
1990 * Add a VSI context to the hardware.
1991**/
1992i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1993 struct i40e_vsi_context *vsi_ctx,
1994 struct i40e_asq_cmd_details *cmd_details)
1995{
1996 struct i40e_aq_desc desc;
1997 struct i40e_aqc_add_get_update_vsi *cmd =
1998 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1999 struct i40e_aqc_add_get_update_vsi_completion *resp =
2000 (struct i40e_aqc_add_get_update_vsi_completion *)
2001 &desc.params.raw;
2002 i40e_status status;
2003
2004 i40e_fill_default_direct_cmd_desc(&desc,
2005 i40e_aqc_opc_add_vsi);
2006
2007 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
2008 cmd->connection_type = vsi_ctx->connection_type;
2009 cmd->vf_id = vsi_ctx->vf_num;
2010 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
2011
2012 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2013
2014 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2015 sizeof(vsi_ctx->info), cmd_details);
2016
2017 if (status)
2018 goto aq_add_vsi_exit;
2019
2020 vsi_ctx->seid = le16_to_cpu(resp->seid);
2021 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2022 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2023 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2024
2025aq_add_vsi_exit:
2026 return status;
2027}
2028
2029/**
2030 * i40e_aq_set_default_vsi
2031 * @hw: pointer to the hw struct
2032 * @seid: vsi number
2033 * @cmd_details: pointer to command details structure or NULL
2034 **/
2035i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
2036 u16 seid,
2037 struct i40e_asq_cmd_details *cmd_details)
2038{
2039 struct i40e_aq_desc desc;
2040 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2041 (struct i40e_aqc_set_vsi_promiscuous_modes *)
2042 &desc.params.raw;
2043 i40e_status status;
2044
2045 i40e_fill_default_direct_cmd_desc(&desc,
2046 i40e_aqc_opc_set_vsi_promiscuous_modes);
2047
2048 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2049 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2050 cmd->seid = cpu_to_le16(seid);
2051
2052 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2053
2054 return status;
2055}
2056
2057/**
2058 * i40e_aq_clear_default_vsi
2059 * @hw: pointer to the hw struct
2060 * @seid: vsi number
2061 * @cmd_details: pointer to command details structure or NULL
2062 **/
2063i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
2064 u16 seid,
2065 struct i40e_asq_cmd_details *cmd_details)
2066{
2067 struct i40e_aq_desc desc;
2068 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2069 (struct i40e_aqc_set_vsi_promiscuous_modes *)
2070 &desc.params.raw;
2071 i40e_status status;
2072
2073 i40e_fill_default_direct_cmd_desc(&desc,
2074 i40e_aqc_opc_set_vsi_promiscuous_modes);
2075
2076 cmd->promiscuous_flags = cpu_to_le16(0);
2077 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2078 cmd->seid = cpu_to_le16(seid);
2079
2080 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2081
2082 return status;
2083}
2084
2085/**
2086 * i40e_aq_set_vsi_unicast_promiscuous
2087 * @hw: pointer to the hw struct
2088 * @seid: vsi number
2089 * @set: set unicast promiscuous enable/disable
2090 * @cmd_details: pointer to command details structure or NULL
2091 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
2092 **/
2093i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
2094 u16 seid, bool set,
2095 struct i40e_asq_cmd_details *cmd_details,
2096 bool rx_only_promisc)
2097{
2098 struct i40e_aq_desc desc;
2099 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2100 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2101 i40e_status status;
2102 u16 flags = 0;
2103
2104 i40e_fill_default_direct_cmd_desc(&desc,
2105 i40e_aqc_opc_set_vsi_promiscuous_modes);
2106
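	/* the RX_ONLY promiscuous flag is only understood by FW API
	 * version 1.5 and newer
	 */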
2107 if (set) {
2108 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2109 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2110 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2111 }
2112
2113 cmd->promiscuous_flags = cpu_to_le16(flags);
2114
2115 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2116 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2117 cmd->valid_flags |=
2118 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2119
2120 cmd->seid = cpu_to_le16(seid);
2121 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2122
2123 return status;
2124}
2125
2126/**
2127 * i40e_aq_set_vsi_multicast_promiscuous
2128 * @hw: pointer to the hw struct
2129 * @seid: vsi number
2130 * @set: set multicast promiscuous enable/disable
2131 * @cmd_details: pointer to command details structure or NULL
2132 **/
2133i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2134 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2135{
2136 struct i40e_aq_desc desc;
2137 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2138 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2139 i40e_status status;
2140 u16 flags = 0;
2141
2142 i40e_fill_default_direct_cmd_desc(&desc,
2143 i40e_aqc_opc_set_vsi_promiscuous_modes);
2144
2145 if (set)
2146 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2147
2148 cmd->promiscuous_flags = cpu_to_le16(flags);
2149
2150 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2151
2152 cmd->seid = cpu_to_le16(seid);
2153 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2154
2155 return status;
2156}
2157
2158/**
2159 * i40e_aq_set_vsi_mc_promisc_on_vlan
2160 * @hw: pointer to the hw struct
2161 * @seid: vsi number
2162 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
2163 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2164 * @cmd_details: pointer to command details structure or NULL
2165 **/
2166enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2167 u16 seid, bool enable,
2168 u16 vid,
2169 struct i40e_asq_cmd_details *cmd_details)
2170{
2171 struct i40e_aq_desc desc;
2172 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2173 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2174 enum i40e_status_code status;
2175 u16 flags = 0;
2176
2177 i40e_fill_default_direct_cmd_desc(&desc,
2178 i40e_aqc_opc_set_vsi_promiscuous_modes);
2179
2180 if (enable)
2181 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2182
2183 cmd->promiscuous_flags = cpu_to_le16(flags);
2184 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2185 cmd->seid = cpu_to_le16(seid);
2186 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2187
2188 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2189
2190 return status;
2191}
2192
2193/**
2194 * i40e_aq_set_vsi_uc_promisc_on_vlan
2195 * @hw: pointer to the hw struct
2196 * @seid: vsi number
2197 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2198 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2199 * @cmd_details: pointer to command details structure or NULL
2200 **/
2201enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2202 u16 seid, bool enable,
2203 u16 vid,
2204 struct i40e_asq_cmd_details *cmd_details)
2205{
2206 struct i40e_aq_desc desc;
2207 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2208 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2209 enum i40e_status_code status;
2210 u16 flags = 0;
2211
2212 i40e_fill_default_direct_cmd_desc(&desc,
2213 i40e_aqc_opc_set_vsi_promiscuous_modes);
2214
2215 if (enable) {
2216 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2217 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2218 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2219 }
2220
2221 cmd->promiscuous_flags = cpu_to_le16(flags);
2222 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2223 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2224 cmd->valid_flags |=
2225 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2226 cmd->seid = cpu_to_le16(seid);
2227 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2228
2229 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2230
2231 return status;
2232}
2233
2234/**
2235 * i40e_aq_set_vsi_bc_promisc_on_vlan
2236 * @hw: pointer to the hw struct
2237 * @seid: vsi number
2238 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2239 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2240 * @cmd_details: pointer to command details structure or NULL
2241 **/
2242i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2243 u16 seid, bool enable, u16 vid,
2244 struct i40e_asq_cmd_details *cmd_details)
2245{
2246 struct i40e_aq_desc desc;
2247 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2248 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2249 i40e_status status;
2250 u16 flags = 0;
2251
2252 i40e_fill_default_direct_cmd_desc(&desc,
2253 i40e_aqc_opc_set_vsi_promiscuous_modes);
2254
2255 if (enable)
2256 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2257
2258 cmd->promiscuous_flags = cpu_to_le16(flags);
2259 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2260 cmd->seid = cpu_to_le16(seid);
2261 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2262
2263 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2264
2265 return status;
2266}
2267
2268/**
2269 * i40e_aq_set_vsi_broadcast
2270 * @hw: pointer to the hw struct
2271 * @seid: vsi number
2272 * @set_filter: true to set filter, false to clear filter
2273 * @cmd_details: pointer to command details structure or NULL
2274 *
2275 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2276 **/
2277i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2278 u16 seid, bool set_filter,
2279 struct i40e_asq_cmd_details *cmd_details)
2280{
2281 struct i40e_aq_desc desc;
2282 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2283 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2284 i40e_status status;
2285
2286 i40e_fill_default_direct_cmd_desc(&desc,
2287 i40e_aqc_opc_set_vsi_promiscuous_modes);
2288
2289 if (set_filter)
2290 cmd->promiscuous_flags
2291 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2292 else
2293 cmd->promiscuous_flags
2294 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2295
2296 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2297 cmd->seid = cpu_to_le16(seid);
2298 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2299
2300 return status;
2301}
2302
2303/**
2304 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2305 * @hw: pointer to the hw struct
2306 * @seid: vsi number
2307 * @enable: set VLAN promiscuous enable/disable for the VSI
2308 * @cmd_details: pointer to command details structure or NULL
2309 **/
2310i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2311 u16 seid, bool enable,
2312 struct i40e_asq_cmd_details *cmd_details)
2313{
2314 struct i40e_aq_desc desc;
2315 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2316 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2317 i40e_status status;
2318 u16 flags = 0;
2319
2320 i40e_fill_default_direct_cmd_desc(&desc,
2321 i40e_aqc_opc_set_vsi_promiscuous_modes);
2322 if (enable)
2323 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2324
2325 cmd->promiscuous_flags = cpu_to_le16(flags);
2326 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2327 cmd->seid = cpu_to_le16(seid);
2328
2329 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2330
2331 return status;
2332}
2333
2334/**
2335 * i40e_aq_get_vsi_params - get VSI configuration info
2336 * @hw: pointer to the hw struct
2337 * @vsi_ctx: pointer to a vsi context struct
2338 * @cmd_details: pointer to command details structure or NULL
2339 **/
2340i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2341 struct i40e_vsi_context *vsi_ctx,
2342 struct i40e_asq_cmd_details *cmd_details)
2343{
2344 struct i40e_aq_desc desc;
2345 struct i40e_aqc_add_get_update_vsi *cmd =
2346 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2347 struct i40e_aqc_add_get_update_vsi_completion *resp =
2348 (struct i40e_aqc_add_get_update_vsi_completion *)
2349 &desc.params.raw;
2350 i40e_status status;
2351
2352 i40e_fill_default_direct_cmd_desc(&desc,
2353 i40e_aqc_opc_get_vsi_parameters);
2354
2355 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2356
2357 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2358
2359 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2360 sizeof(vsi_ctx->info), NULL);
2361
2362 if (status)
2363 goto aq_get_vsi_params_exit;
2364
2365 vsi_ctx->seid = le16_to_cpu(resp->seid);
2366 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2367 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2368 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2369
2370aq_get_vsi_params_exit:
2371 return status;
2372}
2373
2374/**
2375 * i40e_aq_update_vsi_params
2376 * @hw: pointer to the hw struct
2377 * @vsi_ctx: pointer to a vsi context struct
2378 * @cmd_details: pointer to command details structure or NULL
2379 *
2380 * Update a VSI context.
2381 **/
2382i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2383 struct i40e_vsi_context *vsi_ctx,
2384 struct i40e_asq_cmd_details *cmd_details)
2385{
2386 struct i40e_aq_desc desc;
2387 struct i40e_aqc_add_get_update_vsi *cmd =
2388 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2389 struct i40e_aqc_add_get_update_vsi_completion *resp =
2390 (struct i40e_aqc_add_get_update_vsi_completion *)
2391 &desc.params.raw;
2392 i40e_status status;
2393
2394 i40e_fill_default_direct_cmd_desc(&desc,
2395 i40e_aqc_opc_update_vsi_parameters);
2396 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2397
2398 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2399
2400 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2401 sizeof(vsi_ctx->info), cmd_details);
2402
2403 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2404 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2405
2406 return status;
2407}
2408
2409/**
2410 * i40e_aq_get_switch_config
2411 * @hw: pointer to the hardware structure
2412 * @buf: pointer to the result buffer
2413 * @buf_size: length of input buffer
2414 * @start_seid: seid to start for the report, 0 == beginning
2415 * @cmd_details: pointer to command details structure or NULL
2416 *
2417 * Fill the buf with switch configuration returned from AdminQ command
2418 **/
2419i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2420 struct i40e_aqc_get_switch_config_resp *buf,
2421 u16 buf_size, u16 *start_seid,
2422 struct i40e_asq_cmd_details *cmd_details)
2423{
2424 struct i40e_aq_desc desc;
2425 struct i40e_aqc_switch_seid *scfg =
2426 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2427 i40e_status status;
2428
2429 i40e_fill_default_direct_cmd_desc(&desc,
2430 i40e_aqc_opc_get_switch_config);
2431 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2432 if (buf_size > I40E_AQ_LARGE_BUF)
2433 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
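	/* the SEID is both the starting element for this query and, on
	 * return, the next SEID to request
	 */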
2434 scfg->seid = cpu_to_le16(*start_seid);
2435
2436 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2437 *start_seid = le16_to_cpu(scfg->seid);
2438
2439 return status;
2440}
2441
2442/**
2443 * i40e_aq_set_switch_config
2444 * @hw: pointer to the hardware structure
2445 * @flags: bit flag values to set
2446 * @valid_flags: which bit flags to set
2447 * @mode: cloud filter mode
2449 * @cmd_details: pointer to command details structure or NULL
2450 *
2451 * Set switch configuration bits
2452 **/
2453enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2454 u16 flags,
2455 u16 valid_flags, u8 mode,
2456 struct i40e_asq_cmd_details *cmd_details)
2457{
2458 struct i40e_aq_desc desc;
2459 struct i40e_aqc_set_switch_config *scfg =
2460 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2461 enum i40e_status_code status;
2462
2463 i40e_fill_default_direct_cmd_desc(&desc,
2464 i40e_aqc_opc_set_switch_config);
2465 scfg->flags = cpu_to_le16(flags);
2466 scfg->valid_flags = cpu_to_le16(valid_flags);
2467 scfg->mode = mode;
2468 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2469 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2470 scfg->first_tag = cpu_to_le16(hw->first_tag);
2471 scfg->second_tag = cpu_to_le16(hw->second_tag);
2472 }
2473 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2474
2475 return status;
2476}
2477
2478/**
2479 * i40e_aq_get_firmware_version
2480 * @hw: pointer to the hw struct
2481 * @fw_major_version: firmware major version
2482 * @fw_minor_version: firmware minor version
2483 * @fw_build: firmware build number
2484 * @api_major_version: AdminQ API major version
2485 * @api_minor_version: AdminQ API minor version
2486 * @cmd_details: pointer to command details structure or NULL
2487 *
2488 * Get the firmware version from the admin queue commands
2489 **/
2490i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2491 u16 *fw_major_version, u16 *fw_minor_version,
2492 u32 *fw_build,
2493 u16 *api_major_version, u16 *api_minor_version,
2494 struct i40e_asq_cmd_details *cmd_details)
2495{
2496 struct i40e_aq_desc desc;
2497 struct i40e_aqc_get_version *resp =
2498 (struct i40e_aqc_get_version *)&desc.params.raw;
2499 i40e_status status;
2500
2501 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2502
2503 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2504
2505 if (!status) {
2506 if (fw_major_version)
2507 *fw_major_version = le16_to_cpu(resp->fw_major);
2508 if (fw_minor_version)
2509 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2510 if (fw_build)
2511 *fw_build = le32_to_cpu(resp->fw_build);
2512 if (api_major_version)
2513 *api_major_version = le16_to_cpu(resp->api_major);
2514 if (api_minor_version)
2515 *api_minor_version = le16_to_cpu(resp->api_minor);
2516 }
2517
2518 return status;
2519}
2520
2521/**
2522 * i40e_aq_send_driver_version
2523 * @hw: pointer to the hw struct
2524 * @dv: driver's major, minor version
2525 * @cmd_details: pointer to command details structure or NULL
2526 *
2527 * Send the driver version to the firmware
2528 **/
2529i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2530 struct i40e_driver_version *dv,
2531 struct i40e_asq_cmd_details *cmd_details)
2532{
2533 struct i40e_aq_desc desc;
2534 struct i40e_aqc_driver_version *cmd =
2535 (struct i40e_aqc_driver_version *)&desc.params.raw;
2536 i40e_status status;
2537 u16 len;
2538
2539 if (dv == NULL)
2540 return I40E_ERR_PARAM;
2541
2542 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2543
2544 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2545 cmd->driver_major_ver = dv->major_version;
2546 cmd->driver_minor_ver = dv->minor_version;
2547 cmd->driver_build_ver = dv->build_version;
2548 cmd->driver_subbuild_ver = dv->subbuild_version;
2549
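	/* send only the leading 7-bit ASCII portion of the driver string,
	 * up to (but not including) the terminating NUL
	 */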
2550 len = 0;
2551 while (len < sizeof(dv->driver_string) &&
2552 (dv->driver_string[len] < 0x80) &&
2553 dv->driver_string[len])
2554 len++;
2555 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2556 len, cmd_details);
2557
2558 return status;
2559}
2560
2561/**
2562 * i40e_get_link_status - get status of the HW network link
2563 * @hw: pointer to the hw struct
2564 * @link_up: pointer to bool (true/false = linkup/linkdown)
2565 *
2566 * Sets *link_up to true if the link is up and false if it is down.
2567 * The value of *link_up is not valid if the returned status is non-zero.
2568 *
2569 * Side effect: LinkStatusEvent reporting becomes enabled
2570 **/
2571i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2572{
2573 i40e_status status = 0;
2574
2575 if (hw->phy.get_link_info) {
2576 status = i40e_update_link_info(hw);
2577
2578 if (status)
2579 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2580 status);
2581 }
2582
2583 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2584
2585 return status;
2586}
2587
2588/**
2589 * i40e_update_link_info - update status of the HW network link
2590 * @hw: pointer to the hw struct
2591 **/
2592noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2593{
2594 struct i40e_aq_get_phy_abilities_resp abilities;
2595 i40e_status status = 0;
2596
2597 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2598 if (status)
2599 return status;
2600
2601	/* extra checking needed to ensure the link info reported to the user is current */
2602 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2603 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2604 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2605 status = i40e_aq_get_phy_capabilities(hw, false, false,
2606 &abilities, NULL);
2607 if (status)
2608 return status;
2609
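		/* when FEC auto-negotiation is enabled report both KR and RS
		 * as the requested FEC modes
		 */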
2610 if (abilities.fec_cfg_curr_mod_ext_info &
2611 I40E_AQ_ENABLE_FEC_AUTO)
2612 hw->phy.link_info.req_fec_info =
2613 (I40E_AQ_REQUEST_FEC_KR |
2614 I40E_AQ_REQUEST_FEC_RS);
2615 else
2616 hw->phy.link_info.req_fec_info =
2617 abilities.fec_cfg_curr_mod_ext_info &
2618 (I40E_AQ_REQUEST_FEC_KR |
2619 I40E_AQ_REQUEST_FEC_RS);
2620
2621 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2622 sizeof(hw->phy.link_info.module_type));
2623 }
2624
2625 return status;
2626}
2627
2628/**
2629 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2630 * @hw: pointer to the hw struct
2631 * @uplink_seid: the MAC or other gizmo SEID
2632 * @downlink_seid: the VSI SEID
2633 * @enabled_tc: bitmap of TCs to be enabled
2634 * @default_port: true for default port VSI, false for control port
2635 * @veb_seid: pointer to where to put the resulting VEB SEID
2636 * @enable_stats: true to turn on VEB stats
2637 * @cmd_details: pointer to command details structure or NULL
2638 *
2639 * This asks the FW to add a VEB between the uplink and downlink
2640 * elements. If the uplink SEID is 0, this will be a floating VEB.
2641 **/
2642i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2643 u16 downlink_seid, u8 enabled_tc,
2644 bool default_port, u16 *veb_seid,
2645 bool enable_stats,
2646 struct i40e_asq_cmd_details *cmd_details)
2647{
2648 struct i40e_aq_desc desc;
2649 struct i40e_aqc_add_veb *cmd =
2650 (struct i40e_aqc_add_veb *)&desc.params.raw;
2651 struct i40e_aqc_add_veb_completion *resp =
2652 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2653 i40e_status status;
2654 u16 veb_flags = 0;
2655
2656 /* SEIDs need to either both be set or both be 0 for floating VEB */
2657 if (!!uplink_seid != !!downlink_seid)
2658 return I40E_ERR_PARAM;
2659
2660 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2661
2662 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2663 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2664 cmd->enable_tcs = enabled_tc;
2665 if (!uplink_seid)
2666 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2667 if (default_port)
2668 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2669 else
2670 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2671
2672 /* reverse logic here: set the bitflag to disable the stats */
2673 if (!enable_stats)
2674 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2675
2676 cmd->veb_flags = cpu_to_le16(veb_flags);
2677
2678 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2679
2680 if (!status && veb_seid)
2681 *veb_seid = le16_to_cpu(resp->veb_seid);
2682
2683 return status;
2684}
2685
2686/**
2687 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2688 * @hw: pointer to the hw struct
2689 * @veb_seid: the SEID of the VEB to query
2690 * @switch_id: the uplink switch id
2691 * @floating: set to true if the VEB is floating
2692 * @statistic_index: index of the stats counter block for this VEB
2693 * @vebs_used: number of VEBs used by the function
2694 * @vebs_free: total VEBs not reserved by any function
2695 * @cmd_details: pointer to command details structure or NULL
2696 *
2697 * This retrieves the parameters for a particular VEB, specified by
2698 * veb_seid, and returns them to the caller.
2699 **/
2700i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2701 u16 veb_seid, u16 *switch_id,
2702 bool *floating, u16 *statistic_index,
2703 u16 *vebs_used, u16 *vebs_free,
2704 struct i40e_asq_cmd_details *cmd_details)
2705{
2706 struct i40e_aq_desc desc;
2707 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2708 (struct i40e_aqc_get_veb_parameters_completion *)
2709 &desc.params.raw;
2710 i40e_status status;
2711
2712 if (veb_seid == 0)
2713 return I40E_ERR_PARAM;
2714
2715 i40e_fill_default_direct_cmd_desc(&desc,
2716 i40e_aqc_opc_get_veb_parameters);
2717 cmd_resp->seid = cpu_to_le16(veb_seid);
2718
2719 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2720 if (status)
2721 goto get_veb_exit;
2722
2723 if (switch_id)
2724 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2725 if (statistic_index)
2726 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2727 if (vebs_used)
2728 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2729 if (vebs_free)
2730 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2731 if (floating) {
2732 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2733
2734 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2735 *floating = true;
2736 else
2737 *floating = false;
2738 }
2739
2740get_veb_exit:
2741 return status;
2742}
2743
2744/**
2745 * i40e_aq_add_macvlan
2746 * @hw: pointer to the hw struct
2747 * @seid: VSI for the mac address
2748 * @mv_list: list of macvlans to be added
2749 * @count: length of the list
2750 * @cmd_details: pointer to command details structure or NULL
2751 *
2752 * Add MAC/VLAN addresses to the HW filtering
2753 **/
2754i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2755 struct i40e_aqc_add_macvlan_element_data *mv_list,
2756 u16 count, struct i40e_asq_cmd_details *cmd_details)
2757{
2758 struct i40e_aq_desc desc;
2759 struct i40e_aqc_macvlan *cmd =
2760 (struct i40e_aqc_macvlan *)&desc.params.raw;
2761 i40e_status status;
2762 u16 buf_size;
2763 int i;
2764
2765 if (count == 0 || !mv_list || !hw)
2766 return I40E_ERR_PARAM;
2767
2768 buf_size = count * sizeof(*mv_list);
2769
2770 /* prep the rest of the request */
2771 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
2772 cmd->num_addresses = cpu_to_le16(count);
2773 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2774 cmd->seid[1] = 0;
2775 cmd->seid[2] = 0;
2776
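	/* Multicast addresses are marked to use the FW's shared MAC filter
	 * (I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC), which, per the flag name,
	 * lets such addresses share a filter entry instead of each taking a
	 * dedicated one.
	 */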
2777 for (i = 0; i < count; i++)
2778 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2779 mv_list[i].flags |=
2780 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2781
2782 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2783 if (buf_size > I40E_AQ_LARGE_BUF)
2784 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2785
2786 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2787 cmd_details);
2788
2789 return status;
2790}
2791
2792/**
2793 * i40e_aq_remove_macvlan
2794 * @hw: pointer to the hw struct
2795 * @seid: VSI for the mac address
2796 * @mv_list: list of macvlans to be removed
2797 * @count: length of the list
2798 * @cmd_details: pointer to command details structure or NULL
2799 *
2800 * Remove MAC/VLAN addresses from the HW filtering
2801 **/
2802i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2803 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2804 u16 count, struct i40e_asq_cmd_details *cmd_details)
2805{
2806 struct i40e_aq_desc desc;
2807 struct i40e_aqc_macvlan *cmd =
2808 (struct i40e_aqc_macvlan *)&desc.params.raw;
2809 i40e_status status;
2810 u16 buf_size;
2811
2812 if (count == 0 || !mv_list || !hw)
2813 return I40E_ERR_PARAM;
2814
2815 buf_size = count * sizeof(*mv_list);
2816
2817 /* prep the rest of the request */
2818 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2819 cmd->num_addresses = cpu_to_le16(count);
2820 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2821 cmd->seid[1] = 0;
2822 cmd->seid[2] = 0;
2823
2824 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2825 if (buf_size > I40E_AQ_LARGE_BUF)
2826 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2827
2828 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2829 cmd_details);
2830
2831 return status;
2832}
2833
2834/**
2835 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2836 * @hw: pointer to the hw struct
2837 * @opcode: AQ opcode for add or delete mirror rule
2838 * @sw_seid: Switch SEID (to which rule refers)
2839 * @rule_type: Rule Type (ingress/egress/VLAN)
2840 * @id: Destination VSI SEID or Rule ID
2841 * @count: length of the list
2842 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2843 * @cmd_details: pointer to command details structure or NULL
2844 * @rule_id: Rule ID returned from FW
2845 * @rules_used: Number of rules used in internal switch
2846 * @rules_free: Number of rules free in internal switch
2847 *
2848 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2849 * VEBs/VEPA elements only
2850 **/
2851static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2852 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2853 u16 count, __le16 *mr_list,
2854 struct i40e_asq_cmd_details *cmd_details,
2855 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2856{
2857 struct i40e_aq_desc desc;
2858 struct i40e_aqc_add_delete_mirror_rule *cmd =
2859 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2860 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2861 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2862 i40e_status status;
2863 u16 buf_size;
2864
2865 buf_size = count * sizeof(*mr_list);
2866
2867 /* prep the rest of the request */
2868 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2869 cmd->seid = cpu_to_le16(sw_seid);
2870 cmd->rule_type = cpu_to_le16(rule_type &
2871 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2872 cmd->num_entries = cpu_to_le16(count);
2873 /* Dest VSI for add, rule_id for delete */
2874 cmd->destination = cpu_to_le16(id);
2875 if (mr_list) {
2876 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2877 I40E_AQ_FLAG_RD));
2878 if (buf_size > I40E_AQ_LARGE_BUF)
2879 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2880 }
2881
2882 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2883 cmd_details);
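	/* Even when the switch runs out of mirror-rule resources (ENOSPC),
	 * the completion still reports the rule counters, so hand those back
	 * to the caller.
	 */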
2884 if (!status ||
2885 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2886 if (rule_id)
2887 *rule_id = le16_to_cpu(resp->rule_id);
2888 if (rules_used)
2889 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2890 if (rules_free)
2891 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2892 }
2893 return status;
2894}
2895
2896/**
2897 * i40e_aq_add_mirrorrule - add a mirror rule
2898 * @hw: pointer to the hw struct
2899 * @sw_seid: Switch SEID (to which rule refers)
2900 * @rule_type: Rule Type (ingress/egress/VLAN)
2901 * @dest_vsi: SEID of VSI to which packets will be mirrored
2902 * @count: length of the list
2903 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2904 * @cmd_details: pointer to command details structure or NULL
2905 * @rule_id: Rule ID returned from FW
2906 * @rules_used: Number of rules used in internal switch
2907 * @rules_free: Number of rules free in internal switch
2908 *
2909 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2910 **/
2911i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2912 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2913 struct i40e_asq_cmd_details *cmd_details,
2914 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2915{
2916 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2917 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2918 if (count == 0 || !mr_list)
2919 return I40E_ERR_PARAM;
2920 }
2921
2922 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2923 rule_type, dest_vsi, count, mr_list,
2924 cmd_details, rule_id, rules_used, rules_free);
2925}
2926
2927/**
2928 * i40e_aq_delete_mirrorrule - delete a mirror rule
2929 * @hw: pointer to the hw struct
2930 * @sw_seid: Switch SEID (to which rule refers)
2931 * @rule_type: Rule Type (ingress/egress/VLAN)
2932 * @count: length of the list
2933 * @rule_id: Rule ID that is returned in the receive desc as part of
2934 * add_mirrorrule.
2935 * @mr_list: list of mirrored VLAN IDs to be removed
2936 * @cmd_details: pointer to command details structure or NULL
2937 * @rules_used: Number of rules used in internal switch
2938 * @rules_free: Number of rules free in internal switch
2939 *
2940 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2941 **/
2942i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2943 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2944 struct i40e_asq_cmd_details *cmd_details,
2945 u16 *rules_used, u16 *rules_free)
2946{
 2947	/* The rule ID has to be valid except for rule_type INGRESS VLAN mirroring */
 2948	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
 2949		/* count and mr_list must be valid for rule_type INGRESS VLAN
 2950		 * mirroring. For other rule types, count and mr_list are not
 2951		 * used.
 2952		 */
2953 if (count == 0 || !mr_list)
2954 return I40E_ERR_PARAM;
2955 }
2956
2957 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2958 rule_type, rule_id, count, mr_list,
2959 cmd_details, NULL, rules_used, rules_free);
2960}
2961
2962/**
2963 * i40e_aq_send_msg_to_vf
2964 * @hw: pointer to the hardware structure
2965 * @vfid: VF id to send msg
2966 * @v_opcode: opcodes for VF-PF communication
2967 * @v_retval: return error code
2968 * @msg: pointer to the msg buffer
2969 * @msglen: msg length
2970 * @cmd_details: pointer to command details
2971 *
2972 * send msg to vf
2973 **/
2974i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2975 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2976 struct i40e_asq_cmd_details *cmd_details)
2977{
2978 struct i40e_aq_desc desc;
2979 struct i40e_aqc_pf_vf_message *cmd =
2980 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2981 i40e_status status;
2982
2983 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2984 cmd->id = cpu_to_le32(vfid);
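	/* The virtchnl opcode and return value ride in the descriptor cookie
	 * fields; any payload is attached as an indirect buffer.
	 */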
2985 desc.cookie_high = cpu_to_le32(v_opcode);
2986 desc.cookie_low = cpu_to_le32(v_retval);
2987 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2988 if (msglen) {
2989 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2990 I40E_AQ_FLAG_RD));
2991 if (msglen > I40E_AQ_LARGE_BUF)
2992 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2993 desc.datalen = cpu_to_le16(msglen);
2994 }
2995 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2996
2997 return status;
2998}
2999
3000/**
3001 * i40e_aq_debug_read_register
3002 * @hw: pointer to the hw struct
3003 * @reg_addr: register address
3004 * @reg_val: register value
3005 * @cmd_details: pointer to command details structure or NULL
3006 *
3007 * Read the register using the admin queue commands
3008 **/
3009i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
3010 u32 reg_addr, u64 *reg_val,
3011 struct i40e_asq_cmd_details *cmd_details)
3012{
3013 struct i40e_aq_desc desc;
3014 struct i40e_aqc_debug_reg_read_write *cmd_resp =
3015 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3016 i40e_status status;
3017
3018 if (reg_val == NULL)
3019 return I40E_ERR_PARAM;
3020
3021 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
3022
3023 cmd_resp->address = cpu_to_le32(reg_addr);
3024
3025 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3026
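	/* The 64-bit register value is returned split across two 32-bit fields. */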
3027 if (!status) {
3028 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
3029 (u64)le32_to_cpu(cmd_resp->value_low);
3030 }
3031
3032 return status;
3033}
3034
3035/**
3036 * i40e_aq_debug_write_register
3037 * @hw: pointer to the hw struct
3038 * @reg_addr: register address
3039 * @reg_val: register value
3040 * @cmd_details: pointer to command details structure or NULL
3041 *
3042 * Write to a register using the admin queue commands
3043 **/
3044i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
3045 u32 reg_addr, u64 reg_val,
3046 struct i40e_asq_cmd_details *cmd_details)
3047{
3048 struct i40e_aq_desc desc;
3049 struct i40e_aqc_debug_reg_read_write *cmd =
3050 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3051 i40e_status status;
3052
3053 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
3054
3055 cmd->address = cpu_to_le32(reg_addr);
3056 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
3057 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
3058
3059 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3060
3061 return status;
3062}
3063
3064/**
3065 * i40e_aq_request_resource
3066 * @hw: pointer to the hw struct
3067 * @resource: resource id
3068 * @access: access type
3069 * @sdp_number: resource number
3070 * @timeout: the maximum time in ms that the driver may hold the resource
3071 * @cmd_details: pointer to command details structure or NULL
3072 *
3073 * requests common resource using the admin queue commands
3074 **/
3075i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3076 enum i40e_aq_resources_ids resource,
3077 enum i40e_aq_resource_access_type access,
3078 u8 sdp_number, u64 *timeout,
3079 struct i40e_asq_cmd_details *cmd_details)
3080{
3081 struct i40e_aq_desc desc;
3082 struct i40e_aqc_request_resource *cmd_resp =
3083 (struct i40e_aqc_request_resource *)&desc.params.raw;
3084 i40e_status status;
3085
3086 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3087
3088 cmd_resp->resource_id = cpu_to_le16(resource);
3089 cmd_resp->access_type = cpu_to_le16(access);
3090 cmd_resp->resource_number = cpu_to_le32(sdp_number);
3091
3092 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3093 /* The completion specifies the maximum time in ms that the driver
3094 * may hold the resource in the Timeout field.
3095 * If the resource is held by someone else, the command completes with
3096 * busy return value and the timeout field indicates the maximum time
3097 * the current owner of the resource has to free it.
3098 */
3099 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3100 *timeout = le32_to_cpu(cmd_resp->timeout);
3101
3102 return status;
3103}
3104
3105/**
3106 * i40e_aq_release_resource
3107 * @hw: pointer to the hw struct
3108 * @resource: resource id
3109 * @sdp_number: resource number
3110 * @cmd_details: pointer to command details structure or NULL
3111 *
3112 * release common resource using the admin queue commands
3113 **/
3114i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3115 enum i40e_aq_resources_ids resource,
3116 u8 sdp_number,
3117 struct i40e_asq_cmd_details *cmd_details)
3118{
3119 struct i40e_aq_desc desc;
3120 struct i40e_aqc_request_resource *cmd =
3121 (struct i40e_aqc_request_resource *)&desc.params.raw;
3122 i40e_status status;
3123
3124 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3125
3126 cmd->resource_id = cpu_to_le16(resource);
3127 cmd->resource_number = cpu_to_le32(sdp_number);
3128
3129 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3130
3131 return status;
3132}
3133
3134/**
3135 * i40e_aq_read_nvm
3136 * @hw: pointer to the hw struct
3137 * @module_pointer: module pointer location in words from the NVM beginning
3138 * @offset: byte offset from the module beginning
3139 * @length: length of the section to be read (in bytes from the offset)
3140 * @data: command buffer (size [bytes] = length)
3141 * @last_command: tells if this is the last command in a series
3142 * @cmd_details: pointer to command details structure or NULL
3143 *
3144 * Read the NVM using the admin queue commands
3145 **/
3146i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3147 u32 offset, u16 length, void *data,
3148 bool last_command,
3149 struct i40e_asq_cmd_details *cmd_details)
3150{
3151 struct i40e_aq_desc desc;
3152 struct i40e_aqc_nvm_update *cmd =
3153 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3154 i40e_status status;
3155
 3156	/* The highest byte of the offset must be zero. */
3157 if (offset & 0xFF000000) {
3158 status = I40E_ERR_PARAM;
3159 goto i40e_aq_read_nvm_exit;
3160 }
3161
3162 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3163
3164 /* If this is the last command in a series, set the proper flag. */
3165 if (last_command)
3166 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3167 cmd->module_pointer = module_pointer;
3168 cmd->offset = cpu_to_le32(offset);
3169 cmd->length = cpu_to_le16(length);
3170
3171 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3172 if (length > I40E_AQ_LARGE_BUF)
3173 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3174
3175 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3176
3177i40e_aq_read_nvm_exit:
3178 return status;
3179}
3180
3181/**
3182 * i40e_aq_erase_nvm
3183 * @hw: pointer to the hw struct
3184 * @module_pointer: module pointer location in words from the NVM beginning
3185 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3186 * @length: length of the section to be erased (expressed in 4 KB)
3187 * @last_command: tells if this is the last command in a series
3188 * @cmd_details: pointer to command details structure or NULL
3189 *
3190 * Erase the NVM sector using the admin queue commands
3191 **/
3192i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3193 u32 offset, u16 length, bool last_command,
3194 struct i40e_asq_cmd_details *cmd_details)
3195{
3196 struct i40e_aq_desc desc;
3197 struct i40e_aqc_nvm_update *cmd =
3198 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3199 i40e_status status;
3200
3201 /* In offset the highest byte must be zeroed. */
3202 if (offset & 0xFF000000) {
3203 status = I40E_ERR_PARAM;
3204 goto i40e_aq_erase_nvm_exit;
3205 }
3206
3207 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3208
3209 /* If this is the last command in a series, set the proper flag. */
3210 if (last_command)
3211 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3212 cmd->module_pointer = module_pointer;
3213 cmd->offset = cpu_to_le32(offset);
3214 cmd->length = cpu_to_le16(length);
3215
3216 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3217
3218i40e_aq_erase_nvm_exit:
3219 return status;
3220}
3221
3222/**
3223 * i40e_parse_discover_capabilities
3224 * @hw: pointer to the hw struct
3225 * @buff: pointer to a buffer containing device/function capability records
3226 * @cap_count: number of capability records in the list
3227 * @list_type_opc: type of capabilities list to parse
3228 *
3229 * Parse the device/function capabilities list.
3230 **/
3231static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3232 u32 cap_count,
3233 enum i40e_admin_queue_opc list_type_opc)
3234{
3235 struct i40e_aqc_list_capabilities_element_resp *cap;
3236 u32 valid_functions, num_functions;
3237 u32 number, logical_id, phys_id;
3238 struct i40e_hw_capabilities *p;
3239 u16 id, ocp_cfg_word0;
3240 i40e_status status;
3241 u8 major_rev;
3242 u32 i = 0;
3243
3244 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3245
3246 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3247 p = &hw->dev_caps;
3248 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3249 p = &hw->func_caps;
3250 else
3251 return;
3252
3253 for (i = 0; i < cap_count; i++, cap++) {
3254 id = le16_to_cpu(cap->id);
3255 number = le32_to_cpu(cap->number);
3256 logical_id = le32_to_cpu(cap->logical_id);
3257 phys_id = le32_to_cpu(cap->phys_id);
3258 major_rev = cap->major_rev;
3259
3260 switch (id) {
3261 case I40E_AQ_CAP_ID_SWITCH_MODE:
3262 p->switch_mode = number;
3263 break;
3264 case I40E_AQ_CAP_ID_MNG_MODE:
3265 p->management_mode = number;
3266 if (major_rev > 1) {
3267 p->mng_protocols_over_mctp = logical_id;
3268 i40e_debug(hw, I40E_DEBUG_INIT,
3269 "HW Capability: Protocols over MCTP = %d\n",
3270 p->mng_protocols_over_mctp);
3271 } else {
3272 p->mng_protocols_over_mctp = 0;
3273 }
3274 break;
3275 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3276 p->npar_enable = number;
3277 break;
3278 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3279 p->os2bmc = number;
3280 break;
3281 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3282 p->valid_functions = number;
3283 break;
3284 case I40E_AQ_CAP_ID_SRIOV:
3285 if (number == 1)
3286 p->sr_iov_1_1 = true;
3287 break;
3288 case I40E_AQ_CAP_ID_VF:
3289 p->num_vfs = number;
3290 p->vf_base_id = logical_id;
3291 break;
3292 case I40E_AQ_CAP_ID_VMDQ:
3293 if (number == 1)
3294 p->vmdq = true;
3295 break;
3296 case I40E_AQ_CAP_ID_8021QBG:
3297 if (number == 1)
3298 p->evb_802_1_qbg = true;
3299 break;
3300 case I40E_AQ_CAP_ID_8021QBR:
3301 if (number == 1)
3302 p->evb_802_1_qbh = true;
3303 break;
3304 case I40E_AQ_CAP_ID_VSI:
3305 p->num_vsis = number;
3306 break;
3307 case I40E_AQ_CAP_ID_DCB:
3308 if (number == 1) {
3309 p->dcb = true;
3310 p->enabled_tcmap = logical_id;
3311 p->maxtc = phys_id;
3312 }
3313 break;
3314 case I40E_AQ_CAP_ID_FCOE:
3315 if (number == 1)
3316 p->fcoe = true;
3317 break;
3318 case I40E_AQ_CAP_ID_ISCSI:
3319 if (number == 1)
3320 p->iscsi = true;
3321 break;
3322 case I40E_AQ_CAP_ID_RSS:
3323 p->rss = true;
3324 p->rss_table_size = number;
3325 p->rss_table_entry_width = logical_id;
3326 break;
3327 case I40E_AQ_CAP_ID_RXQ:
3328 p->num_rx_qp = number;
3329 p->base_queue = phys_id;
3330 break;
3331 case I40E_AQ_CAP_ID_TXQ:
3332 p->num_tx_qp = number;
3333 p->base_queue = phys_id;
3334 break;
3335 case I40E_AQ_CAP_ID_MSIX:
3336 p->num_msix_vectors = number;
3337 i40e_debug(hw, I40E_DEBUG_INIT,
3338 "HW Capability: MSIX vector count = %d\n",
3339 p->num_msix_vectors);
3340 break;
3341 case I40E_AQ_CAP_ID_VF_MSIX:
3342 p->num_msix_vectors_vf = number;
3343 break;
3344 case I40E_AQ_CAP_ID_FLEX10:
3345 if (major_rev == 1) {
3346 if (number == 1) {
3347 p->flex10_enable = true;
3348 p->flex10_capable = true;
3349 }
3350 } else {
3351 /* Capability revision >= 2 */
3352 if (number & 1)
3353 p->flex10_enable = true;
3354 if (number & 2)
3355 p->flex10_capable = true;
3356 }
3357 p->flex10_mode = logical_id;
3358 p->flex10_status = phys_id;
3359 break;
3360 case I40E_AQ_CAP_ID_CEM:
3361 if (number == 1)
3362 p->mgmt_cem = true;
3363 break;
3364 case I40E_AQ_CAP_ID_IWARP:
3365 if (number == 1)
3366 p->iwarp = true;
3367 break;
3368 case I40E_AQ_CAP_ID_LED:
3369 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3370 p->led[phys_id] = true;
3371 break;
3372 case I40E_AQ_CAP_ID_SDP:
3373 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3374 p->sdp[phys_id] = true;
3375 break;
3376 case I40E_AQ_CAP_ID_MDIO:
3377 if (number == 1) {
3378 p->mdio_port_num = phys_id;
3379 p->mdio_port_mode = logical_id;
3380 }
3381 break;
3382 case I40E_AQ_CAP_ID_1588:
3383 if (number == 1)
3384 p->ieee_1588 = true;
3385 break;
3386 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3387 p->fd = true;
3388 p->fd_filters_guaranteed = number;
3389 p->fd_filters_best_effort = logical_id;
3390 break;
3391 case I40E_AQ_CAP_ID_WSR_PROT:
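			/* 64-bit value: low 32 bits in 'number', high 32 bits
			 * in 'logical_id'
			 */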
3392 p->wr_csr_prot = (u64)number;
3393 p->wr_csr_prot |= (u64)logical_id << 32;
3394 break;
3395 case I40E_AQ_CAP_ID_NVM_MGMT:
3396 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3397 p->sec_rev_disabled = true;
3398 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3399 p->update_disabled = true;
3400 break;
3401 default:
3402 break;
3403 }
3404 }
3405
3406 if (p->fcoe)
3407 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3408
 3409	/* Software override ensuring FCoE is disabled in NPAR or MFP
 3410	 * mode, because it is not supported in those modes.
 3411	 */
3412 if (p->npar_enable || p->flex10_enable)
3413 p->fcoe = false;
3414
3415 /* count the enabled ports (aka the "not disabled" ports) */
3416 hw->num_ports = 0;
3417 for (i = 0; i < 4; i++) {
3418 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3419 u64 port_cfg = 0;
3420
3421 /* use AQ read to get the physical register offset instead
3422 * of the port relative offset
3423 */
3424 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3425 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3426 hw->num_ports++;
3427 }
3428
 3429	/* OCP cards case: if a mezz is removed, the Ethernet port is left in
 3430	 * a disabled state in the PRTGEN_CNF register. An additional NVM read
 3431	 * is needed in order to check whether we are dealing with an OCP
 3432	 * card. Those cards have at least 4 PFs, so using PRTGEN_CNF to count
 3433	 * physical ports would give a wrong partition id and thus break WoL
 3434	 * support.
 3435	 */
3436 if (hw->mac.type == I40E_MAC_X722) {
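		/* Shadow RAM pointers are word offsets; the AQ NVM read takes
		 * a byte offset, hence the factor of 2 below.
		 */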
3437 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3438 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3439 2 * I40E_SR_OCP_CFG_WORD0,
3440 sizeof(ocp_cfg_word0),
3441 &ocp_cfg_word0, true, NULL);
3442 if (!status &&
3443 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3444 hw->num_ports = 4;
3445 i40e_release_nvm(hw);
3446 }
3447 }
3448
3449 valid_functions = p->valid_functions;
3450 num_functions = 0;
3451 while (valid_functions) {
3452 if (valid_functions & 1)
3453 num_functions++;
3454 valid_functions >>= 1;
3455 }
3456
3457 /* partition id is 1-based, and functions are evenly spread
3458 * across the ports as partitions
3459 */
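	/* e.g. 4 ports and 8 PFs give 2 partitions: PFs 0-3 map to
	 * partition 1 and PFs 4-7 to partition 2
	 */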
3460 if (hw->num_ports != 0) {
3461 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3462 hw->num_partitions = num_functions / hw->num_ports;
3463 }
3464
3465 /* additional HW specific goodies that might
3466 * someday be HW version specific
3467 */
3468 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3469}
3470
3471/**
3472 * i40e_aq_discover_capabilities
3473 * @hw: pointer to the hw struct
3474 * @buff: a virtual buffer to hold the capabilities
3475 * @buff_size: Size of the virtual buffer
3476 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3477 * @list_type_opc: capabilities type to discover - pass in the command opcode
3478 * @cmd_details: pointer to command details structure or NULL
3479 *
3480 * Get the device capabilities descriptions from the firmware
3481 **/
3482i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3483 void *buff, u16 buff_size, u16 *data_size,
3484 enum i40e_admin_queue_opc list_type_opc,
3485 struct i40e_asq_cmd_details *cmd_details)
3486{
3487 struct i40e_aqc_list_capabilites *cmd;
3488 struct i40e_aq_desc desc;
3489 i40e_status status = 0;
3490
3491 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3492
3493 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3494 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3495 status = I40E_ERR_PARAM;
3496 goto exit;
3497 }
3498
3499 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3500
3501 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3502 if (buff_size > I40E_AQ_LARGE_BUF)
3503 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3504
3505 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3506 *data_size = le16_to_cpu(desc.datalen);
3507
3508 if (status)
3509 goto exit;
3510
3511 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3512 list_type_opc);
3513
3514exit:
3515 return status;
3516}
3517
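/* A typical caller pattern (sketch only; the variable names below are
 * illustrative and not taken from this file): allocate a buffer, issue
 * the command, and if the FW answers ENOMEM retry with the size the FW
 * reported back through *data_size, e.g.:
 *
 *	do {
 *		buff = kzalloc(buff_size, GFP_KERNEL);
 *		if (!buff)
 *			return -ENOMEM;
 *		err = i40e_aq_discover_capabilities(hw, buff, buff_size,
 *						    &needed_size,
 *						    list_type_opc, NULL);
 *		kfree(buff);
 *		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
 *			buff_size = needed_size;
 *	} while (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM);
 */
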
3518/**
3519 * i40e_aq_update_nvm
3520 * @hw: pointer to the hw struct
3521 * @module_pointer: module pointer location in words from the NVM beginning
3522 * @offset: byte offset from the module beginning
3523 * @length: length of the section to be written (in bytes from the offset)
3524 * @data: command buffer (size [bytes] = length)
3525 * @last_command: tells if this is the last command in a series
3526 * @preservation_flags: Preservation mode flags
3527 * @cmd_details: pointer to command details structure or NULL
3528 *
3529 * Update the NVM using the admin queue commands
3530 **/
3531i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3532 u32 offset, u16 length, void *data,
3533 bool last_command, u8 preservation_flags,
3534 struct i40e_asq_cmd_details *cmd_details)
3535{
3536 struct i40e_aq_desc desc;
3537 struct i40e_aqc_nvm_update *cmd =
3538 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3539 i40e_status status;
3540
 3541	/* The highest byte of the offset must be zero. */
3542 if (offset & 0xFF000000) {
3543 status = I40E_ERR_PARAM;
3544 goto i40e_aq_update_nvm_exit;
3545 }
3546
3547 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3548
3549 /* If this is the last command in a series, set the proper flag. */
3550 if (last_command)
3551 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3552 if (hw->mac.type == I40E_MAC_X722) {
3553 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3554 cmd->command_flags |=
3555 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3556 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3557 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3558 cmd->command_flags |=
3559 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3560 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3561 }
3562 cmd->module_pointer = module_pointer;
3563 cmd->offset = cpu_to_le32(offset);
3564 cmd->length = cpu_to_le16(length);
3565
3566 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3567 if (length > I40E_AQ_LARGE_BUF)
3568 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3569
3570 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3571
3572i40e_aq_update_nvm_exit:
3573 return status;
3574}
3575
3576/**
3577 * i40e_aq_rearrange_nvm
3578 * @hw: pointer to the hw struct
3579 * @rearrange_nvm: defines direction of rearrangement
3580 * @cmd_details: pointer to command details structure or NULL
3581 *
3582 * Rearrange NVM structure, available only for transition FW
3583 **/
3584i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3585 u8 rearrange_nvm,
3586 struct i40e_asq_cmd_details *cmd_details)
3587{
3588 struct i40e_aqc_nvm_update *cmd;
3589 i40e_status status;
3590 struct i40e_aq_desc desc;
3591
3592 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3593
3594 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3595
3596 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3597 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3598
3599 if (!rearrange_nvm) {
3600 status = I40E_ERR_PARAM;
3601 goto i40e_aq_rearrange_nvm_exit;
3602 }
3603
3604 cmd->command_flags |= rearrange_nvm;
3605 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3606
3607i40e_aq_rearrange_nvm_exit:
3608 return status;
3609}
3610
3611/**
3612 * i40e_aq_get_lldp_mib
3613 * @hw: pointer to the hw struct
3614 * @bridge_type: type of bridge requested
3615 * @mib_type: Local, Remote or both Local and Remote MIBs
3616 * @buff: pointer to a user supplied buffer to store the MIB block
3617 * @buff_size: size of the buffer (in bytes)
 3618 * @local_len: length of the returned Local LLDP MIB
3619 * @remote_len: length of the returned Remote LLDP MIB
3620 * @cmd_details: pointer to command details structure or NULL
3621 *
3622 * Requests the complete LLDP MIB (entire packet).
3623 **/
3624i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3625 u8 mib_type, void *buff, u16 buff_size,
3626 u16 *local_len, u16 *remote_len,
3627 struct i40e_asq_cmd_details *cmd_details)
3628{
3629 struct i40e_aq_desc desc;
3630 struct i40e_aqc_lldp_get_mib *cmd =
3631 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3632 struct i40e_aqc_lldp_get_mib *resp =
3633 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3634 i40e_status status;
3635
3636 if (buff_size == 0 || !buff)
3637 return I40E_ERR_PARAM;
3638
3639 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3640 /* Indirect Command */
3641 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3642
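	/* The 'type' byte carries the MIB type in its low bits and the
	 * bridge type in the bits above it.
	 */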
3643 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3644 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3645 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3646
3647 desc.datalen = cpu_to_le16(buff_size);
3648
3649 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3650 if (buff_size > I40E_AQ_LARGE_BUF)
3651 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3652
3653 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3654 if (!status) {
3655 if (local_len != NULL)
3656 *local_len = le16_to_cpu(resp->local_len);
3657 if (remote_len != NULL)
3658 *remote_len = le16_to_cpu(resp->remote_len);
3659 }
3660
3661 return status;
3662}
3663
3664/**
3665 * i40e_aq_cfg_lldp_mib_change_event
3666 * @hw: pointer to the hw struct
3667 * @enable_update: Enable or Disable event posting
3668 * @cmd_details: pointer to command details structure or NULL
3669 *
 3670 * Enable or disable posting of an event on the ARQ when the LLDP MIB
 3671 * associated with the interface changes
3672 **/
3673i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3674 bool enable_update,
3675 struct i40e_asq_cmd_details *cmd_details)
3676{
3677 struct i40e_aq_desc desc;
3678 struct i40e_aqc_lldp_update_mib *cmd =
3679 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3680 i40e_status status;
3681
3682 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3683
3684 if (!enable_update)
3685 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3686
3687 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3688
3689 return status;
3690}
3691
3692/**
3693 * i40e_aq_restore_lldp
3694 * @hw: pointer to the hw struct
3695 * @setting: pointer to factory setting variable or NULL
3696 * @restore: True if factory settings should be restored
3697 * @cmd_details: pointer to command details structure or NULL
3698 *
 3699 * Restore LLDP Agent factory settings if @restore is set to true. Otherwise,
 3700 * only return the factory setting reported in the AQ response.
3701 **/
3702enum i40e_status_code
3703i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3704 struct i40e_asq_cmd_details *cmd_details)
3705{
3706 struct i40e_aq_desc desc;
3707 struct i40e_aqc_lldp_restore *cmd =
3708 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3709 i40e_status status;
3710
3711 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3712 i40e_debug(hw, I40E_DEBUG_ALL,
3713 "Restore LLDP not supported by current FW version.\n");
3714 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3715 }
3716
3717 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3718
3719 if (restore)
3720 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3721
3722 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3723
3724 if (setting)
3725 *setting = cmd->command & 1;
3726
3727 return status;
3728}
3729
3730/**
3731 * i40e_aq_stop_lldp
3732 * @hw: pointer to the hw struct
 3733 * @shutdown_agent: True if the LLDP Agent needs to be shut down
3734 * @persist: True if stop of LLDP should be persistent across power cycles
3735 * @cmd_details: pointer to command details structure or NULL
3736 *
3737 * Stop or Shutdown the embedded LLDP Agent
3738 **/
3739i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3740 bool persist,
3741 struct i40e_asq_cmd_details *cmd_details)
3742{
3743 struct i40e_aq_desc desc;
3744 struct i40e_aqc_lldp_stop *cmd =
3745 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3746 i40e_status status;
3747
3748 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3749
3750 if (shutdown_agent)
3751 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3752
3753 if (persist) {
3754 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3755 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3756 else
3757 i40e_debug(hw, I40E_DEBUG_ALL,
3758 "Persistent Stop LLDP not supported by current FW version.\n");
3759 }
3760
3761 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3762
3763 return status;
3764}
3765
3766/**
3767 * i40e_aq_start_lldp
3768 * @hw: pointer to the hw struct
 3770 * @persist: True if start of LLDP should be persistent across power cycles
3772 * @cmd_details: pointer to command details structure or NULL
3773 *
3774 * Start the embedded LLDP Agent on all ports.
3775 **/
3776i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3777 struct i40e_asq_cmd_details *cmd_details)
3778{
3779 struct i40e_aq_desc desc;
3780 struct i40e_aqc_lldp_start *cmd =
3781 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3782 i40e_status status;
3783
3784 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3785
3786 cmd->command = I40E_AQ_LLDP_AGENT_START;
3787
3788 if (persist) {
3789 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3790 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3791 else
3792 i40e_debug(hw, I40E_DEBUG_ALL,
3793 "Persistent Start LLDP not supported by current FW version.\n");
3794 }
3795
3796 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3797
3798 return status;
3799}
3800
3801/**
3802 * i40e_aq_set_dcb_parameters
3803 * @hw: pointer to the hw struct
 3804 * @dcb_enable: True if DCB configuration needs to be applied
 3805 * @cmd_details: pointer to command details structure or NULL
 3806 *
 * Tell the firmware whether the DCB configuration should be applied.
3807 **/
3808enum i40e_status_code
3809i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3810 struct i40e_asq_cmd_details *cmd_details)
3811{
3812 struct i40e_aq_desc desc;
3813 struct i40e_aqc_set_dcb_parameters *cmd =
3814 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3815 i40e_status status;
3816
3817 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3818 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3819
3820 i40e_fill_default_direct_cmd_desc(&desc,
3821 i40e_aqc_opc_set_dcb_parameters);
3822
3823 if (dcb_enable) {
3824 cmd->valid_flags = I40E_DCB_VALID;
3825 cmd->command = I40E_AQ_DCB_SET_AGENT;
3826 }
3827 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3828
3829 return status;
3830}
3831
3832/**
3833 * i40e_aq_get_cee_dcb_config
3834 * @hw: pointer to the hw struct
3835 * @buff: response buffer that stores CEE operational configuration
3836 * @buff_size: size of the buffer passed
3837 * @cmd_details: pointer to command details structure or NULL
3838 *
3839 * Get CEE DCBX mode operational configuration from firmware
3840 **/
3841i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3842 void *buff, u16 buff_size,
3843 struct i40e_asq_cmd_details *cmd_details)
3844{
3845 struct i40e_aq_desc desc;
3846 i40e_status status;
3847
3848 if (buff_size == 0 || !buff)
3849 return I40E_ERR_PARAM;
3850
3851 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3852
3853 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3854 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3855 cmd_details);
3856
3857 return status;
3858}
3859
3860/**
3861 * i40e_aq_add_udp_tunnel
3862 * @hw: pointer to the hw struct
3863 * @udp_port: the UDP port to add in Host byte order
3864 * @protocol_index: protocol index type
3865 * @filter_index: pointer to filter index
3866 * @cmd_details: pointer to command details structure or NULL
3867 *
3868 * Note: Firmware expects the udp_port value to be in Little Endian format,
3869 * and this function will call cpu_to_le16 to convert from Host byte order to
3870 * Little Endian order.
3871 **/
3872i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3873 u16 udp_port, u8 protocol_index,
3874 u8 *filter_index,
3875 struct i40e_asq_cmd_details *cmd_details)
3876{
3877 struct i40e_aq_desc desc;
3878 struct i40e_aqc_add_udp_tunnel *cmd =
3879 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3880 struct i40e_aqc_del_udp_tunnel_completion *resp =
3881 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3882 i40e_status status;
3883
3884 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3885
3886 cmd->udp_port = cpu_to_le16(udp_port);
3887 cmd->protocol_type = protocol_index;
3888
3889 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3890
3891 if (!status && filter_index)
3892 *filter_index = resp->index;
3893
3894 return status;
3895}
3896
3897/**
3898 * i40e_aq_del_udp_tunnel
3899 * @hw: pointer to the hw struct
3900 * @index: filter index
3901 * @cmd_details: pointer to command details structure or NULL
3902 **/
3903i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3904 struct i40e_asq_cmd_details *cmd_details)
3905{
3906 struct i40e_aq_desc desc;
3907 struct i40e_aqc_remove_udp_tunnel *cmd =
3908 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3909 i40e_status status;
3910
3911 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3912
3913 cmd->index = index;
3914
3915 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3916
3917 return status;
3918}
3919
3920/**
3921 * i40e_aq_delete_element - Delete switch element
3922 * @hw: pointer to the hw struct
3923 * @seid: the SEID to delete from the switch
3924 * @cmd_details: pointer to command details structure or NULL
3925 *
3926 * This deletes a switch element from the switch.
3927 **/
3928i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3929 struct i40e_asq_cmd_details *cmd_details)
3930{
3931 struct i40e_aq_desc desc;
3932 struct i40e_aqc_switch_seid *cmd =
3933 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3934 i40e_status status;
3935
3936 if (seid == 0)
3937 return I40E_ERR_PARAM;
3938
3939 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3940
3941 cmd->seid = cpu_to_le16(seid);
3942
3943 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3944
3945 return status;
3946}
3947
3948/**
3949 * i40e_aq_dcb_updated - DCB Updated Command
3950 * @hw: pointer to the hw struct
3951 * @cmd_details: pointer to command details structure or NULL
3952 *
3953 * EMP will return when the shared RPB settings have been
3954 * recomputed and modified. The retval field in the descriptor
3955 * will be set to 0 when RPB is modified.
3956 **/
3957i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
3958 struct i40e_asq_cmd_details *cmd_details)
3959{
3960 struct i40e_aq_desc desc;
3961 i40e_status status;
3962
3963 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3964
3965 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3966
3967 return status;
3968}
3969
3970/**
3971 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3972 * @hw: pointer to the hw struct
3973 * @seid: seid for the physical port/switching component/vsi
3974 * @buff: Indirect buffer to hold data parameters and response
3975 * @buff_size: Indirect buffer size
3976 * @opcode: Tx scheduler AQ command opcode
3977 * @cmd_details: pointer to command details structure or NULL
3978 *
3979 * Generic command handler for Tx scheduler AQ commands
3980 **/
3981static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3982 void *buff, u16 buff_size,
3983 enum i40e_admin_queue_opc opcode,
3984 struct i40e_asq_cmd_details *cmd_details)
3985{
3986 struct i40e_aq_desc desc;
3987 struct i40e_aqc_tx_sched_ind *cmd =
3988 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3989 i40e_status status;
3990 bool cmd_param_flag = false;
3991
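	/* Configuration opcodes carry data to the FW, so they need the RD
	 * flag on the indirect buffer; query opcodes only receive data back.
	 */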
3992 switch (opcode) {
3993 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3994 case i40e_aqc_opc_configure_vsi_tc_bw:
3995 case i40e_aqc_opc_enable_switching_comp_ets:
3996 case i40e_aqc_opc_modify_switching_comp_ets:
3997 case i40e_aqc_opc_disable_switching_comp_ets:
3998 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3999 case i40e_aqc_opc_configure_switching_comp_bw_config:
4000 cmd_param_flag = true;
4001 break;
4002 case i40e_aqc_opc_query_vsi_bw_config:
4003 case i40e_aqc_opc_query_vsi_ets_sla_config:
4004 case i40e_aqc_opc_query_switching_comp_ets_config:
4005 case i40e_aqc_opc_query_port_ets_config:
4006 case i40e_aqc_opc_query_switching_comp_bw_config:
4007 cmd_param_flag = false;
4008 break;
4009 default:
4010 return I40E_ERR_PARAM;
4011 }
4012
4013 i40e_fill_default_direct_cmd_desc(&desc, opcode);
4014
4015 /* Indirect command */
4016 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4017 if (cmd_param_flag)
4018 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4019 if (buff_size > I40E_AQ_LARGE_BUF)
4020 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4021
4022 desc.datalen = cpu_to_le16(buff_size);
4023
4024 cmd->vsi_seid = cpu_to_le16(seid);
4025
4026 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4027
4028 return status;
4029}
4030
4031/**
4032 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
4033 * @hw: pointer to the hw struct
4034 * @seid: VSI seid
4035 * @credit: BW limit credits (0 = disabled)
4036 * @max_credit: Max BW limit credits
4037 * @cmd_details: pointer to command details structure or NULL
4038 **/
4039i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
4040 u16 seid, u16 credit, u8 max_credit,
4041 struct i40e_asq_cmd_details *cmd_details)
4042{
4043 struct i40e_aq_desc desc;
4044 struct i40e_aqc_configure_vsi_bw_limit *cmd =
4045 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
4046 i40e_status status;
4047
4048 i40e_fill_default_direct_cmd_desc(&desc,
4049 i40e_aqc_opc_configure_vsi_bw_limit);
4050
4051 cmd->vsi_seid = cpu_to_le16(seid);
4052 cmd->credit = cpu_to_le16(credit);
4053 cmd->max_credit = max_credit;
4054
4055 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4056
4057 return status;
4058}
4059
4060/**
4061 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
4062 * @hw: pointer to the hw struct
4063 * @seid: VSI seid
4064 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
4065 * @cmd_details: pointer to command details structure or NULL
4066 **/
4067i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
4068 u16 seid,
4069 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
4070 struct i40e_asq_cmd_details *cmd_details)
4071{
4072 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4073 i40e_aqc_opc_configure_vsi_tc_bw,
4074 cmd_details);
4075}
4076
4077/**
4078 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4079 * @hw: pointer to the hw struct
4080 * @seid: seid of the switching component connected to Physical Port
4081 * @ets_data: Buffer holding ETS parameters
4082 * @opcode: Tx scheduler AQ command opcode
4083 * @cmd_details: pointer to command details structure or NULL
4084 **/
4085i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4086 u16 seid,
4087 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4088 enum i40e_admin_queue_opc opcode,
4089 struct i40e_asq_cmd_details *cmd_details)
4090{
4091 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4092 sizeof(*ets_data), opcode, cmd_details);
4093}
4094
4095/**
4096 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4097 * @hw: pointer to the hw struct
4098 * @seid: seid of the switching component
4099 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4100 * @cmd_details: pointer to command details structure or NULL
4101 **/
4102i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4103 u16 seid,
4104 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4105 struct i40e_asq_cmd_details *cmd_details)
4106{
4107 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4108 i40e_aqc_opc_configure_switching_comp_bw_config,
4109 cmd_details);
4110}
4111
4112/**
4113 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4114 * @hw: pointer to the hw struct
4115 * @seid: seid of the VSI
4116 * @bw_data: Buffer to hold VSI BW configuration
4117 * @cmd_details: pointer to command details structure or NULL
4118 **/
4119i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4120 u16 seid,
4121 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4122 struct i40e_asq_cmd_details *cmd_details)
4123{
4124 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4125 i40e_aqc_opc_query_vsi_bw_config,
4126 cmd_details);
4127}
4128
4129/**
4130 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4131 * @hw: pointer to the hw struct
4132 * @seid: seid of the VSI
4133 * @bw_data: Buffer to hold VSI BW configuration per TC
4134 * @cmd_details: pointer to command details structure or NULL
4135 **/
4136i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4137 u16 seid,
4138 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4139 struct i40e_asq_cmd_details *cmd_details)
4140{
4141 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4142 i40e_aqc_opc_query_vsi_ets_sla_config,
4143 cmd_details);
4144}
4145
4146/**
4147 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4148 * @hw: pointer to the hw struct
4149 * @seid: seid of the switching component
4150 * @bw_data: Buffer to hold switching component's per TC BW config
4151 * @cmd_details: pointer to command details structure or NULL
4152 **/
4153i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4154 u16 seid,
4155 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4156 struct i40e_asq_cmd_details *cmd_details)
4157{
4158 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4159 i40e_aqc_opc_query_switching_comp_ets_config,
4160 cmd_details);
4161}
4162
4163/**
4164 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4165 * @hw: pointer to the hw struct
4166 * @seid: seid of the VSI or switching component connected to Physical Port
4167 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4168 * @cmd_details: pointer to command details structure or NULL
4169 **/
4170i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4171 u16 seid,
4172 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4173 struct i40e_asq_cmd_details *cmd_details)
4174{
4175 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4176 i40e_aqc_opc_query_port_ets_config,
4177 cmd_details);
4178}
4179
4180/**
4181 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4182 * @hw: pointer to the hw struct
4183 * @seid: seid of the switching component
4184 * @bw_data: Buffer to hold switching component's BW configuration
4185 * @cmd_details: pointer to command details structure or NULL
4186 **/
4187i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4188 u16 seid,
4189 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4190 struct i40e_asq_cmd_details *cmd_details)
4191{
4192 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4193 i40e_aqc_opc_query_switching_comp_bw_config,
4194 cmd_details);
4195}
4196
4197/**
4198 * i40e_validate_filter_settings
4199 * @hw: pointer to the hardware structure
4200 * @settings: Filter control settings
4201 *
4202 * Check and validate the filter control settings passed.
 4203 * The function checks that the filter and context sizes passed for
 4204 * FCoE and PE are valid.
 4205 *
 4206 * Returns 0 if the values passed are valid and within range;
 4207 * otherwise returns an error.
4208 **/
4209static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4210 struct i40e_filter_control_settings *settings)
4211{
4212 u32 fcoe_cntx_size, fcoe_filt_size;
4213 u32 pe_cntx_size, pe_filt_size;
4214 u32 fcoe_fmax;
4215 u32 val;
4216
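	/* The *_num fields are log2-style encodings: the actual size is the
	 * corresponding base size shifted left by the enum value.
	 */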
4217 /* Validate FCoE settings passed */
4218 switch (settings->fcoe_filt_num) {
4219 case I40E_HASH_FILTER_SIZE_1K:
4220 case I40E_HASH_FILTER_SIZE_2K:
4221 case I40E_HASH_FILTER_SIZE_4K:
4222 case I40E_HASH_FILTER_SIZE_8K:
4223 case I40E_HASH_FILTER_SIZE_16K:
4224 case I40E_HASH_FILTER_SIZE_32K:
4225 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4226 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4227 break;
4228 default:
4229 return I40E_ERR_PARAM;
4230 }
4231
4232 switch (settings->fcoe_cntx_num) {
4233 case I40E_DMA_CNTX_SIZE_512:
4234 case I40E_DMA_CNTX_SIZE_1K:
4235 case I40E_DMA_CNTX_SIZE_2K:
4236 case I40E_DMA_CNTX_SIZE_4K:
4237 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4238 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4239 break;
4240 default:
4241 return I40E_ERR_PARAM;
4242 }
4243
4244 /* Validate PE settings passed */
4245 switch (settings->pe_filt_num) {
4246 case I40E_HASH_FILTER_SIZE_1K:
4247 case I40E_HASH_FILTER_SIZE_2K:
4248 case I40E_HASH_FILTER_SIZE_4K:
4249 case I40E_HASH_FILTER_SIZE_8K:
4250 case I40E_HASH_FILTER_SIZE_16K:
4251 case I40E_HASH_FILTER_SIZE_32K:
4252 case I40E_HASH_FILTER_SIZE_64K:
4253 case I40E_HASH_FILTER_SIZE_128K:
4254 case I40E_HASH_FILTER_SIZE_256K:
4255 case I40E_HASH_FILTER_SIZE_512K:
4256 case I40E_HASH_FILTER_SIZE_1M:
4257 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4258 pe_filt_size <<= (u32)settings->pe_filt_num;
4259 break;
4260 default:
4261 return I40E_ERR_PARAM;
4262 }
4263
4264 switch (settings->pe_cntx_num) {
4265 case I40E_DMA_CNTX_SIZE_512:
4266 case I40E_DMA_CNTX_SIZE_1K:
4267 case I40E_DMA_CNTX_SIZE_2K:
4268 case I40E_DMA_CNTX_SIZE_4K:
4269 case I40E_DMA_CNTX_SIZE_8K:
4270 case I40E_DMA_CNTX_SIZE_16K:
4271 case I40E_DMA_CNTX_SIZE_32K:
4272 case I40E_DMA_CNTX_SIZE_64K:
4273 case I40E_DMA_CNTX_SIZE_128K:
4274 case I40E_DMA_CNTX_SIZE_256K:
4275 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4276 pe_cntx_size <<= (u32)settings->pe_cntx_num;
4277 break;
4278 default:
4279 return I40E_ERR_PARAM;
4280 }
4281
4282 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4283 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4284 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4285 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4286 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4287 return I40E_ERR_INVALID_SIZE;
4288
4289 return 0;
4290}
4291
4292/**
4293 * i40e_set_filter_control
4294 * @hw: pointer to the hardware structure
4295 * @settings: Filter control settings
4296 *
 4297 * Set the queue filters for PE/FCoE and enable the filters required
 4298 * for a single PF. These settings are expected to be programmed at
 4299 * driver initialization time.
4300 **/
4301i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4302 struct i40e_filter_control_settings *settings)
4303{
4304 i40e_status ret = 0;
4305 u32 hash_lut_size = 0;
4306 u32 val;
4307
4308 if (!settings)
4309 return I40E_ERR_PARAM;
4310
4311 /* Validate the input settings */
4312 ret = i40e_validate_filter_settings(hw, settings);
4313 if (ret)
4314 return ret;
4315
4316 /* Read the PF Queue Filter control register */
4317 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4318
4319 /* Program required PE hash buckets for the PF */
4320 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4321 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4322 I40E_PFQF_CTL_0_PEHSIZE_MASK;
4323 /* Program required PE contexts for the PF */
4324 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4325 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4326 I40E_PFQF_CTL_0_PEDSIZE_MASK;
4327
4328 /* Program required FCoE hash buckets for the PF */
4329 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4330 val |= ((u32)settings->fcoe_filt_num <<
4331 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4332 I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4333 /* Program required FCoE DDP contexts for the PF */
4334 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4335 val |= ((u32)settings->fcoe_cntx_num <<
4336 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4337 I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4338
4339 /* Program Hash LUT size for the PF */
4340 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
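	/* HASHLUTSIZE: 0 selects the (default) 128-entry RSS LUT, 1 the 512-entry LUT */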
4341 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4342 hash_lut_size = 1;
4343 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4344 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4345
4346 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4347 if (settings->enable_fdir)
4348 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4349 if (settings->enable_ethtype)
4350 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4351 if (settings->enable_macvlan)
4352 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4353
4354 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4355
4356 return 0;
4357}
4358
4359/**
4360 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4361 * @hw: pointer to the hw struct
4362 * @mac_addr: MAC address to use in the filter
4363 * @ethtype: Ethertype to use in the filter
4364 * @flags: Flags that needs to be applied to the filter
4365 * @vsi_seid: seid of the control VSI
4366 * @queue: VSI queue number to send the packet to
4367 * @is_add: Add control packet filter if True else remove
4368 * @stats: Structure to hold information on control filter counts
4369 * @cmd_details: pointer to command details structure or NULL
4370 *
 4371 * This command adds or removes a control packet filter for a control VSI.
 4372 * On completion it updates the perfect filter usage counts in the
 4373 * stats member.
4374 **/
4375i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4376 u8 *mac_addr, u16 ethtype, u16 flags,
4377 u16 vsi_seid, u16 queue, bool is_add,
4378 struct i40e_control_filter_stats *stats,
4379 struct i40e_asq_cmd_details *cmd_details)
4380{
4381 struct i40e_aq_desc desc;
4382 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4383 (struct i40e_aqc_add_remove_control_packet_filter *)
4384 &desc.params.raw;
4385 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4386 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4387 &desc.params.raw;
4388 i40e_status status;
4389
4390 if (vsi_seid == 0)
4391 return I40E_ERR_PARAM;
4392
4393 if (is_add) {
4394 i40e_fill_default_direct_cmd_desc(&desc,
4395 i40e_aqc_opc_add_control_packet_filter);
4396 cmd->queue = cpu_to_le16(queue);
4397 } else {
4398 i40e_fill_default_direct_cmd_desc(&desc,
4399 i40e_aqc_opc_remove_control_packet_filter);
4400 }
4401
4402 if (mac_addr)
4403 ether_addr_copy(cmd->mac, mac_addr);
4404
4405 cmd->etype = cpu_to_le16(ethtype);
4406 cmd->flags = cpu_to_le16(flags);
4407 cmd->seid = cpu_to_le16(vsi_seid);
4408
4409 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4410
4411 if (!status && stats) {
4412 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4413 stats->etype_used = le16_to_cpu(resp->etype_used);
4414 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4415 stats->etype_free = le16_to_cpu(resp->etype_free);
4416 }
4417
4418 return status;
4419}
4420
4421/**
4422 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4423 * @hw: pointer to the hw struct
4424 * @seid: VSI seid to add ethertype filter from
4425 **/
4426void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4427 u16 seid)
4428{
4429#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4430 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4431 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4432 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4433 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4434 i40e_status status;
4435
4436 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4437 seid, 0, true, NULL,
4438 NULL);
4439 if (status)
4440 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4441}
4442
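/* Illustrative sketch, not part of the driver: dropping Tx pause frames on a
 * VSI only requires its SEID.  'main_vsi_seid' is a hypothetical SEID owned
 * by the caller.
 *
 *	i40e_add_filter_to_drop_tx_flow_control_frames(hw, main_vsi_seid);
 */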
4443/**
4444 * i40e_aq_alternate_read
4445 * @hw: pointer to the hardware structure
4446 * @reg_addr0: address of first dword to be read
4447 * @reg_val0: pointer for data read from 'reg_addr0'
4448 * @reg_addr1: address of second dword to be read
4449 * @reg_val1: pointer for data read from 'reg_addr1'
4450 *
4451 * Read one or two dwords from alternate structure. Fields are indicated
4452 * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1' pointer
4453 * is not passed, only the register at 'reg_addr0' is read.
4454 *
4455 **/
4456static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4457 u32 reg_addr0, u32 *reg_val0,
4458 u32 reg_addr1, u32 *reg_val1)
4459{
4460 struct i40e_aq_desc desc;
4461 struct i40e_aqc_alternate_write *cmd_resp =
4462 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4463 i40e_status status;
4464
4465 if (!reg_val0)
4466 return I40E_ERR_PARAM;
4467
4468 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4469 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4470 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4471
4472 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4473
4474 if (!status) {
4475 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4476
4477 if (reg_val1)
4478 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4479 }
4480
4481 return status;
4482}
4483
4484/**
4485 * i40e_aq_resume_port_tx
4486 * @hw: pointer to the hardware structure
4487 * @cmd_details: pointer to command details structure or NULL
4488 *
4489 * Resume port's Tx traffic
4490 **/
4491i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4492 struct i40e_asq_cmd_details *cmd_details)
4493{
4494 struct i40e_aq_desc desc;
4495 i40e_status status;
4496
4497 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4498
4499 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4500
4501 return status;
4502}
4503
4504/**
4505 * i40e_set_pci_config_data - store PCI bus info
4506 * @hw: pointer to hardware structure
4507 * @link_status: the link status word from PCI config space
4508 *
4509 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4510 **/
4511void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4512{
4513 hw->bus.type = i40e_bus_type_pci_express;
4514
4515 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4516 case PCI_EXP_LNKSTA_NLW_X1:
4517 hw->bus.width = i40e_bus_width_pcie_x1;
4518 break;
4519 case PCI_EXP_LNKSTA_NLW_X2:
4520 hw->bus.width = i40e_bus_width_pcie_x2;
4521 break;
4522 case PCI_EXP_LNKSTA_NLW_X4:
4523 hw->bus.width = i40e_bus_width_pcie_x4;
4524 break;
4525 case PCI_EXP_LNKSTA_NLW_X8:
4526 hw->bus.width = i40e_bus_width_pcie_x8;
4527 break;
4528 default:
4529 hw->bus.width = i40e_bus_width_unknown;
4530 break;
4531 }
4532
4533 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4534 case PCI_EXP_LNKSTA_CLS_2_5GB:
4535 hw->bus.speed = i40e_bus_speed_2500;
4536 break;
4537 case PCI_EXP_LNKSTA_CLS_5_0GB:
4538 hw->bus.speed = i40e_bus_speed_5000;
4539 break;
4540 case PCI_EXP_LNKSTA_CLS_8_0GB:
4541 hw->bus.speed = i40e_bus_speed_8000;
4542 break;
4543 default:
4544 hw->bus.speed = i40e_bus_speed_unknown;
4545 break;
4546 }
4547}
4548
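/* Illustrative sketch, not part of the driver: the link status word is
 * normally read from PCIe config space before being handed to
 * i40e_set_pci_config_data().  'pdev' is a hypothetical struct pci_dev.
 *
 *	u16 link_status;
 *
 *	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
 *	i40e_set_pci_config_data(hw, link_status);
 */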
4549/**
4550 * i40e_aq_debug_dump
4551 * @hw: pointer to the hardware structure
4552 * @cluster_id: specific cluster to dump
4553 * @table_id: table id within cluster
4554 * @start_index: index of line in the block to read
4555 * @buff_size: dump buffer size
4556 * @buff: dump buffer
4557 * @ret_buff_size: actual buffer size returned
4558 * @ret_next_table: next block to read
4559 * @ret_next_index: next index to read
4560 * @cmd_details: pointer to command details structure or NULL
4561 *
4562 * Dump internal FW/HW data for debug purposes.
4563 *
4564 **/
4565i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4566 u8 table_id, u32 start_index, u16 buff_size,
4567 void *buff, u16 *ret_buff_size,
4568 u8 *ret_next_table, u32 *ret_next_index,
4569 struct i40e_asq_cmd_details *cmd_details)
4570{
4571 struct i40e_aq_desc desc;
4572 struct i40e_aqc_debug_dump_internals *cmd =
4573 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4574 struct i40e_aqc_debug_dump_internals *resp =
4575 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4576 i40e_status status;
4577
4578 if (buff_size == 0 || !buff)
4579 return I40E_ERR_PARAM;
4580
4581 i40e_fill_default_direct_cmd_desc(&desc,
4582 i40e_aqc_opc_debug_dump_internals);
4583 /* Indirect Command */
4584 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4585 if (buff_size > I40E_AQ_LARGE_BUF)
4586 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4587
4588 cmd->cluster_id = cluster_id;
4589 cmd->table_id = table_id;
4590 cmd->idx = cpu_to_le32(start_index);
4591
4592 desc.datalen = cpu_to_le16(buff_size);
4593
4594 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4595 if (!status) {
4596 if (ret_buff_size)
4597 *ret_buff_size = le16_to_cpu(desc.datalen);
4598 if (ret_next_table)
4599 *ret_next_table = resp->table_id;
4600 if (ret_next_index)
4601 *ret_next_index = le32_to_cpu(resp->idx);
4602 }
4603
4604 return status;
4605}
4606
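/* Illustrative sketch, not part of the driver: a single dump request; the
 * returned next_table/next_index cookies can be fed back into subsequent
 * calls to continue the dump.  'cluster_id', 'table_id', 'buff' and
 * 'buff_size' are caller-chosen values here.
 *
 *	u8 next_table = 0;
 *	u32 next_index = 0;
 *	u16 ret_len = 0;
 *
 *	if (!i40e_aq_debug_dump(hw, cluster_id, table_id, 0, buff_size, buff,
 *				&ret_len, &next_table, &next_index, NULL))
 *		hw_dbg(hw, "dumped %u bytes\n", ret_len);
 */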
4607/**
4608 * i40e_read_bw_from_alt_ram
4609 * @hw: pointer to the hardware structure
4610 * @max_bw: pointer for max_bw read
4611 * @min_bw: pointer for min_bw read
4612 * @min_valid: pointer for bool that is true if min_bw is a valid value
4613 * @max_valid: pointer for bool that is true if max_bw is a valid value
4614 *
4615 * Read bandwidth from the alternate RAM for the given PF
4616 **/
4617i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4618 u32 *max_bw, u32 *min_bw,
4619 bool *min_valid, bool *max_valid)
4620{
4621 i40e_status status;
4622 u32 max_bw_addr, min_bw_addr;
4623
4624 /* Calculate the address of the min/max bw registers */
4625 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4626 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4627 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4628 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4629 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4630 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4631
4632 /* Read the bandwidths from alt ram */
4633 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4634 min_bw_addr, min_bw);
4635
4636 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4637 *min_valid = true;
4638 else
4639 *min_valid = false;
4640
4641 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4642 *max_valid = true;
4643 else
4644 *max_valid = false;
4645
4646 return status;
4647}
4648
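/* Illustrative sketch, not part of the driver: read this PF's alternate RAM
 * bandwidth limits and only trust values flagged as valid.
 *
 *	u32 max_bw = 0, min_bw = 0;
 *	bool min_valid = false, max_valid = false;
 *
 *	if (!i40e_read_bw_from_alt_ram(hw, &max_bw, &min_bw,
 *				       &min_valid, &max_valid)) {
 *		if (max_valid)
 *			(use max_bw)
 *		if (min_valid)
 *			(use min_bw)
 *	}
 */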
4649/**
4650 * i40e_aq_configure_partition_bw
4651 * @hw: pointer to the hardware structure
4652 * @bw_data: Buffer holding valid PFs and BW limits
4653 * @cmd_details: pointer to command details structure or NULL
4654 *
4655 * Configure the partitions' guaranteed/maximum bandwidth
4656 **/
4657i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4658 struct i40e_aqc_configure_partition_bw_data *bw_data,
4659 struct i40e_asq_cmd_details *cmd_details)
4660{
4661 i40e_status status;
4662 struct i40e_aq_desc desc;
4663 u16 bwd_size = sizeof(*bw_data);
4664
4665 i40e_fill_default_direct_cmd_desc(&desc,
4666 i40e_aqc_opc_configure_partition_bw);
4667
4668 /* Indirect command */
4669 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4670 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4671
4672 if (bwd_size > I40E_AQ_LARGE_BUF)
4673 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4674
4675 desc.datalen = cpu_to_le16(bwd_size);
4676
4677 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4678 cmd_details);
4679
4680 return status;
4681}
4682
4683/**
4684 * i40e_read_phy_register_clause22
4685 * @hw: pointer to the HW structure
4686 * @reg: register address in the page
4687 * @phy_addr: PHY address on MDIO interface
4688 * @value: PHY register value
4689 *
4690 * Reads specified PHY register value
4691 **/
4692i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4693 u16 reg, u8 phy_addr, u16 *value)
4694{
4695 i40e_status status = I40E_ERR_TIMEOUT;
4696 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4697 u32 command = 0;
4698 u16 retry = 1000;
4699
4700 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4701 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4702 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4703 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4704 (I40E_GLGEN_MSCA_MDICMD_MASK);
4705 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4706 do {
4707 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4708 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4709 status = 0;
4710 break;
4711 }
4712 udelay(10);
4713 retry--;
4714 } while (retry);
4715
4716 if (status) {
4717 i40e_debug(hw, I40E_DEBUG_PHY,
4718 "PHY: Can't write command to external PHY.\n");
4719 } else {
4720 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4721 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4722 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4723 }
4724
4725 return status;
4726}
4727
4728/**
4729 * i40e_write_phy_register_clause22
4730 * @hw: pointer to the HW structure
4731 * @reg: register address in the page
4732 * @phy_addr: PHY address on MDIO interface
4733 * @value: PHY register value
4734 *
4735 * Writes specified PHY register value
4736 **/
4737i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4738 u16 reg, u8 phy_addr, u16 value)
4739{
4740 i40e_status status = I40E_ERR_TIMEOUT;
4741 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4742 u32 command = 0;
4743 u16 retry = 1000;
4744
4745 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4746 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4747
4748 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4749 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4750 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4751 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4752 (I40E_GLGEN_MSCA_MDICMD_MASK);
4753
4754 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4755 do {
4756 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4757 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4758 status = 0;
4759 break;
4760 }
4761 udelay(10);
4762 retry--;
4763 } while (retry);
4764
4765 return status;
4766}
4767
4768/**
4769 * i40e_read_phy_register_clause45
4770 * @hw: pointer to the HW structure
4771 * @page: registers page number
4772 * @reg: register address in the page
4773 * @phy_addr: PHY address on MDIO interface
4774 * @value: PHY register value
4775 *
4776 * Reads specified PHY register value
4777 **/
4778i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4779 u8 page, u16 reg, u8 phy_addr, u16 *value)
4780{
4781 i40e_status status = I40E_ERR_TIMEOUT;
4782 u32 command = 0;
4783 u16 retry = 1000;
4784 u8 port_num = hw->func_caps.mdio_port_num;
4785
4786 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4787 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4788 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4789 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4790 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4791 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4792 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4793 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4794 do {
4795 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4796 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4797 status = 0;
4798 break;
4799 }
4800 usleep_range(10, 20);
4801 retry--;
4802 } while (retry);
4803
4804 if (status) {
4805 i40e_debug(hw, I40E_DEBUG_PHY,
4806 "PHY: Can't write command to external PHY.\n");
4807 goto phy_read_end;
4808 }
4809
4810 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4811 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4812 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4813 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4814 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4815 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4816 status = I40E_ERR_TIMEOUT;
4817 retry = 1000;
4818 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4819 do {
4820 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4821 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4822 status = 0;
4823 break;
4824 }
4825 usleep_range(10, 20);
4826 retry--;
4827 } while (retry);
4828
4829 if (!status) {
4830 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4831 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4832 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4833 } else {
4834 i40e_debug(hw, I40E_DEBUG_PHY,
4835 "PHY: Can't read register value from external PHY.\n");
4836 }
4837
4838phy_read_end:
4839 return status;
4840}
4841
4842/**
4843 * i40e_write_phy_register_clause45
4844 * @hw: pointer to the HW structure
4845 * @page: registers page number
4846 * @reg: register address in the page
4847 * @phy_addr: PHY address on MDIO interface
4848 * @value: PHY register value
4849 *
4850 * Writes value to specified PHY register
4851 **/
4852i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4853 u8 page, u16 reg, u8 phy_addr, u16 value)
4854{
4855 i40e_status status = I40E_ERR_TIMEOUT;
4856 u32 command = 0;
4857 u16 retry = 1000;
4858 u8 port_num = hw->func_caps.mdio_port_num;
4859
4860 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4861 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4862 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4863 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4864 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4865 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4866 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4867 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4868 do {
4869 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4870 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4871 status = 0;
4872 break;
4873 }
4874 usleep_range(10, 20);
4875 retry--;
4876 } while (retry);
4877 if (status) {
4878 i40e_debug(hw, I40E_DEBUG_PHY,
4879 "PHY: Can't write command to external PHY.\n");
4880 goto phy_write_end;
4881 }
4882
4883 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4884 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4885
4886 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4887 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4888 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4889 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4890 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4891 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4892 status = I40E_ERR_TIMEOUT;
4893 retry = 1000;
4894 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4895 do {
4896 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4897 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4898 status = 0;
4899 break;
4900 }
4901 usleep_range(10, 20);
4902 retry--;
4903 } while (retry);
4904
4905phy_write_end:
4906 return status;
4907}
4908
4909/**
4910 * i40e_write_phy_register
4911 * @hw: pointer to the HW structure
4912 * @page: registers page number
4913 * @reg: register address in the page
4914 * @phy_addr: PHY address on MDIO interface
4915 * @value: PHY register value
4916 *
4917 * Writes value to specified PHY register
4918 **/
4919i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4920 u8 page, u16 reg, u8 phy_addr, u16 value)
4921{
4922 i40e_status status;
4923
4924 switch (hw->device_id) {
4925 case I40E_DEV_ID_1G_BASE_T_X722:
4926 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4927 value);
4928 break;
4929 case I40E_DEV_ID_5G_BASE_T_BC:
4930 case I40E_DEV_ID_10G_BASE_T:
4931 case I40E_DEV_ID_10G_BASE_T4:
4932 case I40E_DEV_ID_10G_BASE_T_BC:
4933 case I40E_DEV_ID_10G_BASE_T_X722:
4934 case I40E_DEV_ID_25G_B:
4935 case I40E_DEV_ID_25G_SFP28:
4936 status = i40e_write_phy_register_clause45(hw, page, reg,
4937 phy_addr, value);
4938 break;
4939 default:
4940 status = I40E_ERR_UNKNOWN_PHY;
4941 break;
4942 }
4943
4944 return status;
4945}
4946
4947/**
4948 * i40e_read_phy_register
4949 * @hw: pointer to the HW structure
4950 * @page: registers page number
4951 * @reg: register address in the page
4952 * @phy_addr: PHY address on MDIO interface
4953 * @value: PHY register value
4954 *
4955 * Reads specified PHY register value
4956 **/
4957i40e_status i40e_read_phy_register(struct i40e_hw *hw,
4958 u8 page, u16 reg, u8 phy_addr, u16 *value)
4959{
4960 i40e_status status;
4961
4962 switch (hw->device_id) {
4963 case I40E_DEV_ID_1G_BASE_T_X722:
4964 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4965 value);
4966 break;
4967 case I40E_DEV_ID_5G_BASE_T_BC:
4968 case I40E_DEV_ID_10G_BASE_T:
4969 case I40E_DEV_ID_10G_BASE_T4:
4970 case I40E_DEV_ID_10G_BASE_T_BC:
4971 case I40E_DEV_ID_10G_BASE_T_X722:
4972 case I40E_DEV_ID_25G_B:
4973 case I40E_DEV_ID_25G_SFP28:
4974 status = i40e_read_phy_register_clause45(hw, page, reg,
4975 phy_addr, value);
4976 break;
4977 default:
4978 status = I40E_ERR_UNKNOWN_PHY;
4979 break;
4980 }
4981
4982 return status;
4983}
4984
4985/**
4986 * i40e_get_phy_address
4987 * @hw: pointer to the HW structure
4988 * @dev_num: PHY port num whose address we want
4989 *
4990 * Gets PHY address for current port
4991 **/
4992u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4993{
4994 u8 port_num = hw->func_caps.mdio_port_num;
4995 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4996
4997 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4998}
4999
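/* Reading of the shift above (based on the code rather than the register
 * spec): I40E_GLGEN_MDIO_I2C_SEL packs one 5-bit PHY address per MDIO
 * device starting at bit 5, so dev_num 0 lives in bits 9:5, dev_num 1 in
 * bits 14:10, and so on:
 *
 *	addr(dev_num) = (reg_val >> ((dev_num + 1) * 5)) & 0x1f
 */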
5000/**
5001 * i40e_blink_phy_link_led
5002 * @hw: pointer to the HW structure
5003 * @time: how long the LED will blink, in seconds
5004 * @interval: gap between LED on and off in msecs
5005 *
5006 * Blinks PHY link LED
5007 **/
5008i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
5009 u32 time, u32 interval)
5010{
5011 i40e_status status = 0;
5012 u32 i;
5013 u16 led_ctl;
5014 u16 gpio_led_port;
5015 u16 led_reg;
5016 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
5017 u8 phy_addr = 0;
5018 u8 port_num;
5019
5020 i = rd32(hw, I40E_PFGEN_PORTNUM);
5021 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5022 phy_addr = i40e_get_phy_address(hw, port_num);
5023
5024 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5025 led_addr++) {
5026 status = i40e_read_phy_register_clause45(hw,
5027 I40E_PHY_COM_REG_PAGE,
5028 led_addr, phy_addr,
5029 &led_reg);
5030 if (status)
5031 goto phy_blinking_end;
5032 led_ctl = led_reg;
5033 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5034 led_reg = 0;
5035 status = i40e_write_phy_register_clause45(hw,
5036 I40E_PHY_COM_REG_PAGE,
5037 led_addr, phy_addr,
5038 led_reg);
5039 if (status)
5040 goto phy_blinking_end;
5041 break;
5042 }
5043 }
5044
5045 if (time > 0 && interval > 0) {
5046 for (i = 0; i < time * 1000; i += interval) {
5047 status = i40e_read_phy_register_clause45(hw,
5048 I40E_PHY_COM_REG_PAGE,
5049 led_addr, phy_addr, &led_reg);
5050 if (status)
5051 goto restore_config;
5052 if (led_reg & I40E_PHY_LED_MANUAL_ON)
5053 led_reg = 0;
5054 else
5055 led_reg = I40E_PHY_LED_MANUAL_ON;
5056 status = i40e_write_phy_register_clause45(hw,
5057 I40E_PHY_COM_REG_PAGE,
5058 led_addr, phy_addr, led_reg);
5059 if (status)
5060 goto restore_config;
5061 msleep(interval);
5062 }
5063 }
5064
5065restore_config:
5066 status = i40e_write_phy_register_clause45(hw,
5067 I40E_PHY_COM_REG_PAGE,
5068 led_addr, phy_addr, led_ctl);
5069
5070phy_blinking_end:
5071 return status;
5072}
5073
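/* Illustrative sketch, not part of the driver: blink the link LED for an
 * identify operation, e.g. 5 seconds with a 500 ms on/off interval.
 *
 *	if (i40e_blink_phy_link_led(hw, 5, 500))
 *		hw_dbg(hw, "LED blink failed\n");
 */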
5074/**
5075 * i40e_led_get_reg - read LED register
5076 * @hw: pointer to the HW structure
5077 * @led_addr: LED register address
5078 * @reg_val: read register value
5079 **/
5080static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5081 u32 *reg_val)
5082{
5083 enum i40e_status_code status;
5084 u8 phy_addr = 0;
5085 u8 port_num;
5086 u32 i;
5087
5088 *reg_val = 0;
5089 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5090 status =
5091 i40e_aq_get_phy_register(hw,
5092 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5093 I40E_PHY_COM_REG_PAGE, true,
5094 I40E_PHY_LED_PROV_REG_1,
5095 reg_val, NULL);
5096 } else {
5097 i = rd32(hw, I40E_PFGEN_PORTNUM);
5098 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5099 phy_addr = i40e_get_phy_address(hw, port_num);
5100 status = i40e_read_phy_register_clause45(hw,
5101 I40E_PHY_COM_REG_PAGE,
5102 led_addr, phy_addr,
5103 (u16 *)reg_val);
5104 }
5105 return status;
5106}
5107
5108/**
5109 * i40e_led_set_reg - write LED register
5110 * @hw: pointer to the HW structure
5111 * @led_addr: LED register address
5112 * @reg_val: register value to write
5113 **/
5114static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5115 u32 reg_val)
5116{
5117 enum i40e_status_code status;
5118 u8 phy_addr = 0;
5119 u8 port_num;
5120 u32 i;
5121
5122 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5123 status =
5124 i40e_aq_set_phy_register(hw,
5125 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5126 I40E_PHY_COM_REG_PAGE, true,
5127 I40E_PHY_LED_PROV_REG_1,
5128 reg_val, NULL);
5129 } else {
5130 i = rd32(hw, I40E_PFGEN_PORTNUM);
5131 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5132 phy_addr = i40e_get_phy_address(hw, port_num);
5133 status = i40e_write_phy_register_clause45(hw,
5134 I40E_PHY_COM_REG_PAGE,
5135 led_addr, phy_addr,
5136 (u16)reg_val);
5137 }
5138
5139 return status;
5140}
5141
5142/**
5143 * i40e_led_get_phy - return current on/off mode
5144 * @hw: pointer to the hw struct
5145 * @led_addr: address of led register to use
5146 * @val: original value of register to use
5147 *
5148 **/
5149i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5150 u16 *val)
5151{
5152 i40e_status status = 0;
5153 u16 gpio_led_port;
5154 u8 phy_addr = 0;
5155 u16 reg_val;
5156 u16 temp_addr;
5157 u8 port_num;
5158 u32 i;
5159 u32 reg_val_aq;
5160
5161 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5162 status =
5163 i40e_aq_get_phy_register(hw,
5164 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5165 I40E_PHY_COM_REG_PAGE, true,
5166 I40E_PHY_LED_PROV_REG_1,
5167						 &reg_val_aq, NULL);
5168 if (status == I40E_SUCCESS)
5169 *val = (u16)reg_val_aq;
5170 return status;
5171 }
5172 temp_addr = I40E_PHY_LED_PROV_REG_1;
5173 i = rd32(hw, I40E_PFGEN_PORTNUM);
5174 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5175 phy_addr = i40e_get_phy_address(hw, port_num);
5176
5177 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5178 temp_addr++) {
5179 status = i40e_read_phy_register_clause45(hw,
5180 I40E_PHY_COM_REG_PAGE,
5181 temp_addr, phy_addr,
5182							 &reg_val);
5183 if (status)
5184 return status;
5185 *val = reg_val;
5186 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5187 *led_addr = temp_addr;
5188 break;
5189 }
5190 }
5191 return status;
5192}
5193
5194/**
5195 * i40e_led_set_phy
5196 * @hw: pointer to the HW structure
5197 * @on: true or false
5198 * @led_addr: address of led register to use
5199 * @mode: original val plus bit for set or ignore
5200 *
5201 * Set LEDs on or off when controlled by the PHY
5202 *
5203 **/
5204i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5205 u16 led_addr, u32 mode)
5206{
5207 i40e_status status = 0;
5208 u32 led_ctl = 0;
5209 u32 led_reg = 0;
5210
5211 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5212 if (status)
5213 return status;
5214 led_ctl = led_reg;
5215 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5216 led_reg = 0;
5217 status = i40e_led_set_reg(hw, led_addr, led_reg);
5218 if (status)
5219 return status;
5220 }
5221 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5222 if (status)
5223 goto restore_config;
5224 if (on)
5225 led_reg = I40E_PHY_LED_MANUAL_ON;
5226 else
5227 led_reg = 0;
5228
5229 status = i40e_led_set_reg(hw, led_addr, led_reg);
5230 if (status)
5231 goto restore_config;
5232 if (mode & I40E_PHY_LED_MODE_ORIG) {
5233 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5234 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5235 }
5236 return status;
5237
5238restore_config:
5239 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5240 return status;
5241}
5242
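/* Illustrative sketch, not part of the driver: a typical identify flow saves
 * the original LED mode with i40e_led_get_phy(), forces the LED on, and
 * later restores the saved mode through the I40E_PHY_LED_MODE_ORIG path.
 *
 *	u16 led_addr = 0, orig_mode = 0;
 *
 *	if (!i40e_led_get_phy(hw, &led_addr, &orig_mode))
 *		i40e_led_set_phy(hw, true, led_addr, 0);
 *
 *	(later, when identify ends)
 *	i40e_led_set_phy(hw, false, led_addr,
 *			 orig_mode | I40E_PHY_LED_MODE_ORIG);
 */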
5243/**
5244 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5245 * @hw: pointer to the hw struct
5246 * @reg_addr: register address
5247 * @reg_val: ptr to register value
5248 * @cmd_details: pointer to command details structure or NULL
5249 *
5250 * Use the firmware to read the Rx control register,
5251 * especially useful if the Rx unit is under heavy pressure
5252 **/
5253i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5254 u32 reg_addr, u32 *reg_val,
5255 struct i40e_asq_cmd_details *cmd_details)
5256{
5257 struct i40e_aq_desc desc;
5258 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5259 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5260 i40e_status status;
5261
5262 if (!reg_val)
5263 return I40E_ERR_PARAM;
5264
5265 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5266
5267 cmd_resp->address = cpu_to_le32(reg_addr);
5268
5269 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5270
5271 if (status == 0)
5272 *reg_val = le32_to_cpu(cmd_resp->value);
5273
5274 return status;
5275}
5276
5277/**
5278 * i40e_read_rx_ctl - read from an Rx control register
5279 * @hw: pointer to the hw struct
5280 * @reg_addr: register address
5281 **/
5282u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5283{
5284 i40e_status status = 0;
5285 bool use_register;
5286 int retry = 5;
5287 u32 val = 0;
5288
5289 use_register = (((hw->aq.api_maj_ver == 1) &&
5290 (hw->aq.api_min_ver < 5)) ||
5291 (hw->mac.type == I40E_MAC_X722));
5292 if (!use_register) {
5293do_retry:
5294 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5295 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5296 usleep_range(1000, 2000);
5297 retry--;
5298 goto do_retry;
5299 }
5300 }
5301
5302 /* if the AQ access failed, try the old-fashioned way */
5303 if (status || use_register)
5304 val = rd32(hw, reg_addr);
5305
5306 return val;
5307}
5308
5309/**
5310 * i40e_aq_rx_ctl_write_register
5311 * @hw: pointer to the hw struct
5312 * @reg_addr: register address
5313 * @reg_val: register value
5314 * @cmd_details: pointer to command details structure or NULL
5315 *
5316 * Use the firmware to write to an Rx control register,
5317 * especially useful if the Rx unit is under heavy pressure
5318 **/
5319i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5320 u32 reg_addr, u32 reg_val,
5321 struct i40e_asq_cmd_details *cmd_details)
5322{
5323 struct i40e_aq_desc desc;
5324 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5325 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5326 i40e_status status;
5327
5328 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5329
5330 cmd->address = cpu_to_le32(reg_addr);
5331 cmd->value = cpu_to_le32(reg_val);
5332
5333 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5334
5335 return status;
5336}
5337
5338/**
5339 * i40e_write_rx_ctl - write to an Rx control register
5340 * @hw: pointer to the hw struct
5341 * @reg_addr: register address
5342 * @reg_val: register value
5343 **/
5344void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5345{
5346 i40e_status status = 0;
5347 bool use_register;
5348 int retry = 5;
5349
5350 use_register = (((hw->aq.api_maj_ver == 1) &&
5351 (hw->aq.api_min_ver < 5)) ||
5352 (hw->mac.type == I40E_MAC_X722));
5353 if (!use_register) {
5354do_retry:
5355 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5356 reg_val, NULL);
5357 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5358 usleep_range(1000, 2000);
5359 retry--;
5360 goto do_retry;
5361 }
5362 }
5363
5364 /* if the AQ access failed, try the old-fashioned way */
5365 if (status || use_register)
5366 wr32(hw, reg_addr, reg_val);
5367}
5368
5369/**
5370 * i40e_mdio_if_number_selection - MDIO I/F number selection
5371 * @hw: pointer to the hw struct
5372 * @set_mdio: use MDIO I/F number specified by mdio_num
5373 * @mdio_num: MDIO I/F number
5374 * @cmd: pointer to PHY Register command structure
5375 **/
5376static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5377 u8 mdio_num,
5378 struct i40e_aqc_phy_register_access *cmd)
5379{
5380 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5381 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5382 cmd->cmd_flags |=
5383 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5384 ((mdio_num <<
5385 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5386 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5387 else
5388 i40e_debug(hw, I40E_DEBUG_PHY,
5389 "MDIO I/F number selection not supported by current FW version.\n");
5390 }
5391}
5392
5393/**
5394 * i40e_aq_set_phy_register_ext
5395 * @hw: pointer to the hw struct
5396 * @phy_select: select which phy should be accessed
5397 * @dev_addr: PHY device address
 * @page_change: flag indicating whether the QSFP page may be changed
5398 * @set_mdio: use MDIO I/F number specified by mdio_num
5399 * @mdio_num: MDIO I/F number
5400 * @reg_addr: PHY register address
5401 * @reg_val: new register value
5402 * @cmd_details: pointer to command details structure or NULL
5403 *
5404 * Write the external PHY register.
5405 * NOTE: In common cases the MDIO I/F number should not be changed, that's why
5406 * you may use the simple wrapper i40e_aq_set_phy_register.
5407 **/
5408enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5409 u8 phy_select, u8 dev_addr, bool page_change,
5410 bool set_mdio, u8 mdio_num,
5411 u32 reg_addr, u32 reg_val,
5412 struct i40e_asq_cmd_details *cmd_details)
5413{
5414 struct i40e_aq_desc desc;
5415 struct i40e_aqc_phy_register_access *cmd =
5416 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5417 i40e_status status;
5418
5419 i40e_fill_default_direct_cmd_desc(&desc,
5420 i40e_aqc_opc_set_phy_register);
5421
5422 cmd->phy_interface = phy_select;
5423 cmd->dev_address = dev_addr;
5424 cmd->reg_address = cpu_to_le32(reg_addr);
5425 cmd->reg_value = cpu_to_le32(reg_val);
5426
5427 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5428
5429 if (!page_change)
5430 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5431
5432 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5433
5434 return status;
5435}
5436
5437/**
5438 * i40e_aq_get_phy_register_ext
5439 * @hw: pointer to the hw struct
5440 * @phy_select: select which phy should be accessed
5441 * @dev_addr: PHY device address
 * @page_change: flag indicating whether the QSFP page may be changed
5442 * @set_mdio: use MDIO I/F number specified by mdio_num
5443 * @mdio_num: MDIO I/F number
5444 * @reg_addr: PHY register address
5445 * @reg_val: read register value
5446 * @cmd_details: pointer to command details structure or NULL
5447 *
5448 * Read the external PHY register.
5449 * NOTE: In common cases the MDIO I/F number should not be changed, that's why
5450 * you may use the simple wrapper i40e_aq_get_phy_register.
5451 **/
5452enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5453 u8 phy_select, u8 dev_addr, bool page_change,
5454 bool set_mdio, u8 mdio_num,
5455 u32 reg_addr, u32 *reg_val,
5456 struct i40e_asq_cmd_details *cmd_details)
5457{
5458 struct i40e_aq_desc desc;
5459 struct i40e_aqc_phy_register_access *cmd =
5460 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5461 i40e_status status;
5462
5463 i40e_fill_default_direct_cmd_desc(&desc,
5464 i40e_aqc_opc_get_phy_register);
5465
5466 cmd->phy_interface = phy_select;
5467 cmd->dev_address = dev_addr;
5468 cmd->reg_address = cpu_to_le32(reg_addr);
5469
5470 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5471
5472 if (!page_change)
5473 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5474
5475 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5476 if (!status)
5477 *reg_val = le32_to_cpu(cmd->reg_value);
5478
5479 return status;
5480}
5481
5482/**
5483 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5484 * @hw: pointer to the hw struct
5485 * @buff: command buffer (size in bytes = buff_size)
5486 * @buff_size: buffer size in bytes
5487 * @track_id: package tracking id
5488 * @error_offset: returns error offset
5489 * @error_info: returns error information
5490 * @cmd_details: pointer to command details structure or NULL
5491 **/
5492enum
5493i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5494 u16 buff_size, u32 track_id,
5495 u32 *error_offset, u32 *error_info,
5496 struct i40e_asq_cmd_details *cmd_details)
5497{
5498 struct i40e_aq_desc desc;
5499 struct i40e_aqc_write_personalization_profile *cmd =
5500 (struct i40e_aqc_write_personalization_profile *)
5501 &desc.params.raw;
5502 struct i40e_aqc_write_ddp_resp *resp;
5503 i40e_status status;
5504
5505 i40e_fill_default_direct_cmd_desc(&desc,
5506 i40e_aqc_opc_write_personalization_profile);
5507
5508 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5509 if (buff_size > I40E_AQ_LARGE_BUF)
5510 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5511
5512 desc.datalen = cpu_to_le16(buff_size);
5513
5514 cmd->profile_track_id = cpu_to_le32(track_id);
5515
5516 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5517 if (!status) {
5518 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5519 if (error_offset)
5520 *error_offset = le32_to_cpu(resp->error_offset);
5521 if (error_info)
5522 *error_info = le32_to_cpu(resp->error_info);
5523 }
5524
5525 return status;
5526}
5527
5528/**
5529 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5530 * @hw: pointer to the hw struct
5531 * @buff: command buffer (size in bytes = buff_size)
5532 * @buff_size: buffer size in bytes
5533 * @flags: AdminQ command flags
5534 * @cmd_details: pointer to command details structure or NULL
5535 **/
5536enum
5537i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5538 u16 buff_size, u8 flags,
5539 struct i40e_asq_cmd_details *cmd_details)
5540{
5541 struct i40e_aq_desc desc;
5542 struct i40e_aqc_get_applied_profiles *cmd =
5543 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5544 i40e_status status;
5545
5546 i40e_fill_default_direct_cmd_desc(&desc,
5547 i40e_aqc_opc_get_personalization_profile_list);
5548
5549 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5550 if (buff_size > I40E_AQ_LARGE_BUF)
5551 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5552 desc.datalen = cpu_to_le16(buff_size);
5553
5554 cmd->flags = flags;
5555
5556 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5557
5558 return status;
5559}
5560
5561/**
5562 * i40e_find_segment_in_package
5563 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5564 * @pkg_hdr: pointer to the package header to be searched
5565 *
5566 * This function searches a package file for a particular segment type. On
5567 * success it returns a pointer to the segment header, otherwise it will
5568 * return NULL.
5569 **/
5570struct i40e_generic_seg_header *
5571i40e_find_segment_in_package(u32 segment_type,
5572 struct i40e_package_header *pkg_hdr)
5573{
5574 struct i40e_generic_seg_header *segment;
5575 u32 i;
5576
5577 /* Search all package segments for the requested segment type */
5578 for (i = 0; i < pkg_hdr->segment_count; i++) {
5579 segment =
5580 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5581 pkg_hdr->segment_offset[i]);
5582
5583 if (segment->type == segment_type)
5584 return segment;
5585 }
5586
5587 return NULL;
5588}
5589
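/* Illustrative sketch, not part of the driver: DDP callers locate the i40e
 * segment in a package image and treat it as a profile segment.  'pkg_hdr'
 * is assumed to point at a package header already read into memory.
 *
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (!profile)
 *		return I40E_ERR_PARAM;
 */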
5590/* Get section table in profile */
5591#define I40E_SECTION_TABLE(profile, sec_tbl) \
5592 do { \
5593 struct i40e_profile_segment *p = (profile); \
5594 u32 count; \
5595 u32 *nvm; \
5596 count = p->device_table_count; \
5597 nvm = (u32 *)&p->device_table[count]; \
5598 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5599 } while (0)
5600
5601/* Get section header in profile */
5602#define I40E_SECTION_HEADER(profile, offset) \
5603 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5604
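/* Layout walked by I40E_SECTION_TABLE above, as implied by the macro: the
 * profile segment header is followed by 'device_table_count' device-table
 * entries, then a table of dwords whose first dword is its own entry count,
 * and the section table starts right after that table, i.e. at
 * &nvm[nvm[0] + 1].
 */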
5605/**
5606 * i40e_find_section_in_profile
5607 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5608 * @profile: pointer to the i40e segment header to be searched
5609 *
5610 * This function searches the i40e segment for a particular section type. On
5611 * success it returns a pointer to the section header, otherwise it will
5612 * return NULL.
5613 **/
5614struct i40e_profile_section_header *
5615i40e_find_section_in_profile(u32 section_type,
5616 struct i40e_profile_segment *profile)
5617{
5618 struct i40e_profile_section_header *sec;
5619 struct i40e_section_table *sec_tbl;
5620 u32 sec_off;
5621 u32 i;
5622
5623 if (profile->header.type != SEGMENT_TYPE_I40E)
5624 return NULL;
5625
5626 I40E_SECTION_TABLE(profile, sec_tbl);
5627
5628 for (i = 0; i < sec_tbl->section_count; i++) {
5629 sec_off = sec_tbl->section_offset[i];
5630 sec = I40E_SECTION_HEADER(profile, sec_off);
5631 if (sec->section.type == section_type)
5632 return sec;
5633 }
5634
5635 return NULL;
5636}
5637
5638/**
5639 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5640 * @hw: pointer to the hw struct
5641 * @aq: command buffer containing all data to execute AQ
5642 **/
5643static enum
5644i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5645 struct i40e_profile_aq_section *aq)
5646{
5647 i40e_status status;
5648 struct i40e_aq_desc desc;
5649 u8 *msg = NULL;
5650 u16 msglen;
5651
5652 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5653 desc.flags |= cpu_to_le16(aq->flags);
5654 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5655
5656 msglen = aq->datalen;
5657 if (msglen) {
5658 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5659 I40E_AQ_FLAG_RD));
5660 if (msglen > I40E_AQ_LARGE_BUF)
5661 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5662 desc.datalen = cpu_to_le16(msglen);
5663 msg = &aq->data[0];
5664 }
5665
5666 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5667
5668 if (status) {
5669 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5670 "unable to exec DDP AQ opcode %u, error %d\n",
5671 aq->opcode, status);
5672 return status;
5673 }
5674
5675 /* copy returned desc to aq_buf */
5676 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5677
5678 return 0;
5679}
5680
5681/**
5682 * i40e_validate_profile
5683 * @hw: pointer to the hardware structure
5684 * @profile: pointer to the profile segment of the package to be validated
5685 * @track_id: package tracking id
5686 * @rollback: flag indicating whether the profile is for rollback.
5687 *
5688 * Validates supported devices and profile's sections.
5689 */
5690static enum i40e_status_code
5691i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5692 u32 track_id, bool rollback)
5693{
5694 struct i40e_profile_section_header *sec = NULL;
5695 i40e_status status = 0;
5696 struct i40e_section_table *sec_tbl;
5697 u32 vendor_dev_id;
5698 u32 dev_cnt;
5699 u32 sec_off;
5700 u32 i;
5701
5702 if (track_id == I40E_DDP_TRACKID_INVALID) {
5703 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5704 return I40E_NOT_SUPPORTED;
5705 }
5706
5707 dev_cnt = profile->device_table_count;
5708 for (i = 0; i < dev_cnt; i++) {
5709 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5710 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5711 hw->device_id == (vendor_dev_id & 0xFFFF))
5712 break;
5713 }
5714 if (dev_cnt && i == dev_cnt) {
5715 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5716 "Device doesn't support DDP\n");
5717 return I40E_ERR_DEVICE_NOT_SUPPORTED;
5718 }
5719
5720 I40E_SECTION_TABLE(profile, sec_tbl);
5721
5722 /* Validate sections types */
5723 for (i = 0; i < sec_tbl->section_count; i++) {
5724 sec_off = sec_tbl->section_offset[i];
5725 sec = I40E_SECTION_HEADER(profile, sec_off);
5726 if (rollback) {
5727 if (sec->section.type == SECTION_TYPE_MMIO ||
5728 sec->section.type == SECTION_TYPE_AQ ||
5729 sec->section.type == SECTION_TYPE_RB_AQ) {
5730 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5731 "Not a roll-back package\n");
5732 return I40E_NOT_SUPPORTED;
5733 }
5734 } else {
5735 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5736 sec->section.type == SECTION_TYPE_RB_MMIO) {
5737 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5738 "Not an original package\n");
5739 return I40E_NOT_SUPPORTED;
5740 }
5741 }
5742 }
5743
5744 return status;
5745}
5746
5747/**
5748 * i40e_write_profile
5749 * @hw: pointer to the hardware structure
5750 * @profile: pointer to the profile segment of the package to be downloaded
5751 * @track_id: package tracking id
5752 *
5753 * Handles the download of a complete package.
5754 */
5755enum i40e_status_code
5756i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5757 u32 track_id)
5758{
5759 i40e_status status = 0;
5760 struct i40e_section_table *sec_tbl;
5761 struct i40e_profile_section_header *sec = NULL;
5762 struct i40e_profile_aq_section *ddp_aq;
5763 u32 section_size = 0;
5764 u32 offset = 0, info = 0;
5765 u32 sec_off;
5766 u32 i;
5767
5768 status = i40e_validate_profile(hw, profile, track_id, false);
5769 if (status)
5770 return status;
5771
5772 I40E_SECTION_TABLE(profile, sec_tbl);
5773
5774 for (i = 0; i < sec_tbl->section_count; i++) {
5775 sec_off = sec_tbl->section_offset[i];
5776 sec = I40E_SECTION_HEADER(profile, sec_off);
5777 /* Process generic admin command */
5778 if (sec->section.type == SECTION_TYPE_AQ) {
5779 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5780 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5781 if (status) {
5782 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5783 "Failed to execute aq: section %d, opcode %u\n",
5784 i, ddp_aq->opcode);
5785 break;
5786 }
5787 sec->section.type = SECTION_TYPE_RB_AQ;
5788 }
5789
5790 /* Skip any non-mmio sections */
5791 if (sec->section.type != SECTION_TYPE_MMIO)
5792 continue;
5793
5794 section_size = sec->section.size +
5795 sizeof(struct i40e_profile_section_header);
5796
5797 /* Write MMIO section */
5798 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5799 track_id, &offset, &info, NULL);
5800 if (status) {
5801 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5802 "Failed to write profile: section %d, offset %d, info %d\n",
5803 i, offset, info);
5804 break;
5805 }
5806 }
5807 return status;
5808}
5809
5810/**
5811 * i40e_rollback_profile
5812 * @hw: pointer to the hardware structure
5813 * @profile: pointer to the profile segment of the package to be removed
5814 * @track_id: package tracking id
5815 *
5816 * Rolls back previously loaded package.
5817 */
5818enum i40e_status_code
5819i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5820 u32 track_id)
5821{
5822 struct i40e_profile_section_header *sec = NULL;
5823 i40e_status status = 0;
5824 struct i40e_section_table *sec_tbl;
5825 u32 offset = 0, info = 0;
5826 u32 section_size = 0;
5827 u32 sec_off;
5828 int i;
5829
5830 status = i40e_validate_profile(hw, profile, track_id, true);
5831 if (status)
5832 return status;
5833
5834 I40E_SECTION_TABLE(profile, sec_tbl);
5835
5836 /* For rollback write sections in reverse */
5837 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5838 sec_off = sec_tbl->section_offset[i];
5839 sec = I40E_SECTION_HEADER(profile, sec_off);
5840
5841 /* Skip any non-rollback sections */
5842 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5843 continue;
5844
5845 section_size = sec->section.size +
5846 sizeof(struct i40e_profile_section_header);
5847
5848 /* Write roll-back MMIO section */
5849 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5850 track_id, &offset, &info, NULL);
5851 if (status) {
5852 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5853 "Failed to write profile: section %d, offset %d, info %d\n",
5854 i, offset, info);
5855 break;
5856 }
5857 }
5858 return status;
5859}
5860
5861/**
5862 * i40e_add_pinfo_to_list
5863 * @hw: pointer to the hardware structure
5864 * @profile: pointer to the profile segment of the package
5865 * @profile_info_sec: buffer for information section
5866 * @track_id: package tracking id
5867 *
5868 * Register a profile to the list of loaded profiles.
5869 */
5870enum i40e_status_code
5871i40e_add_pinfo_to_list(struct i40e_hw *hw,
5872 struct i40e_profile_segment *profile,
5873 u8 *profile_info_sec, u32 track_id)
5874{
5875 i40e_status status = 0;
5876 struct i40e_profile_section_header *sec = NULL;
5877 struct i40e_profile_info *pinfo;
5878 u32 offset = 0, info = 0;
5879
5880 sec = (struct i40e_profile_section_header *)profile_info_sec;
5881 sec->tbl_size = 1;
5882 sec->data_end = sizeof(struct i40e_profile_section_header) +
5883 sizeof(struct i40e_profile_info);
5884 sec->section.type = SECTION_TYPE_INFO;
5885 sec->section.offset = sizeof(struct i40e_profile_section_header);
5886 sec->section.size = sizeof(struct i40e_profile_info);
5887 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5888 sec->section.offset);
5889 pinfo->track_id = track_id;
5890 pinfo->version = profile->version;
5891 pinfo->op = I40E_DDP_ADD_TRACKID;
5892 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5893
5894 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5895 track_id, &offset, &info, NULL);
5896
5897 return status;
5898}
5899
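/* Illustrative sketch, not part of the driver: once a profile segment has
 * been located (see i40e_find_segment_in_package() above), loading it is a
 * profile write followed by registering it in the applied-profile list.
 * 'profile', 'track_id' and 'profile_info_sec' are assumed to be provided
 * by the caller.
 *
 *	enum i40e_status_code status;
 *
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, profile_info_sec,
 *						track_id);
 */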
5900/**
5901 * i40e_aq_add_cloud_filters
5902 * @hw: pointer to the hardware structure
5903 * @seid: VSI seid to add cloud filters from
5904 * @filters: Buffer which contains the filters to be added
5905 * @filter_count: number of filters contained in the buffer
5906 *
5907 * Set the cloud filters for a given VSI. The contents of the
5908 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5909 * of the function.
5910 *
5911 **/
5912enum i40e_status_code
5913i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5914 struct i40e_aqc_cloud_filters_element_data *filters,
5915 u8 filter_count)
5916{
5917 struct i40e_aq_desc desc;
5918 struct i40e_aqc_add_remove_cloud_filters *cmd =
5919 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5920 enum i40e_status_code status;
5921 u16 buff_len;
5922
5923 i40e_fill_default_direct_cmd_desc(&desc,
5924 i40e_aqc_opc_add_cloud_filters);
5925
5926 buff_len = filter_count * sizeof(*filters);
5927 desc.datalen = cpu_to_le16(buff_len);
5928 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5929 cmd->num_filters = filter_count;
5930 cmd->seid = cpu_to_le16(seid);
5931
5932 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5933
5934 return status;
5935}
5936
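/* Illustrative sketch, not part of the driver: adding a single cloud filter.
 * The element fields themselves are filled in by the caller according to the
 * desired tunnel/MAC match (not shown here).  'vsi_seid' is a hypothetical
 * SEID owned by the caller.
 *
 *	struct i40e_aqc_cloud_filters_element_data filter = {};
 *
 *	(fill in the match fields of 'filter' here)
 *	if (i40e_aq_add_cloud_filters(hw, vsi_seid, &filter, 1))
 *		hw_dbg(hw, "add cloud filter failed\n");
 */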
5937/**
5938 * i40e_aq_add_cloud_filters_bb
5939 * @hw: pointer to the hardware structure
5940 * @seid: VSI seid to add cloud filters from
5941 * @filters: Buffer which contains the filters in big buffer to be added
5942 * @filter_count: number of filters contained in the buffer
5943 *
5944 * Set the big buffer cloud filters for a given VSI. The contents of the
5945 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5946 * function.
5947 *
5948 **/
5949enum i40e_status_code
5950i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5951 struct i40e_aqc_cloud_filters_element_bb *filters,
5952 u8 filter_count)
5953{
5954 struct i40e_aq_desc desc;
5955 struct i40e_aqc_add_remove_cloud_filters *cmd =
5956 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5957 i40e_status status;
5958 u16 buff_len;
5959 int i;
5960
5961 i40e_fill_default_direct_cmd_desc(&desc,
5962 i40e_aqc_opc_add_cloud_filters);
5963
5964 buff_len = filter_count * sizeof(*filters);
5965 desc.datalen = cpu_to_le16(buff_len);
5966 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5967 cmd->num_filters = filter_count;
5968 cmd->seid = cpu_to_le16(seid);
5969 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5970
5971 for (i = 0; i < filter_count; i++) {
5972 u16 tnl_type;
5973 u32 ti;
5974
5975 tnl_type = (le16_to_cpu(filters[i].element.flags) &
5976 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5977 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5978
5979 /* Due to hardware eccentricities, the VNI for Geneve is shifted
5980 * one more byte further than normally used for Tenant ID in
5981 * other tunnel types.
5982 */
5983 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5984 ti = le32_to_cpu(filters[i].element.tenant_id);
5985 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5986 }
5987 }
5988
5989 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5990
5991 return status;
5992}
5993
5994/**
5995 * i40e_aq_rem_cloud_filters
5996 * @hw: pointer to the hardware structure
5997 * @seid: VSI seid to remove cloud filters from
5998 * @filters: Buffer which contains the filters to be removed
5999 * @filter_count: number of filters contained in the buffer
6000 *
6001 * Remove the cloud filters for a given VSI. The contents of the
6002 * i40e_aqc_cloud_filters_element_data are filled in by the caller
6003 * of the function.
6004 *
6005 **/
6006enum i40e_status_code
6007i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
6008 struct i40e_aqc_cloud_filters_element_data *filters,
6009 u8 filter_count)
6010{
6011 struct i40e_aq_desc desc;
6012 struct i40e_aqc_add_remove_cloud_filters *cmd =
6013 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6014 enum i40e_status_code status;
6015 u16 buff_len;
6016
6017 i40e_fill_default_direct_cmd_desc(&desc,
6018 i40e_aqc_opc_remove_cloud_filters);
6019
6020 buff_len = filter_count * sizeof(*filters);
6021 desc.datalen = cpu_to_le16(buff_len);
6022 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6023 cmd->num_filters = filter_count;
6024 cmd->seid = cpu_to_le16(seid);
6025
6026 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6027
6028 return status;
6029}
6030
6031/**
6032 * i40e_aq_rem_cloud_filters_bb
6033 * @hw: pointer to the hardware structure
6034 * @seid: VSI seid to remove cloud filters from
6035 * @filters: Buffer which contains the filters in big buffer to be removed
6036 * @filter_count: number of filters contained in the buffer
6037 *
6038 * Remove the big buffer cloud filters for a given VSI. The contents of the
6039 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6040 * function.
6041 *
6042 **/
6043enum i40e_status_code
6044i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6045 struct i40e_aqc_cloud_filters_element_bb *filters,
6046 u8 filter_count)
6047{
6048 struct i40e_aq_desc desc;
6049 struct i40e_aqc_add_remove_cloud_filters *cmd =
6050 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6051 i40e_status status;
6052 u16 buff_len;
6053 int i;
6054
6055 i40e_fill_default_direct_cmd_desc(&desc,
6056 i40e_aqc_opc_remove_cloud_filters);
6057
6058 buff_len = filter_count * sizeof(*filters);
6059 desc.datalen = cpu_to_le16(buff_len);
6060 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6061 cmd->num_filters = filter_count;
6062 cmd->seid = cpu_to_le16(seid);
6063 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6064
6065 for (i = 0; i < filter_count; i++) {
6066 u16 tnl_type;
6067 u32 ti;
6068
6069 tnl_type = (le16_to_cpu(filters[i].element.flags) &
6070 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6071 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6072
6073 /* Due to hardware eccentricities, the VNI for Geneve is shifted
6074 * one more byte further than normally used for Tenant ID in
6075 * other tunnel types.
6076 */
6077 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6078 ti = le32_to_cpu(filters[i].element.tenant_id);
6079 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6080 }
6081 }
6082
6083 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6084
6085 return status;
6086}