1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#include "i40e.h"
5#include "i40e_type.h"
6#include "i40e_adminq.h"
7#include "i40e_prototype.h"
8#include <linux/avf/virtchnl.h>
9
10/**
11 * i40e_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure
13 *
14 * This function sets the MAC type of the adapter based on the
15 * vendor ID and device ID stored in the hw structure.
16 **/
17i40e_status i40e_set_mac_type(struct i40e_hw *hw)
18{
19 i40e_status status = 0;
20
21 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
22 switch (hw->device_id) {
23 case I40E_DEV_ID_SFP_XL710:
24 case I40E_DEV_ID_QEMU:
25 case I40E_DEV_ID_KX_B:
26 case I40E_DEV_ID_KX_C:
27 case I40E_DEV_ID_QSFP_A:
28 case I40E_DEV_ID_QSFP_B:
29 case I40E_DEV_ID_QSFP_C:
30 case I40E_DEV_ID_10G_BASE_T:
31 case I40E_DEV_ID_10G_BASE_T4:
32 case I40E_DEV_ID_10G_B:
33 case I40E_DEV_ID_10G_SFP:
34 case I40E_DEV_ID_20G_KR2:
35 case I40E_DEV_ID_20G_KR2_A:
36 case I40E_DEV_ID_25G_B:
37 case I40E_DEV_ID_25G_SFP28:
38 case I40E_DEV_ID_X710_N3000:
39 case I40E_DEV_ID_XXV710_N3000:
40 hw->mac.type = I40E_MAC_XL710;
41 break;
42 case I40E_DEV_ID_KX_X722:
43 case I40E_DEV_ID_QSFP_X722:
44 case I40E_DEV_ID_SFP_X722:
45 case I40E_DEV_ID_1G_BASE_T_X722:
46 case I40E_DEV_ID_10G_BASE_T_X722:
47 case I40E_DEV_ID_SFP_I_X722:
48 hw->mac.type = I40E_MAC_X722;
49 break;
50 default:
51 hw->mac.type = I40E_MAC_GENERIC;
52 break;
53 }
54 } else {
55 status = I40E_ERR_DEVICE_NOT_SUPPORTED;
56 }
57
58 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
59 hw->mac.type, status);
60 return status;
61}
62
63/**
64 * i40e_aq_str - convert AQ err code to a string
65 * @hw: pointer to the HW structure
66 * @aq_err: the AQ error code to convert
67 **/
68const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
69{
70 switch (aq_err) {
71 case I40E_AQ_RC_OK:
72 return "OK";
73 case I40E_AQ_RC_EPERM:
74 return "I40E_AQ_RC_EPERM";
75 case I40E_AQ_RC_ENOENT:
76 return "I40E_AQ_RC_ENOENT";
77 case I40E_AQ_RC_ESRCH:
78 return "I40E_AQ_RC_ESRCH";
79 case I40E_AQ_RC_EINTR:
80 return "I40E_AQ_RC_EINTR";
81 case I40E_AQ_RC_EIO:
82 return "I40E_AQ_RC_EIO";
83 case I40E_AQ_RC_ENXIO:
84 return "I40E_AQ_RC_ENXIO";
85 case I40E_AQ_RC_E2BIG:
86 return "I40E_AQ_RC_E2BIG";
87 case I40E_AQ_RC_EAGAIN:
88 return "I40E_AQ_RC_EAGAIN";
89 case I40E_AQ_RC_ENOMEM:
90 return "I40E_AQ_RC_ENOMEM";
91 case I40E_AQ_RC_EACCES:
92 return "I40E_AQ_RC_EACCES";
93 case I40E_AQ_RC_EFAULT:
94 return "I40E_AQ_RC_EFAULT";
95 case I40E_AQ_RC_EBUSY:
96 return "I40E_AQ_RC_EBUSY";
97 case I40E_AQ_RC_EEXIST:
98 return "I40E_AQ_RC_EEXIST";
99 case I40E_AQ_RC_EINVAL:
100 return "I40E_AQ_RC_EINVAL";
101 case I40E_AQ_RC_ENOTTY:
102 return "I40E_AQ_RC_ENOTTY";
103 case I40E_AQ_RC_ENOSPC:
104 return "I40E_AQ_RC_ENOSPC";
105 case I40E_AQ_RC_ENOSYS:
106 return "I40E_AQ_RC_ENOSYS";
107 case I40E_AQ_RC_ERANGE:
108 return "I40E_AQ_RC_ERANGE";
109 case I40E_AQ_RC_EFLUSHED:
110 return "I40E_AQ_RC_EFLUSHED";
111 case I40E_AQ_RC_BAD_ADDR:
112 return "I40E_AQ_RC_BAD_ADDR";
113 case I40E_AQ_RC_EMODE:
114 return "I40E_AQ_RC_EMODE";
115 case I40E_AQ_RC_EFBIG:
116 return "I40E_AQ_RC_EFBIG";
117 }
118
119 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
120 return hw->err_str;
121}
122
123/**
124 * i40e_stat_str - convert status err code to a string
125 * @hw: pointer to the HW structure
126 * @stat_err: the status error code to convert
127 **/
128const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
129{
130 switch (stat_err) {
131 case 0:
132 return "OK";
133 case I40E_ERR_NVM:
134 return "I40E_ERR_NVM";
135 case I40E_ERR_NVM_CHECKSUM:
136 return "I40E_ERR_NVM_CHECKSUM";
137 case I40E_ERR_PHY:
138 return "I40E_ERR_PHY";
139 case I40E_ERR_CONFIG:
140 return "I40E_ERR_CONFIG";
141 case I40E_ERR_PARAM:
142 return "I40E_ERR_PARAM";
143 case I40E_ERR_MAC_TYPE:
144 return "I40E_ERR_MAC_TYPE";
145 case I40E_ERR_UNKNOWN_PHY:
146 return "I40E_ERR_UNKNOWN_PHY";
147 case I40E_ERR_LINK_SETUP:
148 return "I40E_ERR_LINK_SETUP";
149 case I40E_ERR_ADAPTER_STOPPED:
150 return "I40E_ERR_ADAPTER_STOPPED";
151 case I40E_ERR_INVALID_MAC_ADDR:
152 return "I40E_ERR_INVALID_MAC_ADDR";
153 case I40E_ERR_DEVICE_NOT_SUPPORTED:
154 return "I40E_ERR_DEVICE_NOT_SUPPORTED";
155 case I40E_ERR_MASTER_REQUESTS_PENDING:
156 return "I40E_ERR_MASTER_REQUESTS_PENDING";
157 case I40E_ERR_INVALID_LINK_SETTINGS:
158 return "I40E_ERR_INVALID_LINK_SETTINGS";
159 case I40E_ERR_AUTONEG_NOT_COMPLETE:
160 return "I40E_ERR_AUTONEG_NOT_COMPLETE";
161 case I40E_ERR_RESET_FAILED:
162 return "I40E_ERR_RESET_FAILED";
163 case I40E_ERR_SWFW_SYNC:
164 return "I40E_ERR_SWFW_SYNC";
165 case I40E_ERR_NO_AVAILABLE_VSI:
166 return "I40E_ERR_NO_AVAILABLE_VSI";
167 case I40E_ERR_NO_MEMORY:
168 return "I40E_ERR_NO_MEMORY";
169 case I40E_ERR_BAD_PTR:
170 return "I40E_ERR_BAD_PTR";
171 case I40E_ERR_RING_FULL:
172 return "I40E_ERR_RING_FULL";
173 case I40E_ERR_INVALID_PD_ID:
174 return "I40E_ERR_INVALID_PD_ID";
175 case I40E_ERR_INVALID_QP_ID:
176 return "I40E_ERR_INVALID_QP_ID";
177 case I40E_ERR_INVALID_CQ_ID:
178 return "I40E_ERR_INVALID_CQ_ID";
179 case I40E_ERR_INVALID_CEQ_ID:
180 return "I40E_ERR_INVALID_CEQ_ID";
181 case I40E_ERR_INVALID_AEQ_ID:
182 return "I40E_ERR_INVALID_AEQ_ID";
183 case I40E_ERR_INVALID_SIZE:
184 return "I40E_ERR_INVALID_SIZE";
185 case I40E_ERR_INVALID_ARP_INDEX:
186 return "I40E_ERR_INVALID_ARP_INDEX";
187 case I40E_ERR_INVALID_FPM_FUNC_ID:
188 return "I40E_ERR_INVALID_FPM_FUNC_ID";
189 case I40E_ERR_QP_INVALID_MSG_SIZE:
190 return "I40E_ERR_QP_INVALID_MSG_SIZE";
191 case I40E_ERR_QP_TOOMANY_WRS_POSTED:
192 return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
193 case I40E_ERR_INVALID_FRAG_COUNT:
194 return "I40E_ERR_INVALID_FRAG_COUNT";
195 case I40E_ERR_QUEUE_EMPTY:
196 return "I40E_ERR_QUEUE_EMPTY";
197 case I40E_ERR_INVALID_ALIGNMENT:
198 return "I40E_ERR_INVALID_ALIGNMENT";
199 case I40E_ERR_FLUSHED_QUEUE:
200 return "I40E_ERR_FLUSHED_QUEUE";
201 case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
202 return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
203 case I40E_ERR_INVALID_IMM_DATA_SIZE:
204 return "I40E_ERR_INVALID_IMM_DATA_SIZE";
205 case I40E_ERR_TIMEOUT:
206 return "I40E_ERR_TIMEOUT";
207 case I40E_ERR_OPCODE_MISMATCH:
208 return "I40E_ERR_OPCODE_MISMATCH";
209 case I40E_ERR_CQP_COMPL_ERROR:
210 return "I40E_ERR_CQP_COMPL_ERROR";
211 case I40E_ERR_INVALID_VF_ID:
212 return "I40E_ERR_INVALID_VF_ID";
213 case I40E_ERR_INVALID_HMCFN_ID:
214 return "I40E_ERR_INVALID_HMCFN_ID";
215 case I40E_ERR_BACKING_PAGE_ERROR:
216 return "I40E_ERR_BACKING_PAGE_ERROR";
217 case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
218 return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
219 case I40E_ERR_INVALID_PBLE_INDEX:
220 return "I40E_ERR_INVALID_PBLE_INDEX";
221 case I40E_ERR_INVALID_SD_INDEX:
222 return "I40E_ERR_INVALID_SD_INDEX";
223 case I40E_ERR_INVALID_PAGE_DESC_INDEX:
224 return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
225 case I40E_ERR_INVALID_SD_TYPE:
226 return "I40E_ERR_INVALID_SD_TYPE";
227 case I40E_ERR_MEMCPY_FAILED:
228 return "I40E_ERR_MEMCPY_FAILED";
229 case I40E_ERR_INVALID_HMC_OBJ_INDEX:
230 return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
231 case I40E_ERR_INVALID_HMC_OBJ_COUNT:
232 return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
233 case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
234 return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
235 case I40E_ERR_SRQ_ENABLED:
236 return "I40E_ERR_SRQ_ENABLED";
237 case I40E_ERR_ADMIN_QUEUE_ERROR:
238 return "I40E_ERR_ADMIN_QUEUE_ERROR";
239 case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
240 return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
241 case I40E_ERR_BUF_TOO_SHORT:
242 return "I40E_ERR_BUF_TOO_SHORT";
243 case I40E_ERR_ADMIN_QUEUE_FULL:
244 return "I40E_ERR_ADMIN_QUEUE_FULL";
245 case I40E_ERR_ADMIN_QUEUE_NO_WORK:
246 return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
247 case I40E_ERR_BAD_IWARP_CQE:
248 return "I40E_ERR_BAD_IWARP_CQE";
249 case I40E_ERR_NVM_BLANK_MODE:
250 return "I40E_ERR_NVM_BLANK_MODE";
251 case I40E_ERR_NOT_IMPLEMENTED:
252 return "I40E_ERR_NOT_IMPLEMENTED";
253 case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
254 return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
255 case I40E_ERR_DIAG_TEST_FAILED:
256 return "I40E_ERR_DIAG_TEST_FAILED";
257 case I40E_ERR_NOT_READY:
258 return "I40E_ERR_NOT_READY";
259 case I40E_NOT_SUPPORTED:
260 return "I40E_NOT_SUPPORTED";
261 case I40E_ERR_FIRMWARE_API_VERSION:
262 return "I40E_ERR_FIRMWARE_API_VERSION";
263 case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
264 return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
265 }
266
267 snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
268 return hw->err_str;
269}
270
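/* Illustrative usage sketch (not part of the original file): the two string
 * helpers above are typically combined when logging an admin queue failure,
 * pairing the driver status code with the firmware's own AQ return code.
 * The i40e_example_* name and the pf argument are assumptions made for this
 * example only.
 */
static void i40e_example_report_aq_error(struct i40e_pf *pf, i40e_status ret)
{
	struct i40e_hw *hw = &pf->hw;

	if (ret)
		dev_info(&pf->pdev->dev,
			 "admin queue command failed, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
}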
271/**
272 * i40e_debug_aq
273 * @hw: pointer to the hw struct
274 * @mask: debug mask
275 * @desc: pointer to admin queue descriptor
276 * @buffer: pointer to command buffer
277 * @buf_len: max length of buffer
278 *
279 * Dumps debug log about adminq command with descriptor contents.
280 **/
281void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
282 void *buffer, u16 buf_len)
283{
284 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
285 u32 effective_mask = hw->debug_mask & mask;
286 char prefix[27];
287 u16 len;
288 u8 *buf = (u8 *)buffer;
289
290 if (!effective_mask || !desc)
291 return;
292
293 len = le16_to_cpu(aq_desc->datalen);
294
295 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
296 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
297 le16_to_cpu(aq_desc->opcode),
298 le16_to_cpu(aq_desc->flags),
299 le16_to_cpu(aq_desc->datalen),
300 le16_to_cpu(aq_desc->retval));
301 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
302 "\tcookie (h,l) 0x%08X 0x%08X\n",
303 le32_to_cpu(aq_desc->cookie_high),
304 le32_to_cpu(aq_desc->cookie_low));
305 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
306 "\tparam (0,1) 0x%08X 0x%08X\n",
307 le32_to_cpu(aq_desc->params.internal.param0),
308 le32_to_cpu(aq_desc->params.internal.param1));
309 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
310 "\taddr (h,l) 0x%08X 0x%08X\n",
311 le32_to_cpu(aq_desc->params.external.addr_high),
312 le32_to_cpu(aq_desc->params.external.addr_low));
313
314 if (buffer && buf_len != 0 && len != 0 &&
315 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
316 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
317 if (buf_len < len)
318 len = buf_len;
319
320 snprintf(prefix, sizeof(prefix),
321 "i40e %02x:%02x.%x: \t0x",
322 hw->bus.bus_id,
323 hw->bus.device,
324 hw->bus.func);
325
326 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
327 16, 1, buf, len, false);
328 }
329}
330
331/**
332 * i40e_check_asq_alive
333 * @hw: pointer to the hw struct
334 *
335 * Returns true if the queue is enabled, else false.
336 **/
337bool i40e_check_asq_alive(struct i40e_hw *hw)
338{
339 if (hw->aq.asq.len)
340 return !!(rd32(hw, hw->aq.asq.len) &
341 I40E_PF_ATQLEN_ATQENABLE_MASK);
342 else
343 return false;
344}
345
346/**
347 * i40e_aq_queue_shutdown
348 * @hw: pointer to the hw struct
349 * @unloading: is the driver unloading itself
350 *
351 * Tell the Firmware that we're shutting down the AdminQ and whether
352 * or not the driver is unloading as well.
353 **/
354i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
355 bool unloading)
356{
357 struct i40e_aq_desc desc;
358 struct i40e_aqc_queue_shutdown *cmd =
359 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
360 i40e_status status;
361
362 i40e_fill_default_direct_cmd_desc(&desc,
363 i40e_aqc_opc_queue_shutdown);
364
365 if (unloading)
366 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
367 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
368
369 return status;
370}
371
372/**
373 * i40e_aq_get_set_rss_lut
374 * @hw: pointer to the hardware structure
375 * @vsi_id: vsi fw index
376 * @pf_lut: for PF table set true, for VSI table set false
377 * @lut: pointer to the lut buffer provided by the caller
378 * @lut_size: size of the lut buffer
379 * @set: set true to set the table, false to get the table
380 *
381 * Internal function to get or set RSS look up table
382 **/
383static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
384 u16 vsi_id, bool pf_lut,
385 u8 *lut, u16 lut_size,
386 bool set)
387{
388 i40e_status status;
389 struct i40e_aq_desc desc;
390 struct i40e_aqc_get_set_rss_lut *cmd_resp =
391 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
392
393 if (set)
394 i40e_fill_default_direct_cmd_desc(&desc,
395 i40e_aqc_opc_set_rss_lut);
396 else
397 i40e_fill_default_direct_cmd_desc(&desc,
398 i40e_aqc_opc_get_rss_lut);
399
400 /* Indirect command */
401 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
402 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
403
404 cmd_resp->vsi_id =
405 cpu_to_le16((u16)((vsi_id <<
406 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
407 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
408 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
409
410 if (pf_lut)
411 cmd_resp->flags |= cpu_to_le16((u16)
412 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
413 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
414 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
415 else
416 cmd_resp->flags |= cpu_to_le16((u16)
417 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
418 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
419 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
420
421 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
422
423 return status;
424}
425
426/**
427 * i40e_aq_get_rss_lut
428 * @hw: pointer to the hardware structure
429 * @vsi_id: vsi fw index
430 * @pf_lut: for PF table set true, for VSI table set false
431 * @lut: pointer to the lut buffer provided by the caller
432 * @lut_size: size of the lut buffer
433 *
434 * get the RSS lookup table, PF or VSI type
435 **/
436i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
437 bool pf_lut, u8 *lut, u16 lut_size)
438{
439 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
440 false);
441}
442
443/**
444 * i40e_aq_set_rss_lut
445 * @hw: pointer to the hardware structure
446 * @vsi_id: vsi fw index
447 * @pf_lut: for PF table set true, for VSI table set false
448 * @lut: pointer to the lut buffer provided by the caller
449 * @lut_size: size of the lut buffer
450 *
451 * set the RSS lookup table, PF or VSI type
452 **/
453i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
454 bool pf_lut, u8 *lut, u16 lut_size)
455{
456 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
457}
458
459/**
460 * i40e_aq_get_set_rss_key
461 * @hw: pointer to the hw struct
462 * @vsi_id: vsi fw index
463 * @key: pointer to key info struct
464 * @set: set true to set the key, false to get the key
465 *
466 * Internal function to get or set the RSS key per VSI
467 **/
468static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
469 u16 vsi_id,
470 struct i40e_aqc_get_set_rss_key_data *key,
471 bool set)
472{
473 i40e_status status;
474 struct i40e_aq_desc desc;
475 struct i40e_aqc_get_set_rss_key *cmd_resp =
476 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
477 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
478
479 if (set)
480 i40e_fill_default_direct_cmd_desc(&desc,
481 i40e_aqc_opc_set_rss_key);
482 else
483 i40e_fill_default_direct_cmd_desc(&desc,
484 i40e_aqc_opc_get_rss_key);
485
486 /* Indirect command */
487 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
488 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
489
490 cmd_resp->vsi_id =
491 cpu_to_le16((u16)((vsi_id <<
492 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
493 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
494 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
495
496 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
497
498 return status;
499}
500
501/**
502 * i40e_aq_get_rss_key
503 * @hw: pointer to the hw struct
504 * @vsi_id: vsi fw index
505 * @key: pointer to key info struct
506 *
507 **/
508i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
509 u16 vsi_id,
510 struct i40e_aqc_get_set_rss_key_data *key)
511{
512 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
513}
514
515/**
516 * i40e_aq_set_rss_key
517 * @hw: pointer to the hw struct
518 * @vsi_id: vsi fw index
519 * @key: pointer to key info struct
520 *
521 * set the RSS key per VSI
522 **/
523i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
524 u16 vsi_id,
525 struct i40e_aqc_get_set_rss_key_data *key)
526{
527 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
528}
529
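/* Illustrative usage sketch (not part of the original file): programming RSS
 * for a VSI with the two wrappers above - first the hash key, then the
 * per-VSI lookup table. The i40e_example_* name and the seed/lut parameters
 * are assumptions for this example; the real driver derives them from the
 * VSI being configured.
 */
static i40e_status i40e_example_config_rss(struct i40e_hw *hw, u16 vsi_id,
					   u8 *seed, u8 *lut, u16 lut_size)
{
	i40e_status ret;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *key =
			(struct i40e_aqc_get_set_rss_key_data *)seed;

		ret = i40e_aq_set_rss_key(hw, vsi_id, key);
		if (ret)
			return ret;
	}

	if (lut) {
		/* false selects the per-VSI table rather than the PF table */
		ret = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
		if (ret)
			return ret;
	}

	return 0;
}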
530/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
531 * hardware to a bit-field that can be used by SW to more easily determine the
532 * packet type.
533 *
534 * Macros are used to shorten the table lines and make this table human
535 * readable.
536 *
537 * We store the PTYPE in the top byte of the bit field - this is just so that
538 * we can check that the table doesn't have a row missing, as the index into
539 * the table should be the PTYPE.
540 *
541 * Typical work flow:
542 *
543 * IF NOT i40e_ptype_lookup[ptype].known
544 * THEN
545 * Packet is unknown
546 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
547 * Use the rest of the fields to look at the tunnels, inner protocols, etc
548 * ELSE
549 * Use the enum i40e_rx_l2_ptype to decode the packet type
550 * ENDIF
551 */
552
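/* Illustrative decode sketch (not part of the original file) following the
 * workflow described above: check 'known' first, then the outer IP fields,
 * then the inner protocol. The i40e_example_* name is an assumption for this
 * example; the real RX hot path performs a similar lookup in i40e_txrx.c.
 */
static inline bool i40e_example_ptype_is_tcp_ipv4(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];

	if (!decoded.known)
		return false;		/* packet type not recognized by HW */

	if (decoded.outer_ip != I40E_RX_PTYPE_OUTER_IP ||
	    decoded.outer_ip_ver != I40E_RX_PTYPE_OUTER_IPV4)
		return false;		/* not an outer IPv4 frame */

	return decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP;
}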
553/* macro to make the table lines short */
554#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
555 { PTYPE, \
556 1, \
557 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
558 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
559 I40E_RX_PTYPE_##OUTER_FRAG, \
560 I40E_RX_PTYPE_TUNNEL_##T, \
561 I40E_RX_PTYPE_TUNNEL_END_##TE, \
562 I40E_RX_PTYPE_##TEF, \
563 I40E_RX_PTYPE_INNER_PROT_##I, \
564 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
565
566#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
567 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
568
569/* shorter macros make the table fit but are terse */
570#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
571#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
572#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
573
574/* Lookup table mapping the HW PTYPE to the bit field for decoding */
575struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
576 /* L2 Packet types */
577 I40E_PTT_UNUSED_ENTRY(0),
578 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
579 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
580 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
581 I40E_PTT_UNUSED_ENTRY(4),
582 I40E_PTT_UNUSED_ENTRY(5),
583 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
584 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
585 I40E_PTT_UNUSED_ENTRY(8),
586 I40E_PTT_UNUSED_ENTRY(9),
587 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
588 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
589 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
590 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
591 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
592 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
593 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
594 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
595 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
596 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
597 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
598 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
599
600 /* Non Tunneled IPv4 */
601 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
602 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
603 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
604 I40E_PTT_UNUSED_ENTRY(25),
605 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
606 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
607 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
608
609 /* IPv4 --> IPv4 */
610 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
611 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
612 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
613 I40E_PTT_UNUSED_ENTRY(32),
614 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
615 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
616 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
617
618 /* IPv4 --> IPv6 */
619 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
620 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
621 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
622 I40E_PTT_UNUSED_ENTRY(39),
623 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
624 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
625 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
626
627 /* IPv4 --> GRE/NAT */
628 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
629
630 /* IPv4 --> GRE/NAT --> IPv4 */
631 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
632 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
633 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
634 I40E_PTT_UNUSED_ENTRY(47),
635 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
636 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
637 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
638
639 /* IPv4 --> GRE/NAT --> IPv6 */
640 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
641 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
642 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
643 I40E_PTT_UNUSED_ENTRY(54),
644 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
645 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
646 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
647
648 /* IPv4 --> GRE/NAT --> MAC */
649 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
650
651 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
652 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
653 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
654 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
655 I40E_PTT_UNUSED_ENTRY(62),
656 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
657 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
658 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
659
660 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
661 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
662 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
663 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
664 I40E_PTT_UNUSED_ENTRY(69),
665 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
666 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
667 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
668
669 /* IPv4 --> GRE/NAT --> MAC/VLAN */
670 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
671
672 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
673 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
674 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
675 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
676 I40E_PTT_UNUSED_ENTRY(77),
677 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
678 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
679 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
680
681 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
682 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
683 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
684 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
685 I40E_PTT_UNUSED_ENTRY(84),
686 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
687 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
688 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
689
690 /* Non Tunneled IPv6 */
691 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
692 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
693 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
694 I40E_PTT_UNUSED_ENTRY(91),
695 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
696 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
697 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
698
699 /* IPv6 --> IPv4 */
700 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
701 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
702 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
703 I40E_PTT_UNUSED_ENTRY(98),
704 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
705 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
706 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
707
708 /* IPv6 --> IPv6 */
709 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
710 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
711 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
712 I40E_PTT_UNUSED_ENTRY(105),
713 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
714 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
715 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
716
717 /* IPv6 --> GRE/NAT */
718 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
719
720 /* IPv6 --> GRE/NAT -> IPv4 */
721 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
722 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
723 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
724 I40E_PTT_UNUSED_ENTRY(113),
725 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
726 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
727 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
728
729 /* IPv6 --> GRE/NAT -> IPv6 */
730 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
731 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
732 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
733 I40E_PTT_UNUSED_ENTRY(120),
734 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
735 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
736 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
737
738 /* IPv6 --> GRE/NAT -> MAC */
739 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
740
741 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
742 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
743 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
744 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
745 I40E_PTT_UNUSED_ENTRY(128),
746 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
747 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
748 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
749
750 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
751 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
752 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
753 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
754 I40E_PTT_UNUSED_ENTRY(135),
755 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
756 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
757 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
758
759 /* IPv6 --> GRE/NAT -> MAC/VLAN */
760 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
761
762 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
763 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
764 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
765 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
766 I40E_PTT_UNUSED_ENTRY(143),
767 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
768 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
769 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
770
771 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
772 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
773 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
774 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
775 I40E_PTT_UNUSED_ENTRY(150),
776 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
777 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
778 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
779
780 /* unused entries */
781 I40E_PTT_UNUSED_ENTRY(154),
782 I40E_PTT_UNUSED_ENTRY(155),
783 I40E_PTT_UNUSED_ENTRY(156),
784 I40E_PTT_UNUSED_ENTRY(157),
785 I40E_PTT_UNUSED_ENTRY(158),
786 I40E_PTT_UNUSED_ENTRY(159),
787
788 I40E_PTT_UNUSED_ENTRY(160),
789 I40E_PTT_UNUSED_ENTRY(161),
790 I40E_PTT_UNUSED_ENTRY(162),
791 I40E_PTT_UNUSED_ENTRY(163),
792 I40E_PTT_UNUSED_ENTRY(164),
793 I40E_PTT_UNUSED_ENTRY(165),
794 I40E_PTT_UNUSED_ENTRY(166),
795 I40E_PTT_UNUSED_ENTRY(167),
796 I40E_PTT_UNUSED_ENTRY(168),
797 I40E_PTT_UNUSED_ENTRY(169),
798
799 I40E_PTT_UNUSED_ENTRY(170),
800 I40E_PTT_UNUSED_ENTRY(171),
801 I40E_PTT_UNUSED_ENTRY(172),
802 I40E_PTT_UNUSED_ENTRY(173),
803 I40E_PTT_UNUSED_ENTRY(174),
804 I40E_PTT_UNUSED_ENTRY(175),
805 I40E_PTT_UNUSED_ENTRY(176),
806 I40E_PTT_UNUSED_ENTRY(177),
807 I40E_PTT_UNUSED_ENTRY(178),
808 I40E_PTT_UNUSED_ENTRY(179),
809
810 I40E_PTT_UNUSED_ENTRY(180),
811 I40E_PTT_UNUSED_ENTRY(181),
812 I40E_PTT_UNUSED_ENTRY(182),
813 I40E_PTT_UNUSED_ENTRY(183),
814 I40E_PTT_UNUSED_ENTRY(184),
815 I40E_PTT_UNUSED_ENTRY(185),
816 I40E_PTT_UNUSED_ENTRY(186),
817 I40E_PTT_UNUSED_ENTRY(187),
818 I40E_PTT_UNUSED_ENTRY(188),
819 I40E_PTT_UNUSED_ENTRY(189),
820
821 I40E_PTT_UNUSED_ENTRY(190),
822 I40E_PTT_UNUSED_ENTRY(191),
823 I40E_PTT_UNUSED_ENTRY(192),
824 I40E_PTT_UNUSED_ENTRY(193),
825 I40E_PTT_UNUSED_ENTRY(194),
826 I40E_PTT_UNUSED_ENTRY(195),
827 I40E_PTT_UNUSED_ENTRY(196),
828 I40E_PTT_UNUSED_ENTRY(197),
829 I40E_PTT_UNUSED_ENTRY(198),
830 I40E_PTT_UNUSED_ENTRY(199),
831
832 I40E_PTT_UNUSED_ENTRY(200),
833 I40E_PTT_UNUSED_ENTRY(201),
834 I40E_PTT_UNUSED_ENTRY(202),
835 I40E_PTT_UNUSED_ENTRY(203),
836 I40E_PTT_UNUSED_ENTRY(204),
837 I40E_PTT_UNUSED_ENTRY(205),
838 I40E_PTT_UNUSED_ENTRY(206),
839 I40E_PTT_UNUSED_ENTRY(207),
840 I40E_PTT_UNUSED_ENTRY(208),
841 I40E_PTT_UNUSED_ENTRY(209),
842
843 I40E_PTT_UNUSED_ENTRY(210),
844 I40E_PTT_UNUSED_ENTRY(211),
845 I40E_PTT_UNUSED_ENTRY(212),
846 I40E_PTT_UNUSED_ENTRY(213),
847 I40E_PTT_UNUSED_ENTRY(214),
848 I40E_PTT_UNUSED_ENTRY(215),
849 I40E_PTT_UNUSED_ENTRY(216),
850 I40E_PTT_UNUSED_ENTRY(217),
851 I40E_PTT_UNUSED_ENTRY(218),
852 I40E_PTT_UNUSED_ENTRY(219),
853
854 I40E_PTT_UNUSED_ENTRY(220),
855 I40E_PTT_UNUSED_ENTRY(221),
856 I40E_PTT_UNUSED_ENTRY(222),
857 I40E_PTT_UNUSED_ENTRY(223),
858 I40E_PTT_UNUSED_ENTRY(224),
859 I40E_PTT_UNUSED_ENTRY(225),
860 I40E_PTT_UNUSED_ENTRY(226),
861 I40E_PTT_UNUSED_ENTRY(227),
862 I40E_PTT_UNUSED_ENTRY(228),
863 I40E_PTT_UNUSED_ENTRY(229),
864
865 I40E_PTT_UNUSED_ENTRY(230),
866 I40E_PTT_UNUSED_ENTRY(231),
867 I40E_PTT_UNUSED_ENTRY(232),
868 I40E_PTT_UNUSED_ENTRY(233),
869 I40E_PTT_UNUSED_ENTRY(234),
870 I40E_PTT_UNUSED_ENTRY(235),
871 I40E_PTT_UNUSED_ENTRY(236),
872 I40E_PTT_UNUSED_ENTRY(237),
873 I40E_PTT_UNUSED_ENTRY(238),
874 I40E_PTT_UNUSED_ENTRY(239),
875
876 I40E_PTT_UNUSED_ENTRY(240),
877 I40E_PTT_UNUSED_ENTRY(241),
878 I40E_PTT_UNUSED_ENTRY(242),
879 I40E_PTT_UNUSED_ENTRY(243),
880 I40E_PTT_UNUSED_ENTRY(244),
881 I40E_PTT_UNUSED_ENTRY(245),
882 I40E_PTT_UNUSED_ENTRY(246),
883 I40E_PTT_UNUSED_ENTRY(247),
884 I40E_PTT_UNUSED_ENTRY(248),
885 I40E_PTT_UNUSED_ENTRY(249),
886
887 I40E_PTT_UNUSED_ENTRY(250),
888 I40E_PTT_UNUSED_ENTRY(251),
889 I40E_PTT_UNUSED_ENTRY(252),
890 I40E_PTT_UNUSED_ENTRY(253),
891 I40E_PTT_UNUSED_ENTRY(254),
892 I40E_PTT_UNUSED_ENTRY(255)
893};
894
895/**
896 * i40e_init_shared_code - Initialize the shared code
897 * @hw: pointer to hardware structure
898 *
899 * This assigns the MAC type and PHY code and inits the NVM.
900 * Does not touch the hardware. This function must be called prior to any
901 * other function in the shared code. The i40e_hw structure should be
902 * memset to 0 prior to calling this function. The following fields in
903 * hw structure should be filled in prior to calling this function:
904 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
905 * subsystem_vendor_id, and revision_id
906 **/
907i40e_status i40e_init_shared_code(struct i40e_hw *hw)
908{
909 i40e_status status = 0;
910 u32 port, ari, func_rid;
911
912 i40e_set_mac_type(hw);
913
914 switch (hw->mac.type) {
915 case I40E_MAC_XL710:
916 case I40E_MAC_X722:
917 break;
918 default:
919 return I40E_ERR_DEVICE_NOT_SUPPORTED;
920 }
921
922 hw->phy.get_link_info = true;
923
924	/* Determine port number and PF number */
925 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
926 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
927 hw->port = (u8)port;
928 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
929 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
930 func_rid = rd32(hw, I40E_PF_FUNC_RID);
931 if (ari)
932 hw->pf_id = (u8)(func_rid & 0xff);
933 else
934 hw->pf_id = (u8)(func_rid & 0x7);
935
936 if (hw->mac.type == I40E_MAC_X722)
937 hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
938 I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
939
940 status = i40e_init_nvm(hw);
941 return status;
942}
943
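/* Illustrative probe-time sketch (not part of the original file) showing the
 * prerequisites listed in the comment above: the caller fills in the PCI
 * identifiers and mapped BAR before handing the zeroed i40e_hw to
 * i40e_init_shared_code(). The i40e_example_* name and argument list are
 * assumptions for this example; the real probe path may order things
 * differently.
 */
static i40e_status i40e_example_hw_init(struct i40e_pf *pf,
					struct pci_dev *pdev,
					u8 __iomem *hw_addr)
{
	struct i40e_hw *hw = &pf->hw;

	hw->back = pf;
	hw->hw_addr = hw_addr;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	/* assigns mac.type, reads port/PF ids and initializes the NVM */
	return i40e_init_shared_code(hw);
}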
944/**
945 * i40e_aq_mac_address_read - Retrieve the MAC addresses
946 * @hw: pointer to the hw struct
947 * @flags: a return indicator of what addresses were added to the addr store
948 * @addrs: the requestor's mac addr store
949 * @cmd_details: pointer to command details structure or NULL
950 **/
951static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
952 u16 *flags,
953 struct i40e_aqc_mac_address_read_data *addrs,
954 struct i40e_asq_cmd_details *cmd_details)
955{
956 struct i40e_aq_desc desc;
957 struct i40e_aqc_mac_address_read *cmd_data =
958 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
959 i40e_status status;
960
961 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
962 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
963
964 status = i40e_asq_send_command(hw, &desc, addrs,
965 sizeof(*addrs), cmd_details);
966 *flags = le16_to_cpu(cmd_data->command_flags);
967
968 return status;
969}
970
971/**
972 * i40e_aq_mac_address_write - Change the MAC addresses
973 * @hw: pointer to the hw struct
974 * @flags: indicates which MAC to be written
975 * @mac_addr: address to write
976 * @cmd_details: pointer to command details structure or NULL
977 **/
978i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
979 u16 flags, u8 *mac_addr,
980 struct i40e_asq_cmd_details *cmd_details)
981{
982 struct i40e_aq_desc desc;
983 struct i40e_aqc_mac_address_write *cmd_data =
984 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
985 i40e_status status;
986
987 i40e_fill_default_direct_cmd_desc(&desc,
988 i40e_aqc_opc_mac_address_write);
989 cmd_data->command_flags = cpu_to_le16(flags);
990 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
991 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
992 ((u32)mac_addr[3] << 16) |
993 ((u32)mac_addr[4] << 8) |
994 mac_addr[5]);
995
996 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
997
998 return status;
999}
1000
1001/**
1002 * i40e_get_mac_addr - get MAC address
1003 * @hw: pointer to the HW structure
1004 * @mac_addr: pointer to MAC address
1005 *
1006 * Reads the adapter's MAC address from register
1007 **/
1008i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1009{
1010 struct i40e_aqc_mac_address_read_data addrs;
1011 i40e_status status;
1012 u16 flags = 0;
1013
1014 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1015
1016 if (flags & I40E_AQC_LAN_ADDR_VALID)
1017 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
1018
1019 return status;
1020}
1021
1022/**
1023 * i40e_get_port_mac_addr - get Port MAC address
1024 * @hw: pointer to the HW structure
1025 * @mac_addr: pointer to Port MAC address
1026 *
1027 * Reads the adapter's Port MAC address
1028 **/
1029i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1030{
1031 struct i40e_aqc_mac_address_read_data addrs;
1032 i40e_status status;
1033 u16 flags = 0;
1034
1035 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1036 if (status)
1037 return status;
1038
1039 if (flags & I40E_AQC_PORT_ADDR_VALID)
1040 ether_addr_copy(mac_addr, addrs.port_mac);
1041 else
1042 status = I40E_ERR_INVALID_MAC_ADDR;
1043
1044 return status;
1045}
1046
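/* Illustrative usage sketch (not part of the original file): fetch the LAN
 * MAC with the helper above and sanity-check it before use, much as the
 * probe path does when populating hw->mac.addr. The i40e_example_* name is
 * an assumption for this example.
 */
static i40e_status i40e_example_fetch_mac(struct i40e_hw *hw)
{
	i40e_status ret;

	ret = i40e_get_mac_addr(hw, hw->mac.addr);
	if (ret)
		return ret;

	if (!is_valid_ether_addr(hw->mac.addr))
		return I40E_ERR_INVALID_MAC_ADDR;

	return 0;
}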
1047/**
1048 * i40e_pre_tx_queue_cfg - pre tx queue configure
1049 * @hw: pointer to the HW structure
1050 * @queue: target PF queue index
1051 * @enable: state change request
1052 *
1053 * Handles hw requirement to indicate intention to enable
1054 * or disable target queue.
1055 **/
1056void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
1057{
1058 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
1059 u32 reg_block = 0;
1060 u32 reg_val;
1061
1062 if (abs_queue_idx >= 128) {
1063 reg_block = abs_queue_idx / 128;
1064 abs_queue_idx %= 128;
1065 }
1066
1067 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1068 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1069 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1070
1071 if (enable)
1072 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
1073 else
1074 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1075
1076 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
1077}
1078
1079/**
1080 * i40e_read_pba_string - Reads part number string from EEPROM
1081 * @hw: pointer to hardware structure
1082 * @pba_num: stores the part number string from the EEPROM
1083 * @pba_num_size: part number string buffer length
1084 *
1085 * Reads the part number string from the EEPROM.
1086 **/
1087i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
1088 u32 pba_num_size)
1089{
1090 i40e_status status = 0;
1091 u16 pba_word = 0;
1092 u16 pba_size = 0;
1093 u16 pba_ptr = 0;
1094 u16 i = 0;
1095
1096 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
1097 if (status || (pba_word != 0xFAFA)) {
1098 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
1099 return status;
1100 }
1101
1102 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
1103 if (status) {
1104 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
1105 return status;
1106 }
1107
1108 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
1109 if (status) {
1110 hw_dbg(hw, "Failed to read PBA Block size.\n");
1111 return status;
1112 }
1113
1114 /* Subtract one to get PBA word count (PBA Size word is included in
1115 * total size)
1116 */
1117 pba_size--;
1118 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1119		hw_dbg(hw, "Buffer too small for PBA data.\n");
1120 return I40E_ERR_PARAM;
1121 }
1122
1123 for (i = 0; i < pba_size; i++) {
1124 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1125 if (status) {
1126 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1127 return status;
1128 }
1129
1130 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1131 pba_num[(i * 2) + 1] = pba_word & 0xFF;
1132 }
1133 pba_num[(pba_size * 2)] = '\0';
1134
1135 return status;
1136}
1137
1138/**
1139 * i40e_get_media_type - Gets media type
1140 * @hw: pointer to the hardware structure
1141 **/
1142static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1143{
1144 enum i40e_media_type media;
1145
1146 switch (hw->phy.link_info.phy_type) {
1147 case I40E_PHY_TYPE_10GBASE_SR:
1148 case I40E_PHY_TYPE_10GBASE_LR:
1149 case I40E_PHY_TYPE_1000BASE_SX:
1150 case I40E_PHY_TYPE_1000BASE_LX:
1151 case I40E_PHY_TYPE_40GBASE_SR4:
1152 case I40E_PHY_TYPE_40GBASE_LR4:
1153 case I40E_PHY_TYPE_25GBASE_LR:
1154 case I40E_PHY_TYPE_25GBASE_SR:
1155 media = I40E_MEDIA_TYPE_FIBER;
1156 break;
1157 case I40E_PHY_TYPE_100BASE_TX:
1158 case I40E_PHY_TYPE_1000BASE_T:
1159 case I40E_PHY_TYPE_2_5GBASE_T:
1160 case I40E_PHY_TYPE_5GBASE_T:
1161 case I40E_PHY_TYPE_10GBASE_T:
1162 media = I40E_MEDIA_TYPE_BASET;
1163 break;
1164 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1165 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1166 case I40E_PHY_TYPE_10GBASE_CR1:
1167 case I40E_PHY_TYPE_40GBASE_CR4:
1168 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1169 case I40E_PHY_TYPE_40GBASE_AOC:
1170 case I40E_PHY_TYPE_10GBASE_AOC:
1171 case I40E_PHY_TYPE_25GBASE_CR:
1172 case I40E_PHY_TYPE_25GBASE_AOC:
1173 case I40E_PHY_TYPE_25GBASE_ACC:
1174 media = I40E_MEDIA_TYPE_DA;
1175 break;
1176 case I40E_PHY_TYPE_1000BASE_KX:
1177 case I40E_PHY_TYPE_10GBASE_KX4:
1178 case I40E_PHY_TYPE_10GBASE_KR:
1179 case I40E_PHY_TYPE_40GBASE_KR4:
1180 case I40E_PHY_TYPE_20GBASE_KR2:
1181 case I40E_PHY_TYPE_25GBASE_KR:
1182 media = I40E_MEDIA_TYPE_BACKPLANE;
1183 break;
1184 case I40E_PHY_TYPE_SGMII:
1185 case I40E_PHY_TYPE_XAUI:
1186 case I40E_PHY_TYPE_XFI:
1187 case I40E_PHY_TYPE_XLAUI:
1188 case I40E_PHY_TYPE_XLPPI:
1189 default:
1190 media = I40E_MEDIA_TYPE_UNKNOWN;
1191 break;
1192 }
1193
1194 return media;
1195}
1196
1197/**
1198 * i40e_poll_globr - Poll for Global Reset completion
1199 * @hw: pointer to the hardware structure
1200 * @retry_limit: how many times to retry before failure
1201 **/
1202static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1203 u32 retry_limit)
1204{
1205 u32 cnt, reg = 0;
1206
1207 for (cnt = 0; cnt < retry_limit; cnt++) {
1208 reg = rd32(hw, I40E_GLGEN_RSTAT);
1209 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1210 return 0;
1211 msleep(100);
1212 }
1213
1214 hw_dbg(hw, "Global reset failed.\n");
1215 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1216
1217 return I40E_ERR_RESET_FAILED;
1218}
1219
1220#define I40E_PF_RESET_WAIT_COUNT_A0 200
1221#define I40E_PF_RESET_WAIT_COUNT 200
1222/**
1223 * i40e_pf_reset - Reset the PF
1224 * @hw: pointer to the hardware structure
1225 *
1226 * Assuming someone else has triggered a global reset,
1227 * assure the global reset is complete and then reset the PF
1228 **/
1229i40e_status i40e_pf_reset(struct i40e_hw *hw)
1230{
1231 u32 cnt = 0;
1232 u32 cnt1 = 0;
1233 u32 reg = 0;
1234 u32 grst_del;
1235
1236 /* Poll for Global Reset steady state in case of recent GRST.
1237 * The grst delay value is in 100ms units, and we'll wait a
1238 * couple counts longer to be sure we don't just miss the end.
1239 */
1240 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1241 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1242 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1243
1244	/* It can take up to 15 secs for GRST steady state.
1245 * Bump it to 16 secs max to be safe.
1246 */
1247 grst_del = grst_del * 20;
1248
1249 for (cnt = 0; cnt < grst_del; cnt++) {
1250 reg = rd32(hw, I40E_GLGEN_RSTAT);
1251 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1252 break;
1253 msleep(100);
1254 }
1255 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1256 hw_dbg(hw, "Global reset polling failed to complete.\n");
1257 return I40E_ERR_RESET_FAILED;
1258 }
1259
1260 /* Now Wait for the FW to be ready */
1261 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1262 reg = rd32(hw, I40E_GLNVM_ULD);
1263 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1264 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1265 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1266 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1267 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1268 break;
1269 }
1270 usleep_range(10000, 20000);
1271 }
1272 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1273 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1274		hw_dbg(hw, "wait for FW Reset complete timed out\n");
1275 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1276 return I40E_ERR_RESET_FAILED;
1277 }
1278
1279 /* If there was a Global Reset in progress when we got here,
1280 * we don't need to do the PF Reset
1281 */
1282 if (!cnt) {
1283 u32 reg2 = 0;
1284 if (hw->revision_id == 0)
1285 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1286 else
1287 cnt = I40E_PF_RESET_WAIT_COUNT;
1288 reg = rd32(hw, I40E_PFGEN_CTRL);
1289 wr32(hw, I40E_PFGEN_CTRL,
1290 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1291 for (; cnt; cnt--) {
1292 reg = rd32(hw, I40E_PFGEN_CTRL);
1293 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1294 break;
1295 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1296 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1297 break;
1298 usleep_range(1000, 2000);
1299 }
1300 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1301 if (i40e_poll_globr(hw, grst_del))
1302 return I40E_ERR_RESET_FAILED;
1303 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1304 hw_dbg(hw, "PF reset polling failed to complete.\n");
1305 return I40E_ERR_RESET_FAILED;
1306 }
1307 }
1308
1309 i40e_clear_pxe_mode(hw);
1310
1311 return 0;
1312}
1313
1314/**
1315 * i40e_clear_hw - clear out any left over hw state
1316 * @hw: pointer to the hw struct
1317 *
1318 * Clear queues and interrupts, typically called at init time,
1319 * but after the capabilities have been found so we know how many
1320 * queues and msix vectors have been allocated.
1321 **/
1322void i40e_clear_hw(struct i40e_hw *hw)
1323{
1324 u32 num_queues, base_queue;
1325 u32 num_pf_int;
1326 u32 num_vf_int;
1327 u32 num_vfs;
1328 u32 i, j;
1329 u32 val;
1330 u32 eol = 0x7ff;
1331
1332 /* get number of interrupts, queues, and VFs */
1333 val = rd32(hw, I40E_GLPCI_CNF2);
1334 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1335 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1336 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1337 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1338
1339 val = rd32(hw, I40E_PFLAN_QALLOC);
1340 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1341 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1342 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1343 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1344 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1345 num_queues = (j - base_queue) + 1;
1346 else
1347 num_queues = 0;
1348
1349 val = rd32(hw, I40E_PF_VT_PFALLOC);
1350 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1351 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1352 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1353 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1354 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1355 num_vfs = (j - i) + 1;
1356 else
1357 num_vfs = 0;
1358
1359 /* stop all the interrupts */
1360 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1361 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1362 for (i = 0; i < num_pf_int - 2; i++)
1363 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1364
1365 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1366 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1367 wr32(hw, I40E_PFINT_LNKLST0, val);
1368 for (i = 0; i < num_pf_int - 2; i++)
1369 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1370 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1371 for (i = 0; i < num_vfs; i++)
1372 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1373 for (i = 0; i < num_vf_int - 2; i++)
1374 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1375
1376 /* warn the HW of the coming Tx disables */
1377 for (i = 0; i < num_queues; i++) {
1378 u32 abs_queue_idx = base_queue + i;
1379 u32 reg_block = 0;
1380
1381 if (abs_queue_idx >= 128) {
1382 reg_block = abs_queue_idx / 128;
1383 abs_queue_idx %= 128;
1384 }
1385
1386 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1387 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1388 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1389 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1390
1391 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1392 }
1393 udelay(400);
1394
1395 /* stop all the queues */
1396 for (i = 0; i < num_queues; i++) {
1397 wr32(hw, I40E_QINT_TQCTL(i), 0);
1398 wr32(hw, I40E_QTX_ENA(i), 0);
1399 wr32(hw, I40E_QINT_RQCTL(i), 0);
1400 wr32(hw, I40E_QRX_ENA(i), 0);
1401 }
1402
1403 /* short wait for all queue disables to settle */
1404 udelay(50);
1405}
1406
1407/**
1408 * i40e_clear_pxe_mode - clear pxe operations mode
1409 * @hw: pointer to the hw struct
1410 *
1411 * Make sure all PXE mode settings are cleared, including things
1412 * like descriptor fetch/write-back mode.
1413 **/
1414void i40e_clear_pxe_mode(struct i40e_hw *hw)
1415{
1416 u32 reg;
1417
1418 if (i40e_check_asq_alive(hw))
1419 i40e_aq_clear_pxe_mode(hw, NULL);
1420
1421 /* Clear single descriptor fetch/write-back mode */
1422 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1423
1424 if (hw->revision_id == 0) {
1425		/* As a workaround, clear PXE_MODE instead of setting it */
1426 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1427 } else {
1428 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1429 }
1430}
1431
1432/**
1433 * i40e_led_is_mine - helper to find matching led
1434 * @hw: pointer to the hw struct
1435 * @idx: index into GPIO registers
1436 *
1437 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1438 */
1439static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1440{
1441 u32 gpio_val = 0;
1442 u32 port;
1443
1444 if (!hw->func_caps.led[idx])
1445 return 0;
1446
1447 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1448 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1449 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1450
1451 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1452 * if it is not our port then ignore
1453 */
1454 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1455 (port != hw->port))
1456 return 0;
1457
1458 return gpio_val;
1459}
1460
1461#define I40E_COMBINED_ACTIVITY 0xA
1462#define I40E_FILTER_ACTIVITY 0xE
1463#define I40E_LINK_ACTIVITY 0xC
1464#define I40E_MAC_ACTIVITY 0xD
1465#define I40E_LED0 22
1466
1467/**
1468 * i40e_led_get - return current on/off mode
1469 * @hw: pointer to the hw struct
1470 *
1471 * The value returned is the 'mode' field as defined in the
1472 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1473 * values are variations of possible behaviors relating to
1474 * blink, link, and wire.
1475 **/
1476u32 i40e_led_get(struct i40e_hw *hw)
1477{
1478 u32 mode = 0;
1479 int i;
1480
1481 /* as per the documentation GPIO 22-29 are the LED
1482 * GPIO pins named LED0..LED7
1483 */
1484 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1485 u32 gpio_val = i40e_led_is_mine(hw, i);
1486
1487 if (!gpio_val)
1488 continue;
1489
1490 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1491 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1492 break;
1493 }
1494
1495 return mode;
1496}
1497
1498/**
1499 * i40e_led_set - set new on/off mode
1500 * @hw: pointer to the hw struct
1501 * @mode: 0=off, 0xf=on (else see manual for mode details)
1502 * @blink: true if the LED should blink when on, false if steady
1503 *
1504 * If this function is used to turn on the blink, it should also be
1505 * used to turn the blink off when restoring the original state.
1506 **/
1507void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1508{
1509 int i;
1510
1511 if (mode & 0xfffffff0)
1512 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1513
1514 /* as per the documentation GPIO 22-29 are the LED
1515 * GPIO pins named LED0..LED7
1516 */
1517 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1518 u32 gpio_val = i40e_led_is_mine(hw, i);
1519
1520 if (!gpio_val)
1521 continue;
1522 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1523 /* this & is a bit of paranoia, but serves as a range check */
1524 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1525 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1526
1527 if (blink)
1528 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1529 else
1530 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1531
1532 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1533 break;
1534 }
1535}
1536
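/* Illustrative identify-LED sketch (not part of the original file): save the
 * current mode with i40e_led_get(), blink while the user locates the port,
 * then restore the saved mode, which is roughly what the driver's ethtool
 * set_phys_id handling does. The i40e_example_* name and the start flag are
 * assumptions for this example.
 */
static void i40e_example_identify_led(struct i40e_hw *hw, bool start,
				      u32 *saved_mode)
{
	if (start) {
		*saved_mode = i40e_led_get(hw);
		i40e_led_set(hw, 0xf, true);		/* full on, blinking */
	} else {
		i40e_led_set(hw, *saved_mode, false);	/* restore */
	}
}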
1537/* Admin command wrappers */
1538
1539/**
1540 * i40e_aq_get_phy_capabilities
1541 * @hw: pointer to the hw struct
1542 * @abilities: structure for PHY capabilities to be filled
1543 * @qualified_modules: report Qualified Modules
1544 * @report_init: report init capabilities (active are default)
1545 * @cmd_details: pointer to command details structure or NULL
1546 *
1547 * Returns the various PHY abilities supported on the Port.
1548 **/
1549i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1550 bool qualified_modules, bool report_init,
1551 struct i40e_aq_get_phy_abilities_resp *abilities,
1552 struct i40e_asq_cmd_details *cmd_details)
1553{
1554 struct i40e_aq_desc desc;
1555 i40e_status status;
1556 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1557 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1558
1559 if (!abilities)
1560 return I40E_ERR_PARAM;
1561
1562 do {
1563 i40e_fill_default_direct_cmd_desc(&desc,
1564 i40e_aqc_opc_get_phy_abilities);
1565
1566 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1567 if (abilities_size > I40E_AQ_LARGE_BUF)
1568 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1569
1570 if (qualified_modules)
1571 desc.params.external.param0 |=
1572 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1573
1574 if (report_init)
1575 desc.params.external.param0 |=
1576 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1577
1578 status = i40e_asq_send_command(hw, &desc, abilities,
1579 abilities_size, cmd_details);
1580
1581 switch (hw->aq.asq_last_status) {
1582 case I40E_AQ_RC_EIO:
1583 status = I40E_ERR_UNKNOWN_PHY;
1584 break;
1585 case I40E_AQ_RC_EAGAIN:
1586 usleep_range(1000, 2000);
1587 total_delay++;
1588 status = I40E_ERR_TIMEOUT;
1589 break;
1590 /* also covers I40E_AQ_RC_OK */
1591 default:
1592 break;
1593 }
1594
1595 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1596 (total_delay < max_delay));
1597
1598 if (status)
1599 return status;
1600
1601 if (report_init) {
1602 if (hw->mac.type == I40E_MAC_XL710 &&
1603 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1604 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1605 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1606 } else {
1607 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1608 hw->phy.phy_types |=
1609 ((u64)abilities->phy_type_ext << 32);
1610 }
1611 }
1612
1613 return status;
1614}
1615
1616/**
1617 * i40e_aq_set_phy_config
1618 * @hw: pointer to the hw struct
1619 * @config: structure with PHY configuration to be set
1620 * @cmd_details: pointer to command details structure or NULL
1621 *
1622 * Set the various PHY configuration parameters
1623 * supported on the Port. One or more of the Set PHY config parameters may be
1624 * ignored in an MFP mode as the PF may not have the privilege to set some
1625 * of the PHY Config parameters. This status will be indicated by the
1626 * command response.
1627 **/
1628enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1629 struct i40e_aq_set_phy_config *config,
1630 struct i40e_asq_cmd_details *cmd_details)
1631{
1632 struct i40e_aq_desc desc;
1633 struct i40e_aq_set_phy_config *cmd =
1634 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1635 enum i40e_status_code status;
1636
1637 if (!config)
1638 return I40E_ERR_PARAM;
1639
1640 i40e_fill_default_direct_cmd_desc(&desc,
1641 i40e_aqc_opc_set_phy_config);
1642
1643 *cmd = *config;
1644
1645 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1646
1647 return status;
1648}
1649
1650static noinline_for_stack enum i40e_status_code
1651i40e_set_fc_status(struct i40e_hw *hw,
1652 struct i40e_aq_get_phy_abilities_resp *abilities,
1653 bool atomic_restart)
1654{
1655 struct i40e_aq_set_phy_config config;
1656 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1657 u8 pause_mask = 0x0;
1658
1659 switch (fc_mode) {
1660 case I40E_FC_FULL:
1661 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1662 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1663 break;
1664 case I40E_FC_RX_PAUSE:
1665 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1666 break;
1667 case I40E_FC_TX_PAUSE:
1668 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1669 break;
1670 default:
1671 break;
1672 }
1673
1674 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1675 /* clear the old pause settings */
1676 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1677 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1678 /* set the new abilities */
1679 config.abilities |= pause_mask;
1680 /* If the abilities have changed, then set the new config */
1681 if (config.abilities == abilities->abilities)
1682 return 0;
1683
1684 /* Auto restart link so settings take effect */
1685 if (atomic_restart)
1686 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1687 /* Copy over all the old settings */
1688 config.phy_type = abilities->phy_type;
1689 config.phy_type_ext = abilities->phy_type_ext;
1690 config.link_speed = abilities->link_speed;
1691 config.eee_capability = abilities->eee_capability;
1692 config.eeer = abilities->eeer_val;
1693 config.low_power_ctrl = abilities->d3_lpan;
1694 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1695 I40E_AQ_PHY_FEC_CONFIG_MASK;
1696
1697 return i40e_aq_set_phy_config(hw, &config, NULL);
1698}
1699
1700/**
1701 * i40e_set_fc
1702 * @hw: pointer to the hw struct
1703 * @aq_failures: buffer to return AdminQ failure information
1704 * @atomic_restart: whether to enable atomic link restart
1705 *
1706 * Set the requested flow control mode using set_phy_config.
1707 **/
1708enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1709 bool atomic_restart)
1710{
1711 struct i40e_aq_get_phy_abilities_resp abilities;
1712 enum i40e_status_code status;
1713
1714 *aq_failures = 0x0;
1715
1716 /* Get the current phy config */
1717 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1718 NULL);
1719 if (status) {
1720 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1721 return status;
1722 }
1723
1724 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1725 if (status)
1726 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1727
1728 /* Update the link info */
1729 status = i40e_update_link_info(hw);
1730 if (status) {
1731 /* Wait a little bit (on 40G cards it sometimes takes a really
1732 * long time for link to come back from the atomic reset)
1733 * and try once more
1734 */
1735 msleep(1000);
1736 status = i40e_update_link_info(hw);
1737 }
1738 if (status)
1739 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1740
1741 return status;
1742}
1743
1744/**
1745 * i40e_aq_clear_pxe_mode
1746 * @hw: pointer to the hw struct
1747 * @cmd_details: pointer to command details structure or NULL
1748 *
1749 * Tell the firmware that the driver is taking over from PXE
1750 **/
1751i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1752 struct i40e_asq_cmd_details *cmd_details)
1753{
1754 i40e_status status;
1755 struct i40e_aq_desc desc;
1756 struct i40e_aqc_clear_pxe *cmd =
1757 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1758
1759 i40e_fill_default_direct_cmd_desc(&desc,
1760 i40e_aqc_opc_clear_pxe_mode);
1761
1762 cmd->rx_cnt = 0x2;
1763
1764 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1765
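	/* the register write below finishes taking the LAN engine out of PXE mode */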
1766 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1767
1768 return status;
1769}
1770
1771/**
1772 * i40e_aq_set_link_restart_an
1773 * @hw: pointer to the hw struct
1774 * @enable_link: if true: enable link, if false: disable link
1775 * @cmd_details: pointer to command details structure or NULL
1776 *
1777 * Sets up the link and restarts the Auto-Negotiation over the link.
1778 **/
1779i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1780 bool enable_link,
1781 struct i40e_asq_cmd_details *cmd_details)
1782{
1783 struct i40e_aq_desc desc;
1784 struct i40e_aqc_set_link_restart_an *cmd =
1785 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1786 i40e_status status;
1787
1788 i40e_fill_default_direct_cmd_desc(&desc,
1789 i40e_aqc_opc_set_link_restart_an);
1790
1791 cmd->command = I40E_AQ_PHY_RESTART_AN;
1792 if (enable_link)
1793 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1794 else
1795 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1796
1797 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1798
1799 return status;
1800}
1801
1802/**
1803 * i40e_aq_get_link_info
1804 * @hw: pointer to the hw struct
1805 * @enable_lse: enable/disable LinkStatusEvent reporting
1806 * @link: pointer to link status structure - optional
1807 * @cmd_details: pointer to command details structure or NULL
1808 *
1809 * Returns the link status of the adapter.
1810 **/
1811i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1812 bool enable_lse, struct i40e_link_status *link,
1813 struct i40e_asq_cmd_details *cmd_details)
1814{
1815 struct i40e_aq_desc desc;
1816 struct i40e_aqc_get_link_status *resp =
1817 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1818 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1819 i40e_status status;
1820 bool tx_pause, rx_pause;
1821 u16 command_flags;
1822
1823 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1824
1825 if (enable_lse)
1826 command_flags = I40E_AQ_LSE_ENABLE;
1827 else
1828 command_flags = I40E_AQ_LSE_DISABLE;
1829 resp->command_flags = cpu_to_le16(command_flags);
1830
1831 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1832
1833 if (status)
1834 goto aq_get_link_info_exit;
1835
1836 /* save off old link status information */
1837 hw->phy.link_info_old = *hw_link_info;
1838
1839 /* update link status */
1840 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1841 hw->phy.media_type = i40e_get_media_type(hw);
1842 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1843 hw_link_info->link_info = resp->link_info;
1844 hw_link_info->an_info = resp->an_info;
1845 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1846 I40E_AQ_CONFIG_FEC_RS_ENA);
1847 hw_link_info->ext_info = resp->ext_info;
1848 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1849 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1850 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1851
1852 /* update fc info */
1853 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1854 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1855 if (tx_pause && rx_pause)
1856 hw->fc.current_mode = I40E_FC_FULL;
1857 else if (tx_pause)
1858 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1859 else if (rx_pause)
1860 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1861 else
1862 hw->fc.current_mode = I40E_FC_NONE;
1863
1864 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1865 hw_link_info->crc_enable = true;
1866 else
1867 hw_link_info->crc_enable = false;
1868
1869 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1870 hw_link_info->lse_enable = true;
1871 else
1872 hw_link_info->lse_enable = false;
1873
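	/* XL710 firmware older than 4.40 reports phy_type 0xE here;
	 * normalize it to 10GBASE SFP+ Cu
	 */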
1874 if ((hw->mac.type == I40E_MAC_XL710) &&
1875 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1876 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1877 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1878
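	/* on parts with AQ PHY access (other than X722), the link status
	 * response also carries the supported PHY types
	 */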
1879 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1880 hw->mac.type != I40E_MAC_X722) {
1881 __le32 tmp;
1882
1883 memcpy(&tmp, resp->link_type, sizeof(tmp));
1884 hw->phy.phy_types = le32_to_cpu(tmp);
1885 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1886 }
1887
1888 /* save link status information */
1889 if (link)
1890 *link = *hw_link_info;
1891
1892 /* flag cleared so helper functions don't call AQ again */
1893 hw->phy.get_link_info = false;
1894
1895aq_get_link_info_exit:
1896 return status;
1897}
1898
1899/**
1900 * i40e_aq_set_phy_int_mask
1901 * @hw: pointer to the hw struct
1902 * @mask: interrupt mask to be set
1903 * @cmd_details: pointer to command details structure or NULL
1904 *
1905 * Set link interrupt mask.
1906 **/
1907i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1908 u16 mask,
1909 struct i40e_asq_cmd_details *cmd_details)
1910{
1911 struct i40e_aq_desc desc;
1912 struct i40e_aqc_set_phy_int_mask *cmd =
1913 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1914 i40e_status status;
1915
1916 i40e_fill_default_direct_cmd_desc(&desc,
1917 i40e_aqc_opc_set_phy_int_mask);
1918
1919 cmd->event_mask = cpu_to_le16(mask);
1920
1921 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1922
1923 return status;
1924}
1925
1926/**
1927 * i40e_aq_set_phy_debug
1928 * @hw: pointer to the hw struct
1929 * @cmd_flags: debug command flags
1930 * @cmd_details: pointer to command details structure or NULL
1931 *
1932 * Set PHY debug command flags, e.g. to request a reset of the external PHY.
1933 **/
1934i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1935 struct i40e_asq_cmd_details *cmd_details)
1936{
1937 struct i40e_aq_desc desc;
1938 struct i40e_aqc_set_phy_debug *cmd =
1939 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1940 i40e_status status;
1941
1942 i40e_fill_default_direct_cmd_desc(&desc,
1943 i40e_aqc_opc_set_phy_debug);
1944
1945 cmd->command_flags = cmd_flags;
1946
1947 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1948
1949 return status;
1950}
1951
1952/**
1953 * i40e_aq_add_vsi
1954 * @hw: pointer to the hw struct
1955 * @vsi_ctx: pointer to a vsi context struct
1956 * @cmd_details: pointer to command details structure or NULL
1957 *
1958 * Add a VSI context to the hardware.
1959 **/
1960i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1961 struct i40e_vsi_context *vsi_ctx,
1962 struct i40e_asq_cmd_details *cmd_details)
1963{
1964 struct i40e_aq_desc desc;
1965 struct i40e_aqc_add_get_update_vsi *cmd =
1966 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1967 struct i40e_aqc_add_get_update_vsi_completion *resp =
1968 (struct i40e_aqc_add_get_update_vsi_completion *)
1969 &desc.params.raw;
1970 i40e_status status;
1971
1972 i40e_fill_default_direct_cmd_desc(&desc,
1973 i40e_aqc_opc_add_vsi);
1974
1975 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1976 cmd->connection_type = vsi_ctx->connection_type;
1977 cmd->vf_id = vsi_ctx->vf_num;
1978 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1979
1980 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1981
1982 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1983 sizeof(vsi_ctx->info), cmd_details);
1984
1985 if (status)
1986 goto aq_add_vsi_exit;
1987
1988 vsi_ctx->seid = le16_to_cpu(resp->seid);
1989 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1990 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1991 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1992
1993aq_add_vsi_exit:
1994 return status;
1995}
1996
1997/**
1998 * i40e_aq_set_default_vsi
1999 * @hw: pointer to the hw struct
2000 * @seid: vsi number
2001 * @cmd_details: pointer to command details structure or NULL
2002 **/
2003i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
2004 u16 seid,
2005 struct i40e_asq_cmd_details *cmd_details)
2006{
2007 struct i40e_aq_desc desc;
2008 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2009 (struct i40e_aqc_set_vsi_promiscuous_modes *)
2010 &desc.params.raw;
2011 i40e_status status;
2012
2013 i40e_fill_default_direct_cmd_desc(&desc,
2014 i40e_aqc_opc_set_vsi_promiscuous_modes);
2015
2016 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2017 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2018 cmd->seid = cpu_to_le16(seid);
2019
2020 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2021
2022 return status;
2023}
2024
2025/**
2026 * i40e_aq_clear_default_vsi
2027 * @hw: pointer to the hw struct
2028 * @seid: vsi number
2029 * @cmd_details: pointer to command details structure or NULL
2030 **/
2031i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
2032 u16 seid,
2033 struct i40e_asq_cmd_details *cmd_details)
2034{
2035 struct i40e_aq_desc desc;
2036 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2037 (struct i40e_aqc_set_vsi_promiscuous_modes *)
2038 &desc.params.raw;
2039 i40e_status status;
2040
2041 i40e_fill_default_direct_cmd_desc(&desc,
2042 i40e_aqc_opc_set_vsi_promiscuous_modes);
2043
2044 cmd->promiscuous_flags = cpu_to_le16(0);
2045 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2046 cmd->seid = cpu_to_le16(seid);
2047
2048 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2049
2050 return status;
2051}
2052
2053/**
2054 * i40e_aq_set_vsi_unicast_promiscuous
2055 * @hw: pointer to the hw struct
2056 * @seid: vsi number
2057 * @set: set unicast promiscuous enable/disable
2058 * @cmd_details: pointer to command details structure or NULL
2059 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
2060 **/
2061i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
2062 u16 seid, bool set,
2063 struct i40e_asq_cmd_details *cmd_details,
2064 bool rx_only_promisc)
2065{
2066 struct i40e_aq_desc desc;
2067 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2068 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2069 i40e_status status;
2070 u16 flags = 0;
2071
2072 i40e_fill_default_direct_cmd_desc(&desc,
2073 i40e_aqc_opc_set_vsi_promiscuous_modes);
2074
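	/* the Tx promiscuous flag is only understood by FW API 1.5 and later */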
2075 if (set) {
2076 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2077 if (rx_only_promisc &&
2078 (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
2079 (hw->aq.api_maj_ver > 1)))
2080 flags |= I40E_AQC_SET_VSI_PROMISC_TX;
2081 }
2082
2083 cmd->promiscuous_flags = cpu_to_le16(flags);
2084
2085 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2086 if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
2087 (hw->aq.api_maj_ver > 1))
2088 cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
2089
2090 cmd->seid = cpu_to_le16(seid);
2091 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2092
2093 return status;
2094}
2095
2096/**
2097 * i40e_aq_set_vsi_multicast_promiscuous
2098 * @hw: pointer to the hw struct
2099 * @seid: vsi number
2100 * @set: set multicast promiscuous enable/disable
2101 * @cmd_details: pointer to command details structure or NULL
2102 **/
2103i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2104 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2105{
2106 struct i40e_aq_desc desc;
2107 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2108 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2109 i40e_status status;
2110 u16 flags = 0;
2111
2112 i40e_fill_default_direct_cmd_desc(&desc,
2113 i40e_aqc_opc_set_vsi_promiscuous_modes);
2114
2115 if (set)
2116 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2117
2118 cmd->promiscuous_flags = cpu_to_le16(flags);
2119
2120 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2121
2122 cmd->seid = cpu_to_le16(seid);
2123 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2124
2125 return status;
2126}
2127
2128/**
2129 * i40e_aq_set_vsi_mc_promisc_on_vlan
2130 * @hw: pointer to the hw struct
2131 * @seid: vsi number
2132 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
2133 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2134 * @cmd_details: pointer to command details structure or NULL
2135 **/
2136enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2137 u16 seid, bool enable,
2138 u16 vid,
2139 struct i40e_asq_cmd_details *cmd_details)
2140{
2141 struct i40e_aq_desc desc;
2142 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2143 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2144 enum i40e_status_code status;
2145 u16 flags = 0;
2146
2147 i40e_fill_default_direct_cmd_desc(&desc,
2148 i40e_aqc_opc_set_vsi_promiscuous_modes);
2149
2150 if (enable)
2151 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2152
2153 cmd->promiscuous_flags = cpu_to_le16(flags);
2154 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2155 cmd->seid = cpu_to_le16(seid);
2156 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2157
2158 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2159
2160 return status;
2161}
2162
2163/**
2164 * i40e_aq_set_vsi_uc_promisc_on_vlan
2165 * @hw: pointer to the hw struct
2166 * @seid: vsi number
2167 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2168 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2169 * @cmd_details: pointer to command details structure or NULL
2170 **/
2171enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2172 u16 seid, bool enable,
2173 u16 vid,
2174 struct i40e_asq_cmd_details *cmd_details)
2175{
2176 struct i40e_aq_desc desc;
2177 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2178 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2179 enum i40e_status_code status;
2180 u16 flags = 0;
2181
2182 i40e_fill_default_direct_cmd_desc(&desc,
2183 i40e_aqc_opc_set_vsi_promiscuous_modes);
2184
2185 if (enable)
2186 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2187
2188 cmd->promiscuous_flags = cpu_to_le16(flags);
2189 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2190 cmd->seid = cpu_to_le16(seid);
2191 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2192
2193 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2194
2195 return status;
2196}
2197
2198/**
2199 * i40e_aq_set_vsi_bc_promisc_on_vlan
2200 * @hw: pointer to the hw struct
2201 * @seid: vsi number
2202 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2203 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2204 * @cmd_details: pointer to command details structure or NULL
2205 **/
2206i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2207 u16 seid, bool enable, u16 vid,
2208 struct i40e_asq_cmd_details *cmd_details)
2209{
2210 struct i40e_aq_desc desc;
2211 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2212 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2213 i40e_status status;
2214 u16 flags = 0;
2215
2216 i40e_fill_default_direct_cmd_desc(&desc,
2217 i40e_aqc_opc_set_vsi_promiscuous_modes);
2218
2219 if (enable)
2220 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2221
2222 cmd->promiscuous_flags = cpu_to_le16(flags);
2223 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2224 cmd->seid = cpu_to_le16(seid);
2225 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2226
2227 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2228
2229 return status;
2230}
2231
2232/**
2233 * i40e_aq_set_vsi_broadcast
2234 * @hw: pointer to the hw struct
2235 * @seid: vsi number
2236 * @set_filter: true to set filter, false to clear filter
2237 * @cmd_details: pointer to command details structure or NULL
2238 *
2239 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2240 **/
2241i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2242 u16 seid, bool set_filter,
2243 struct i40e_asq_cmd_details *cmd_details)
2244{
2245 struct i40e_aq_desc desc;
2246 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2247 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2248 i40e_status status;
2249
2250 i40e_fill_default_direct_cmd_desc(&desc,
2251 i40e_aqc_opc_set_vsi_promiscuous_modes);
2252
2253 if (set_filter)
2254 cmd->promiscuous_flags
2255 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2256 else
2257 cmd->promiscuous_flags
2258 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2259
2260 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2261 cmd->seid = cpu_to_le16(seid);
2262 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2263
2264 return status;
2265}
2266
2267/**
2268 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2269 * @hw: pointer to the hw struct
2270 * @seid: vsi number
2271 * @enable: set VLAN promiscuous enable/disable for the VSI
2272 * @cmd_details: pointer to command details structure or NULL
2273 **/
2274i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2275 u16 seid, bool enable,
2276 struct i40e_asq_cmd_details *cmd_details)
2277{
2278 struct i40e_aq_desc desc;
2279 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2280 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2281 i40e_status status;
2282 u16 flags = 0;
2283
2284 i40e_fill_default_direct_cmd_desc(&desc,
2285 i40e_aqc_opc_set_vsi_promiscuous_modes);
2286 if (enable)
2287 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2288
2289 cmd->promiscuous_flags = cpu_to_le16(flags);
2290 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2291 cmd->seid = cpu_to_le16(seid);
2292
2293 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2294
2295 return status;
2296}
2297
2298/**
2299 * i40e_aq_get_vsi_params - get VSI configuration info
2300 * @hw: pointer to the hw struct
2301 * @vsi_ctx: pointer to a vsi context struct
2302 * @cmd_details: pointer to command details structure or NULL
2303 **/
2304i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2305 struct i40e_vsi_context *vsi_ctx,
2306 struct i40e_asq_cmd_details *cmd_details)
2307{
2308 struct i40e_aq_desc desc;
2309 struct i40e_aqc_add_get_update_vsi *cmd =
2310 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2311 struct i40e_aqc_add_get_update_vsi_completion *resp =
2312 (struct i40e_aqc_add_get_update_vsi_completion *)
2313 &desc.params.raw;
2314 i40e_status status;
2315
2316 i40e_fill_default_direct_cmd_desc(&desc,
2317 i40e_aqc_opc_get_vsi_parameters);
2318
2319 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2320
2321 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2322
2323 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2324 sizeof(vsi_ctx->info), NULL);
2325
2326 if (status)
2327 goto aq_get_vsi_params_exit;
2328
2329 vsi_ctx->seid = le16_to_cpu(resp->seid);
2330 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2331 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2332 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2333
2334aq_get_vsi_params_exit:
2335 return status;
2336}
2337
2338/**
2339 * i40e_aq_update_vsi_params
2340 * @hw: pointer to the hw struct
2341 * @vsi_ctx: pointer to a vsi context struct
2342 * @cmd_details: pointer to command details structure or NULL
2343 *
2344 * Update a VSI context.
2345 **/
2346i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2347 struct i40e_vsi_context *vsi_ctx,
2348 struct i40e_asq_cmd_details *cmd_details)
2349{
2350 struct i40e_aq_desc desc;
2351 struct i40e_aqc_add_get_update_vsi *cmd =
2352 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2353 struct i40e_aqc_add_get_update_vsi_completion *resp =
2354 (struct i40e_aqc_add_get_update_vsi_completion *)
2355 &desc.params.raw;
2356 i40e_status status;
2357
2358 i40e_fill_default_direct_cmd_desc(&desc,
2359 i40e_aqc_opc_update_vsi_parameters);
2360 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2361
2362 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2363
2364 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2365 sizeof(vsi_ctx->info), cmd_details);
2366
2367 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2368 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2369
2370 return status;
2371}
2372
2373/**
2374 * i40e_aq_get_switch_config
2375 * @hw: pointer to the hardware structure
2376 * @buf: pointer to the result buffer
2377 * @buf_size: length of input buffer
2378 * @start_seid: seid to start for the report, 0 == beginning
2379 * @cmd_details: pointer to command details structure or NULL
2380 *
2381 * Fill the buf with switch configuration returned from AdminQ command
2382 **/
2383i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2384 struct i40e_aqc_get_switch_config_resp *buf,
2385 u16 buf_size, u16 *start_seid,
2386 struct i40e_asq_cmd_details *cmd_details)
2387{
2388 struct i40e_aq_desc desc;
2389 struct i40e_aqc_switch_seid *scfg =
2390 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2391 i40e_status status;
2392
2393 i40e_fill_default_direct_cmd_desc(&desc,
2394 i40e_aqc_opc_get_switch_config);
2395 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2396 if (buf_size > I40E_AQ_LARGE_BUF)
2397 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2398 scfg->seid = cpu_to_le16(*start_seid);
2399
2400 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2401 *start_seid = le16_to_cpu(scfg->seid);
2402
2403 return status;
2404}
2405
2406/**
2407 * i40e_aq_set_switch_config
2408 * @hw: pointer to the hardware structure
2409 * @flags: bit flag values to set
2410 * @mode: cloud filter mode
2411 * @valid_flags: which bit flags to set
2413 * @cmd_details: pointer to command details structure or NULL
2414 *
2415 * Set switch configuration bits
2416 **/
2417enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2418 u16 flags,
2419 u16 valid_flags, u8 mode,
2420 struct i40e_asq_cmd_details *cmd_details)
2421{
2422 struct i40e_aq_desc desc;
2423 struct i40e_aqc_set_switch_config *scfg =
2424 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2425 enum i40e_status_code status;
2426
2427 i40e_fill_default_direct_cmd_desc(&desc,
2428 i40e_aqc_opc_set_switch_config);
2429 scfg->flags = cpu_to_le16(flags);
2430 scfg->valid_flags = cpu_to_le16(valid_flags);
2431 scfg->mode = mode;
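	/* if 802.1ad is supported, also pass along the driver's switch/first/second tag values */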
2432 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2433 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2434 scfg->first_tag = cpu_to_le16(hw->first_tag);
2435 scfg->second_tag = cpu_to_le16(hw->second_tag);
2436 }
2437 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2438
2439 return status;
2440}
2441
2442/**
2443 * i40e_aq_get_firmware_version
2444 * @hw: pointer to the hw struct
2445 * @fw_major_version: firmware major version
2446 * @fw_minor_version: firmware minor version
2447 * @fw_build: firmware build number
2448 * @api_major_version: admin queue API major version
2449 * @api_minor_version: admin queue API minor version
2450 * @cmd_details: pointer to command details structure or NULL
2451 *
2452 * Get the firmware version from the admin queue commands
2453 **/
2454i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2455 u16 *fw_major_version, u16 *fw_minor_version,
2456 u32 *fw_build,
2457 u16 *api_major_version, u16 *api_minor_version,
2458 struct i40e_asq_cmd_details *cmd_details)
2459{
2460 struct i40e_aq_desc desc;
2461 struct i40e_aqc_get_version *resp =
2462 (struct i40e_aqc_get_version *)&desc.params.raw;
2463 i40e_status status;
2464
2465 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2466
2467 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2468
2469 if (!status) {
2470 if (fw_major_version)
2471 *fw_major_version = le16_to_cpu(resp->fw_major);
2472 if (fw_minor_version)
2473 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2474 if (fw_build)
2475 *fw_build = le32_to_cpu(resp->fw_build);
2476 if (api_major_version)
2477 *api_major_version = le16_to_cpu(resp->api_major);
2478 if (api_minor_version)
2479 *api_minor_version = le16_to_cpu(resp->api_minor);
2480 }
2481
2482 return status;
2483}
2484
2485/**
2486 * i40e_aq_send_driver_version
2487 * @hw: pointer to the hw struct
2488 * @dv: driver's major, minor version
2489 * @cmd_details: pointer to command details structure or NULL
2490 *
2491 * Send the driver version to the firmware
2492 **/
2493i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2494 struct i40e_driver_version *dv,
2495 struct i40e_asq_cmd_details *cmd_details)
2496{
2497 struct i40e_aq_desc desc;
2498 struct i40e_aqc_driver_version *cmd =
2499 (struct i40e_aqc_driver_version *)&desc.params.raw;
2500 i40e_status status;
2501 u16 len;
2502
2503 if (dv == NULL)
2504 return I40E_ERR_PARAM;
2505
2506 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2507
2508 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2509 cmd->driver_major_ver = dv->major_version;
2510 cmd->driver_minor_ver = dv->minor_version;
2511 cmd->driver_build_ver = dv->build_version;
2512 cmd->driver_subbuild_ver = dv->subbuild_version;
2513
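	/* send only the leading printable-ASCII, NUL-terminated portion of the driver string */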
2514 len = 0;
2515 while (len < sizeof(dv->driver_string) &&
2516 (dv->driver_string[len] < 0x80) &&
2517 dv->driver_string[len])
2518 len++;
2519 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2520 len, cmd_details);
2521
2522 return status;
2523}
2524
2525/**
2526 * i40e_get_link_status - get status of the HW network link
2527 * @hw: pointer to the hw struct
2528 * @link_up: pointer to bool (true/false = linkup/linkdown)
2529 *
2530 * Sets *link_up to true if the link is up and false if it is down.
2531 * The value of *link_up is not valid if the returned status is non-zero.
2532 *
2533 * Side effect: LinkStatusEvent reporting becomes enabled
2534 **/
2535i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2536{
2537 i40e_status status = 0;
2538
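	/* only query firmware when the cached link info has been invalidated */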
2539 if (hw->phy.get_link_info) {
2540 status = i40e_update_link_info(hw);
2541
2542 if (status)
2543 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2544 status);
2545 }
2546
2547 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2548
2549 return status;
2550}
2551
2552/**
2553 * i40e_update_link_info - update status of the HW network link
2554 * @hw: pointer to the hw struct
2555 **/
2556noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2557{
2558 struct i40e_aq_get_phy_abilities_resp abilities;
2559 i40e_status status = 0;
2560
2561 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2562 if (status)
2563 return status;
2564
2565 /* extra checking needed to ensure link info to user is timely */
2566 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2567 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2568 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2569 status = i40e_aq_get_phy_capabilities(hw, false, false,
2570 &abilities, NULL);
2571 if (status)
2572 return status;
2573
2574 hw->phy.link_info.req_fec_info =
2575 abilities.fec_cfg_curr_mod_ext_info &
2576 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
2577
2578 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2579 sizeof(hw->phy.link_info.module_type));
2580 }
2581
2582 return status;
2583}
2584
2585/**
2586 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2587 * @hw: pointer to the hw struct
2588 * @uplink_seid: the MAC or other gizmo SEID
2589 * @downlink_seid: the VSI SEID
2590 * @enabled_tc: bitmap of TCs to be enabled
2591 * @default_port: true for default port VSI, false for control port
2592 * @veb_seid: pointer to where to put the resulting VEB SEID
2593 * @enable_stats: true to turn on VEB stats
2594 * @cmd_details: pointer to command details structure or NULL
2595 *
2596 * This asks the FW to add a VEB between the uplink and downlink
2597 * elements. If the uplink SEID is 0, this will be a floating VEB.
2598 **/
2599i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2600 u16 downlink_seid, u8 enabled_tc,
2601 bool default_port, u16 *veb_seid,
2602 bool enable_stats,
2603 struct i40e_asq_cmd_details *cmd_details)
2604{
2605 struct i40e_aq_desc desc;
2606 struct i40e_aqc_add_veb *cmd =
2607 (struct i40e_aqc_add_veb *)&desc.params.raw;
2608 struct i40e_aqc_add_veb_completion *resp =
2609 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2610 i40e_status status;
2611 u16 veb_flags = 0;
2612
2613 /* SEIDs need to either both be set or both be 0 for floating VEB */
2614 if (!!uplink_seid != !!downlink_seid)
2615 return I40E_ERR_PARAM;
2616
2617 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2618
2619 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2620 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2621 cmd->enable_tcs = enabled_tc;
2622 if (!uplink_seid)
2623 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2624 if (default_port)
2625 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2626 else
2627 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2628
2629 /* reverse logic here: set the bitflag to disable the stats */
2630 if (!enable_stats)
2631 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2632
2633 cmd->veb_flags = cpu_to_le16(veb_flags);
2634
2635 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2636
2637 if (!status && veb_seid)
2638 *veb_seid = le16_to_cpu(resp->veb_seid);
2639
2640 return status;
2641}
2642
2643/**
2644 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2645 * @hw: pointer to the hw struct
2646 * @veb_seid: the SEID of the VEB to query
2647 * @switch_id: the uplink switch id
2648 * @floating: set to true if the VEB is floating
2649 * @statistic_index: index of the stats counter block for this VEB
2650 * @vebs_used: number of VEBs used by function
2651 * @vebs_free: total VEBs not reserved by any function
2652 * @cmd_details: pointer to command details structure or NULL
2653 *
2654 * This retrieves the parameters for a particular VEB, specified by
2655 * veb_seid, and returns them to the caller.
2656 **/
2657i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2658 u16 veb_seid, u16 *switch_id,
2659 bool *floating, u16 *statistic_index,
2660 u16 *vebs_used, u16 *vebs_free,
2661 struct i40e_asq_cmd_details *cmd_details)
2662{
2663 struct i40e_aq_desc desc;
2664 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2665 (struct i40e_aqc_get_veb_parameters_completion *)
2666 &desc.params.raw;
2667 i40e_status status;
2668
2669 if (veb_seid == 0)
2670 return I40E_ERR_PARAM;
2671
2672 i40e_fill_default_direct_cmd_desc(&desc,
2673 i40e_aqc_opc_get_veb_parameters);
2674 cmd_resp->seid = cpu_to_le16(veb_seid);
2675
2676 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2677 if (status)
2678 goto get_veb_exit;
2679
2680 if (switch_id)
2681 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2682 if (statistic_index)
2683 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2684 if (vebs_used)
2685 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2686 if (vebs_free)
2687 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2688 if (floating) {
2689 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2690
2691 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2692 *floating = true;
2693 else
2694 *floating = false;
2695 }
2696
2697get_veb_exit:
2698 return status;
2699}
2700
2701/**
2702 * i40e_aq_add_macvlan
2703 * @hw: pointer to the hw struct
2704 * @seid: VSI for the mac address
2705 * @mv_list: list of macvlans to be added
2706 * @count: length of the list
2707 * @cmd_details: pointer to command details structure or NULL
2708 *
2709 * Add MAC/VLAN addresses to the HW filtering
2710 **/
2711i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2712 struct i40e_aqc_add_macvlan_element_data *mv_list,
2713 u16 count, struct i40e_asq_cmd_details *cmd_details)
2714{
2715 struct i40e_aq_desc desc;
2716 struct i40e_aqc_macvlan *cmd =
2717 (struct i40e_aqc_macvlan *)&desc.params.raw;
2718 i40e_status status;
2719 u16 buf_size;
2720 int i;
2721
2722 if (count == 0 || !mv_list || !hw)
2723 return I40E_ERR_PARAM;
2724
2725 buf_size = count * sizeof(*mv_list);
2726
2727 /* prep the rest of the request */
2728 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
2729 cmd->num_addresses = cpu_to_le16(count);
2730 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2731 cmd->seid[1] = 0;
2732 cmd->seid[2] = 0;
2733
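	/* flag multicast entries so they use the shared MAC filter resource */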
2734 for (i = 0; i < count; i++)
2735 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2736 mv_list[i].flags |=
2737 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2738
2739 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2740 if (buf_size > I40E_AQ_LARGE_BUF)
2741 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2742
2743 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2744 cmd_details);
2745
2746 return status;
2747}
2748
2749/**
2750 * i40e_aq_remove_macvlan
2751 * @hw: pointer to the hw struct
2752 * @seid: VSI for the mac address
2753 * @mv_list: list of macvlans to be removed
2754 * @count: length of the list
2755 * @cmd_details: pointer to command details structure or NULL
2756 *
2757 * Remove MAC/VLAN addresses from the HW filtering
2758 **/
2759i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2760 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2761 u16 count, struct i40e_asq_cmd_details *cmd_details)
2762{
2763 struct i40e_aq_desc desc;
2764 struct i40e_aqc_macvlan *cmd =
2765 (struct i40e_aqc_macvlan *)&desc.params.raw;
2766 i40e_status status;
2767 u16 buf_size;
2768
2769 if (count == 0 || !mv_list || !hw)
2770 return I40E_ERR_PARAM;
2771
2772 buf_size = count * sizeof(*mv_list);
2773
2774 /* prep the rest of the request */
2775 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2776 cmd->num_addresses = cpu_to_le16(count);
2777 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2778 cmd->seid[1] = 0;
2779 cmd->seid[2] = 0;
2780
2781 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2782 if (buf_size > I40E_AQ_LARGE_BUF)
2783 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2784
2785 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2786 cmd_details);
2787
2788 return status;
2789}
2790
2791/**
2792 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2793 * @hw: pointer to the hw struct
2794 * @opcode: AQ opcode for add or delete mirror rule
2795 * @sw_seid: Switch SEID (to which rule refers)
2796 * @rule_type: Rule Type (ingress/egress/VLAN)
2797 * @id: Destination VSI SEID or Rule ID
2798 * @count: length of the list
2799 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2800 * @cmd_details: pointer to command details structure or NULL
2801 * @rule_id: Rule ID returned from FW
2802 * @rules_used: Number of rules used in internal switch
2803 * @rules_free: Number of rules free in internal switch
2804 *
2805 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2806 * VEBs/VEPA elements only
2807 **/
2808static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2809 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2810 u16 count, __le16 *mr_list,
2811 struct i40e_asq_cmd_details *cmd_details,
2812 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2813{
2814 struct i40e_aq_desc desc;
2815 struct i40e_aqc_add_delete_mirror_rule *cmd =
2816 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2817 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2818 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2819 i40e_status status;
2820 u16 buf_size;
2821
2822 buf_size = count * sizeof(*mr_list);
2823
2824 /* prep the rest of the request */
2825 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2826 cmd->seid = cpu_to_le16(sw_seid);
2827 cmd->rule_type = cpu_to_le16(rule_type &
2828 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2829 cmd->num_entries = cpu_to_le16(count);
2830 /* Dest VSI for add, rule_id for delete */
2831 cmd->destination = cpu_to_le16(id);
2832 if (mr_list) {
2833 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2834 I40E_AQ_FLAG_RD));
2835 if (buf_size > I40E_AQ_LARGE_BUF)
2836 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2837 }
2838
2839 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2840 cmd_details);
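	/* the rule ID and rule counts in the completion are valid even when the command fails with ENOSPC */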
2841 if (!status ||
2842 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2843 if (rule_id)
2844 *rule_id = le16_to_cpu(resp->rule_id);
2845 if (rules_used)
2846 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2847 if (rules_free)
2848 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2849 }
2850 return status;
2851}
2852
2853/**
2854 * i40e_aq_add_mirrorrule - add a mirror rule
2855 * @hw: pointer to the hw struct
2856 * @sw_seid: Switch SEID (to which rule refers)
2857 * @rule_type: Rule Type (ingress/egress/VLAN)
2858 * @dest_vsi: SEID of VSI to which packets will be mirrored
2859 * @count: length of the list
2860 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2861 * @cmd_details: pointer to command details structure or NULL
2862 * @rule_id: Rule ID returned from FW
2863 * @rules_used: Number of rules used in internal switch
2864 * @rules_free: Number of rules free in internal switch
2865 *
2866 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2867 **/
2868i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2869 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2870 struct i40e_asq_cmd_details *cmd_details,
2871 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2872{
2873 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2874 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2875 if (count == 0 || !mr_list)
2876 return I40E_ERR_PARAM;
2877 }
2878
2879 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2880 rule_type, dest_vsi, count, mr_list,
2881 cmd_details, rule_id, rules_used, rules_free);
2882}
2883
2884/**
2885 * i40e_aq_delete_mirrorrule - delete a mirror rule
2886 * @hw: pointer to the hw struct
2887 * @sw_seid: Switch SEID (to which rule refers)
2888 * @rule_type: Rule Type (ingress/egress/VLAN)
2889 * @count: length of the list
2890 * @rule_id: Rule ID that is returned in the receive desc as part of
2891 * add_mirrorrule.
2892 * @mr_list: list of mirrored VLAN IDs to be removed
2893 * @cmd_details: pointer to command details structure or NULL
2894 * @rules_used: Number of rules used in internal switch
2895 * @rules_free: Number of rules free in internal switch
2896 *
2897 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2898 **/
2899i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2900 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2901 struct i40e_asq_cmd_details *cmd_details,
2902 u16 *rules_used, u16 *rules_free)
2903{
2904 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2905 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2906 /* count and mr_list shall be valid for rule_type INGRESS VLAN
2907 * mirroring. For other rule_types, count and mr_list do not
2908 * matter.
2909 */
2910 if (count == 0 || !mr_list)
2911 return I40E_ERR_PARAM;
2912 }
2913
2914 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2915 rule_type, rule_id, count, mr_list,
2916 cmd_details, NULL, rules_used, rules_free);
2917}
2918
2919/**
2920 * i40e_aq_send_msg_to_vf
2921 * @hw: pointer to the hardware structure
2922 * @vfid: VF id to send msg
2923 * @v_opcode: opcodes for VF-PF communication
2924 * @v_retval: return error code
2925 * @msg: pointer to the msg buffer
2926 * @msglen: msg length
2927 * @cmd_details: pointer to command details
2928 *
2929 * Send a message to the specified VF through the admin queue.
2930 **/
2931i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2932 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2933 struct i40e_asq_cmd_details *cmd_details)
2934{
2935 struct i40e_aq_desc desc;
2936 struct i40e_aqc_pf_vf_message *cmd =
2937 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2938 i40e_status status;
2939
2940 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2941 cmd->id = cpu_to_le32(vfid);
2942 desc.cookie_high = cpu_to_le32(v_opcode);
2943 desc.cookie_low = cpu_to_le32(v_retval);
2944 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2945 if (msglen) {
2946 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2947 I40E_AQ_FLAG_RD));
2948 if (msglen > I40E_AQ_LARGE_BUF)
2949 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2950 desc.datalen = cpu_to_le16(msglen);
2951 }
2952 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2953
2954 return status;
2955}
2956
2957/**
2958 * i40e_aq_debug_read_register
2959 * @hw: pointer to the hw struct
2960 * @reg_addr: register address
2961 * @reg_val: register value
2962 * @cmd_details: pointer to command details structure or NULL
2963 *
2964 * Read the register using the admin queue commands
2965 **/
2966i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
2967 u32 reg_addr, u64 *reg_val,
2968 struct i40e_asq_cmd_details *cmd_details)
2969{
2970 struct i40e_aq_desc desc;
2971 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2972 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2973 i40e_status status;
2974
2975 if (reg_val == NULL)
2976 return I40E_ERR_PARAM;
2977
2978 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2979
2980 cmd_resp->address = cpu_to_le32(reg_addr);
2981
2982 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2983
2984 if (!status) {
2985 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2986 (u64)le32_to_cpu(cmd_resp->value_low);
2987 }
2988
2989 return status;
2990}
2991
2992/**
2993 * i40e_aq_debug_write_register
2994 * @hw: pointer to the hw struct
2995 * @reg_addr: register address
2996 * @reg_val: register value
2997 * @cmd_details: pointer to command details structure or NULL
2998 *
2999 * Write to a register using the admin queue commands
3000 **/
3001i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
3002 u32 reg_addr, u64 reg_val,
3003 struct i40e_asq_cmd_details *cmd_details)
3004{
3005 struct i40e_aq_desc desc;
3006 struct i40e_aqc_debug_reg_read_write *cmd =
3007 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3008 i40e_status status;
3009
3010 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
3011
3012 cmd->address = cpu_to_le32(reg_addr);
3013 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
3014 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
3015
3016 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3017
3018 return status;
3019}
3020
3021/**
3022 * i40e_aq_request_resource
3023 * @hw: pointer to the hw struct
3024 * @resource: resource id
3025 * @access: access type
3026 * @sdp_number: resource number
3027 * @timeout: the maximum time in ms that the driver may hold the resource
3028 * @cmd_details: pointer to command details structure or NULL
3029 *
3030 * requests common resource using the admin queue commands
3031 **/
3032i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3033 enum i40e_aq_resources_ids resource,
3034 enum i40e_aq_resource_access_type access,
3035 u8 sdp_number, u64 *timeout,
3036 struct i40e_asq_cmd_details *cmd_details)
3037{
3038 struct i40e_aq_desc desc;
3039 struct i40e_aqc_request_resource *cmd_resp =
3040 (struct i40e_aqc_request_resource *)&desc.params.raw;
3041 i40e_status status;
3042
3043 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3044
3045 cmd_resp->resource_id = cpu_to_le16(resource);
3046 cmd_resp->access_type = cpu_to_le16(access);
3047 cmd_resp->resource_number = cpu_to_le32(sdp_number);
3048
3049 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3050 /* The completion specifies the maximum time in ms that the driver
3051 * may hold the resource in the Timeout field.
3052 * If the resource is held by someone else, the command completes with
3053 * busy return value and the timeout field indicates the maximum time
3054 * the current owner of the resource has to free it.
3055 */
3056 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3057 *timeout = le32_to_cpu(cmd_resp->timeout);
3058
3059 return status;
3060}
3061
3062/**
3063 * i40e_aq_release_resource
3064 * @hw: pointer to the hw struct
3065 * @resource: resource id
3066 * @sdp_number: resource number
3067 * @cmd_details: pointer to command details structure or NULL
3068 *
3069 * release common resource using the admin queue commands
3070 **/
3071i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3072 enum i40e_aq_resources_ids resource,
3073 u8 sdp_number,
3074 struct i40e_asq_cmd_details *cmd_details)
3075{
3076 struct i40e_aq_desc desc;
3077 struct i40e_aqc_request_resource *cmd =
3078 (struct i40e_aqc_request_resource *)&desc.params.raw;
3079 i40e_status status;
3080
3081 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3082
3083 cmd->resource_id = cpu_to_le16(resource);
3084 cmd->resource_number = cpu_to_le32(sdp_number);
3085
3086 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3087
3088 return status;
3089}
3090
3091/**
3092 * i40e_aq_read_nvm
3093 * @hw: pointer to the hw struct
3094 * @module_pointer: module pointer location in words from the NVM beginning
3095 * @offset: byte offset from the module beginning
3096 * @length: length of the section to be read (in bytes from the offset)
3097 * @data: command buffer (size [bytes] = length)
3098 * @last_command: tells if this is the last command in a series
3099 * @cmd_details: pointer to command details structure or NULL
3100 *
3101 * Read the NVM using the admin queue commands
3102 **/
3103i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3104 u32 offset, u16 length, void *data,
3105 bool last_command,
3106 struct i40e_asq_cmd_details *cmd_details)
3107{
3108 struct i40e_aq_desc desc;
3109 struct i40e_aqc_nvm_update *cmd =
3110 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3111 i40e_status status;
3112
3113 /* The highest byte of the offset must be zero. */
3114 if (offset & 0xFF000000) {
3115 status = I40E_ERR_PARAM;
3116 goto i40e_aq_read_nvm_exit;
3117 }
3118
3119 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3120
3121 /* If this is the last command in a series, set the proper flag. */
3122 if (last_command)
3123 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3124 cmd->module_pointer = module_pointer;
3125 cmd->offset = cpu_to_le32(offset);
3126 cmd->length = cpu_to_le16(length);
3127
3128 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3129 if (length > I40E_AQ_LARGE_BUF)
3130 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3131
3132 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3133
3134i40e_aq_read_nvm_exit:
3135 return status;
3136}
3137
3138/**
3139 * i40e_aq_erase_nvm
3140 * @hw: pointer to the hw struct
3141 * @module_pointer: module pointer location in words from the NVM beginning
3142 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3143 * @length: length of the section to be erased (expressed in 4 KB)
3144 * @last_command: tells if this is the last command in a series
3145 * @cmd_details: pointer to command details structure or NULL
3146 *
3147 * Erase the NVM sector using the admin queue commands
3148 **/
3149i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3150 u32 offset, u16 length, bool last_command,
3151 struct i40e_asq_cmd_details *cmd_details)
3152{
3153 struct i40e_aq_desc desc;
3154 struct i40e_aqc_nvm_update *cmd =
3155 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3156 i40e_status status;
3157
3158 /* The highest byte of the offset must be zero. */
3159 if (offset & 0xFF000000) {
3160 status = I40E_ERR_PARAM;
3161 goto i40e_aq_erase_nvm_exit;
3162 }
3163
3164 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3165
3166 /* If this is the last command in a series, set the proper flag. */
3167 if (last_command)
3168 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3169 cmd->module_pointer = module_pointer;
3170 cmd->offset = cpu_to_le32(offset);
3171 cmd->length = cpu_to_le16(length);
3172
3173 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3174
3175i40e_aq_erase_nvm_exit:
3176 return status;
3177}
3178
3179/**
3180 * i40e_parse_discover_capabilities
3181 * @hw: pointer to the hw struct
3182 * @buff: pointer to a buffer containing device/function capability records
3183 * @cap_count: number of capability records in the list
3184 * @list_type_opc: type of capabilities list to parse
3185 *
3186 * Parse the device/function capabilities list.
3187 **/
3188static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3189 u32 cap_count,
3190 enum i40e_admin_queue_opc list_type_opc)
3191{
3192 struct i40e_aqc_list_capabilities_element_resp *cap;
3193 u32 valid_functions, num_functions;
3194 u32 number, logical_id, phys_id;
3195 struct i40e_hw_capabilities *p;
3196 u16 id, ocp_cfg_word0;
3197 i40e_status status;
3198 u8 major_rev;
3199 u32 i = 0;
3200
3201 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3202
3203 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3204 p = &hw->dev_caps;
3205 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3206 p = &hw->func_caps;
3207 else
3208 return;
3209
3210 for (i = 0; i < cap_count; i++, cap++) {
3211 id = le16_to_cpu(cap->id);
3212 number = le32_to_cpu(cap->number);
3213 logical_id = le32_to_cpu(cap->logical_id);
3214 phys_id = le32_to_cpu(cap->phys_id);
3215 major_rev = cap->major_rev;
3216
3217 switch (id) {
3218 case I40E_AQ_CAP_ID_SWITCH_MODE:
3219 p->switch_mode = number;
3220 break;
3221 case I40E_AQ_CAP_ID_MNG_MODE:
3222 p->management_mode = number;
3223 if (major_rev > 1) {
3224 p->mng_protocols_over_mctp = logical_id;
3225 i40e_debug(hw, I40E_DEBUG_INIT,
3226 "HW Capability: Protocols over MCTP = %d\n",
3227 p->mng_protocols_over_mctp);
3228 } else {
3229 p->mng_protocols_over_mctp = 0;
3230 }
3231 break;
3232 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3233 p->npar_enable = number;
3234 break;
3235 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3236 p->os2bmc = number;
3237 break;
3238 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3239 p->valid_functions = number;
3240 break;
3241 case I40E_AQ_CAP_ID_SRIOV:
3242 if (number == 1)
3243 p->sr_iov_1_1 = true;
3244 break;
3245 case I40E_AQ_CAP_ID_VF:
3246 p->num_vfs = number;
3247 p->vf_base_id = logical_id;
3248 break;
3249 case I40E_AQ_CAP_ID_VMDQ:
3250 if (number == 1)
3251 p->vmdq = true;
3252 break;
3253 case I40E_AQ_CAP_ID_8021QBG:
3254 if (number == 1)
3255 p->evb_802_1_qbg = true;
3256 break;
3257 case I40E_AQ_CAP_ID_8021QBR:
3258 if (number == 1)
3259 p->evb_802_1_qbh = true;
3260 break;
3261 case I40E_AQ_CAP_ID_VSI:
3262 p->num_vsis = number;
3263 break;
3264 case I40E_AQ_CAP_ID_DCB:
3265 if (number == 1) {
3266 p->dcb = true;
3267 p->enabled_tcmap = logical_id;
3268 p->maxtc = phys_id;
3269 }
3270 break;
3271 case I40E_AQ_CAP_ID_FCOE:
3272 if (number == 1)
3273 p->fcoe = true;
3274 break;
3275 case I40E_AQ_CAP_ID_ISCSI:
3276 if (number == 1)
3277 p->iscsi = true;
3278 break;
3279 case I40E_AQ_CAP_ID_RSS:
3280 p->rss = true;
3281 p->rss_table_size = number;
3282 p->rss_table_entry_width = logical_id;
3283 break;
3284 case I40E_AQ_CAP_ID_RXQ:
3285 p->num_rx_qp = number;
3286 p->base_queue = phys_id;
3287 break;
3288 case I40E_AQ_CAP_ID_TXQ:
3289 p->num_tx_qp = number;
3290 p->base_queue = phys_id;
3291 break;
3292 case I40E_AQ_CAP_ID_MSIX:
3293 p->num_msix_vectors = number;
3294 i40e_debug(hw, I40E_DEBUG_INIT,
3295 "HW Capability: MSIX vector count = %d\n",
3296 p->num_msix_vectors);
3297 break;
3298 case I40E_AQ_CAP_ID_VF_MSIX:
3299 p->num_msix_vectors_vf = number;
3300 break;
3301 case I40E_AQ_CAP_ID_FLEX10:
3302 if (major_rev == 1) {
3303 if (number == 1) {
3304 p->flex10_enable = true;
3305 p->flex10_capable = true;
3306 }
3307 } else {
3308 /* Capability revision >= 2 */
3309 if (number & 1)
3310 p->flex10_enable = true;
3311 if (number & 2)
3312 p->flex10_capable = true;
3313 }
3314 p->flex10_mode = logical_id;
3315 p->flex10_status = phys_id;
3316 break;
3317 case I40E_AQ_CAP_ID_CEM:
3318 if (number == 1)
3319 p->mgmt_cem = true;
3320 break;
3321 case I40E_AQ_CAP_ID_IWARP:
3322 if (number == 1)
3323 p->iwarp = true;
3324 break;
3325 case I40E_AQ_CAP_ID_LED:
3326 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3327 p->led[phys_id] = true;
3328 break;
3329 case I40E_AQ_CAP_ID_SDP:
3330 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3331 p->sdp[phys_id] = true;
3332 break;
3333 case I40E_AQ_CAP_ID_MDIO:
3334 if (number == 1) {
3335 p->mdio_port_num = phys_id;
3336 p->mdio_port_mode = logical_id;
3337 }
3338 break;
3339 case I40E_AQ_CAP_ID_1588:
3340 if (number == 1)
3341 p->ieee_1588 = true;
3342 break;
3343 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3344 p->fd = true;
3345 p->fd_filters_guaranteed = number;
3346 p->fd_filters_best_effort = logical_id;
3347 break;
3348 case I40E_AQ_CAP_ID_WSR_PROT:
3349 p->wr_csr_prot = (u64)number;
3350 p->wr_csr_prot |= (u64)logical_id << 32;
3351 break;
3352 case I40E_AQ_CAP_ID_NVM_MGMT:
3353 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3354 p->sec_rev_disabled = true;
3355 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3356 p->update_disabled = true;
3357 break;
3358 default:
3359 break;
3360 }
3361 }
3362
3363 if (p->fcoe)
3364 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3365
3366 /* Software override ensuring FCoE is disabled in NPAR or MFP
3367 * mode, because it is not supported in those modes.
3368 */
3369 if (p->npar_enable || p->flex10_enable)
3370 p->fcoe = false;
3371
3372 /* count the enabled ports (aka the "not disabled" ports) */
3373 hw->num_ports = 0;
3374 for (i = 0; i < 4; i++) {
3375 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3376 u64 port_cfg = 0;
3377
3378 /* use AQ read to get the physical register offset instead
3379 * of the port relative offset
3380 */
3381 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3382 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3383 hw->num_ports++;
3384 }
3385
3386 /* OCP cards case: if a mezz is removed the Ethernet port is at
3387 * disabled state in PRTGEN_CNF register. Additional NVM read is
3388 * needed in order to check if we are dealing with OCP card.
3389 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
3390 * physical ports results in wrong partition id calculation and thus
3391 * not supporting WoL.
3392 */
3393 if (hw->mac.type == I40E_MAC_X722) {
3394 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3395 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3396 2 * I40E_SR_OCP_CFG_WORD0,
3397 sizeof(ocp_cfg_word0),
3398 &ocp_cfg_word0, true, NULL);
3399 if (!status &&
3400 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3401 hw->num_ports = 4;
3402 i40e_release_nvm(hw);
3403 }
3404 }
3405
3406 valid_functions = p->valid_functions;
3407 num_functions = 0;
3408 while (valid_functions) {
3409 if (valid_functions & 1)
3410 num_functions++;
3411 valid_functions >>= 1;
3412 }
3413
3414 /* partition id is 1-based, and functions are evenly spread
3415 * across the ports as partitions
3416 */
3417 if (hw->num_ports != 0) {
3418 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3419 hw->num_partitions = num_functions / hw->num_ports;
3420 }
3421
3422 /* additional HW specific goodies that might
3423 * someday be HW version specific
3424 */
3425 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3426}
3427
3428/**
3429 * i40e_aq_discover_capabilities
3430 * @hw: pointer to the hw struct
3431 * @buff: a virtual buffer to hold the capabilities
3432 * @buff_size: Size of the virtual buffer
3433 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3434 * @list_type_opc: capabilities type to discover - pass in the command opcode
3435 * @cmd_details: pointer to command details structure or NULL
3436 *
3437 * Get the device capabilities descriptions from the firmware
3438 **/
3439i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3440 void *buff, u16 buff_size, u16 *data_size,
3441 enum i40e_admin_queue_opc list_type_opc,
3442 struct i40e_asq_cmd_details *cmd_details)
3443{
3444 struct i40e_aqc_list_capabilites *cmd;
3445 struct i40e_aq_desc desc;
3446 i40e_status status = 0;
3447
3448 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3449
3450 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3451 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3452 status = I40E_ERR_PARAM;
3453 goto exit;
3454 }
3455
3456 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3457
3458 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3459 if (buff_size > I40E_AQ_LARGE_BUF)
3460 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3461
3462 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3463 *data_size = le16_to_cpu(desc.datalen);
3464
3465 if (status)
3466 goto exit;
3467
3468 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3469 list_type_opc);
3470
3471exit:
3472 return status;
3473}
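
/* Usage sketch (not part of the driver): the usual calling pattern retries
 * with a larger buffer when firmware reports ENOMEM; the initial buffer
 * length and the surrounding error handling are illustrative assumptions.
 *
 *	u16 data_size = 0, buf_len = 1024;
 *	void *buf = kzalloc(buf_len, GFP_KERNEL);
 *	i40e_status err;
 *
 *	err = i40e_aq_discover_capabilities(hw, buf, buf_len, &data_size,
 *					    i40e_aqc_opc_list_func_capabilities,
 *					    NULL);
 *	if (err && hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
 *		// reallocate a data_size byte buffer and call again
 *	}
 *	kfree(buf);
 */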
3474
3475/**
3476 * i40e_aq_update_nvm
3477 * @hw: pointer to the hw struct
3478 * @module_pointer: module pointer location in words from the NVM beginning
3479 * @offset: byte offset from the module beginning
3480 * @length: length of the section to be written (in bytes from the offset)
3481 * @data: command buffer (size [bytes] = length)
3482 * @last_command: tells if this is the last command in a series
3483 * @preservation_flags: Preservation mode flags
3484 * @cmd_details: pointer to command details structure or NULL
3485 *
3486 * Update the NVM using the admin queue commands
3487 **/
3488i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3489 u32 offset, u16 length, void *data,
3490 bool last_command, u8 preservation_flags,
3491 struct i40e_asq_cmd_details *cmd_details)
3492{
3493 struct i40e_aq_desc desc;
3494 struct i40e_aqc_nvm_update *cmd =
3495 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3496 i40e_status status;
3497
3498	/* The highest byte of the offset must be zero. */
3499 if (offset & 0xFF000000) {
3500 status = I40E_ERR_PARAM;
3501 goto i40e_aq_update_nvm_exit;
3502 }
3503
3504 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3505
3506 /* If this is the last command in a series, set the proper flag. */
3507 if (last_command)
3508 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3509 if (hw->mac.type == I40E_MAC_X722) {
3510 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3511 cmd->command_flags |=
3512 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3513 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3514 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3515 cmd->command_flags |=
3516 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3517 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3518 }
3519 cmd->module_pointer = module_pointer;
3520 cmd->offset = cpu_to_le32(offset);
3521 cmd->length = cpu_to_le16(length);
3522
3523 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3524 if (length > I40E_AQ_LARGE_BUF)
3525 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3526
3527 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3528
3529i40e_aq_update_nvm_exit:
3530 return status;
3531}
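
/* Usage sketch (not part of the driver): NVM writes are issued while holding
 * the NVM ownership semaphore, in chunks, with @last_command set only on the
 * final chunk. Erase steps and checksum bookkeeping are omitted; the chunk
 * layout and the 'data', 'last_off' and 'last_len' names are assumptions.
 *
 *	if (!i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE)) {
 *		// first chunk at offset 0, more data to follow
 *		i40e_aq_update_nvm(hw, 0, 0, 4096, data, false,
 *				   I40E_NVM_PRESERVATION_FLAGS_ALL, NULL);
 *		// ... intermediate chunks ...
 *		// final chunk: last_command = true
 *		i40e_aq_update_nvm(hw, 0, last_off, last_len, data + last_off,
 *				   true, I40E_NVM_PRESERVATION_FLAGS_ALL, NULL);
 *		i40e_release_nvm(hw);
 *	}
 */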
3532
3533/**
3534 * i40e_aq_rearrange_nvm
3535 * @hw: pointer to the hw struct
3536 * @rearrange_nvm: defines direction of rearrangement
3537 * @cmd_details: pointer to command details structure or NULL
3538 *
3539 * Rearrange NVM structure, available only for transition FW
3540 **/
3541i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3542 u8 rearrange_nvm,
3543 struct i40e_asq_cmd_details *cmd_details)
3544{
3545 struct i40e_aqc_nvm_update *cmd;
3546 i40e_status status;
3547 struct i40e_aq_desc desc;
3548
3549 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3550
3551 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3552
3553 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3554 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3555
3556 if (!rearrange_nvm) {
3557 status = I40E_ERR_PARAM;
3558 goto i40e_aq_rearrange_nvm_exit;
3559 }
3560
3561 cmd->command_flags |= rearrange_nvm;
3562 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3563
3564i40e_aq_rearrange_nvm_exit:
3565 return status;
3566}
3567
3568/**
3569 * i40e_aq_get_lldp_mib
3570 * @hw: pointer to the hw struct
3571 * @bridge_type: type of bridge requested
3572 * @mib_type: Local, Remote or both Local and Remote MIBs
3573 * @buff: pointer to a user supplied buffer to store the MIB block
3574 * @buff_size: size of the buffer (in bytes)
3575 * @local_len: length of the returned Local LLDP MIB
3576 * @remote_len: length of the returned Remote LLDP MIB
3577 * @cmd_details: pointer to command details structure or NULL
3578 *
3579 * Requests the complete LLDP MIB (entire packet).
3580 **/
3581i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3582 u8 mib_type, void *buff, u16 buff_size,
3583 u16 *local_len, u16 *remote_len,
3584 struct i40e_asq_cmd_details *cmd_details)
3585{
3586 struct i40e_aq_desc desc;
3587 struct i40e_aqc_lldp_get_mib *cmd =
3588 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3589 struct i40e_aqc_lldp_get_mib *resp =
3590 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3591 i40e_status status;
3592
3593 if (buff_size == 0 || !buff)
3594 return I40E_ERR_PARAM;
3595
3596 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3597 /* Indirect Command */
3598 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3599
3600 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3601 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3602 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3603
3604 desc.datalen = cpu_to_le16(buff_size);
3605
3606 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3607 if (buff_size > I40E_AQ_LARGE_BUF)
3608 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3609
3610 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3611 if (!status) {
3612 if (local_len != NULL)
3613 *local_len = le16_to_cpu(resp->local_len);
3614 if (remote_len != NULL)
3615 *remote_len = le16_to_cpu(resp->remote_len);
3616 }
3617
3618 return status;
3619}
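
/* Usage sketch (not part of the driver): fetch the locally administered LLDP
 * MIB for the nearest bridge. The constants are the admin queue definitions
 * referenced above (verify against i40e_adminq_cmd.h); the 512-byte buffer is
 * an assumption sized for a single LLDPDU.
 *
 *	u8 mib[512];
 *	u16 local_len = 0;
 *
 *	i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *			     I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *			     &local_len, NULL, NULL);
 */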
3620
3621/**
3622 * i40e_aq_cfg_lldp_mib_change_event
3623 * @hw: pointer to the hw struct
3624 * @enable_update: Enable or Disable event posting
3625 * @cmd_details: pointer to command details structure or NULL
3626 *
3627 * Enable or Disable posting of an event on ARQ when LLDP MIB
3628 * associated with the interface changes
3629 **/
3630i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3631 bool enable_update,
3632 struct i40e_asq_cmd_details *cmd_details)
3633{
3634 struct i40e_aq_desc desc;
3635 struct i40e_aqc_lldp_update_mib *cmd =
3636 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3637 i40e_status status;
3638
3639 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3640
3641 if (!enable_update)
3642 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3643
3644 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3645
3646 return status;
3647}
3648
3649/**
3650 * i40e_aq_restore_lldp
3651 * @hw: pointer to the hw struct
3652 * @setting: pointer to factory setting variable or NULL
3653 * @restore: True if factory settings should be restored
3654 * @cmd_details: pointer to command details structure or NULL
3655 *
3656 * Restore LLDP Agent factory settings if @restore is set to true. Otherwise,
3657 * only return the factory setting in the AQ response.
3658 **/
3659enum i40e_status_code
3660i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3661 struct i40e_asq_cmd_details *cmd_details)
3662{
3663 struct i40e_aq_desc desc;
3664 struct i40e_aqc_lldp_restore *cmd =
3665 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3666 i40e_status status;
3667
3668 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3669 i40e_debug(hw, I40E_DEBUG_ALL,
3670 "Restore LLDP not supported by current FW version.\n");
3671 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3672 }
3673
3674 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3675
3676 if (restore)
3677 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3678
3679 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3680
3681 if (setting)
3682 *setting = cmd->command & 1;
3683
3684 return status;
3685}
3686
3687/**
3688 * i40e_aq_stop_lldp
3689 * @hw: pointer to the hw struct
3690 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3691 * @persist: True if stop of LLDP should be persistent across power cycles
3692 * @cmd_details: pointer to command details structure or NULL
3693 *
3694 * Stop or Shutdown the embedded LLDP Agent
3695 **/
3696i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3697 bool persist,
3698 struct i40e_asq_cmd_details *cmd_details)
3699{
3700 struct i40e_aq_desc desc;
3701 struct i40e_aqc_lldp_stop *cmd =
3702 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3703 i40e_status status;
3704
3705 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3706
3707 if (shutdown_agent)
3708 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3709
3710 if (persist) {
3711 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3712 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3713 else
3714 i40e_debug(hw, I40E_DEBUG_ALL,
3715 "Persistent Stop LLDP not supported by current FW version.\n");
3716 }
3717
3718 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3719
3720 return status;
3721}
3722
3723/**
3724 * i40e_aq_start_lldp
3725 * @hw: pointer to the hw struct
3726 * @persist: True if start of LLDP should be persistent across power cycles
3729 * @cmd_details: pointer to command details structure or NULL
3730 *
3731 * Start the embedded LLDP Agent on all ports.
3732 **/
3733i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3734 struct i40e_asq_cmd_details *cmd_details)
3735{
3736 struct i40e_aq_desc desc;
3737 struct i40e_aqc_lldp_start *cmd =
3738 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3739 i40e_status status;
3740
3741 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3742
3743 cmd->command = I40E_AQ_LLDP_AGENT_START;
3744
3745 if (persist) {
3746 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3747 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3748 else
3749 i40e_debug(hw, I40E_DEBUG_ALL,
3750 "Persistent Start LLDP not supported by current FW version.\n");
3751 }
3752
3753 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3754
3755 return status;
3756}
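
/* Usage sketch (not part of the driver): toggling the firmware LLDP agent,
 * for example so software DCBX can take over; persistence across power
 * cycles is only honoured on firmware that advertises it (see the flag
 * checks above).
 *
 *	i40e_aq_stop_lldp(hw, true, false, NULL);	// shut the agent down
 *	// ... run software LLDP/DCBX ...
 *	i40e_aq_start_lldp(hw, false, NULL);		// hand control back to FW
 */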
3757
3758/**
3759 * i40e_aq_set_dcb_parameters
3760 * @hw: pointer to the hw struct
3761 * @dcb_enable: True if DCB configuration needs to be applied
3762 * @cmd_details: pointer to command details structure or NULL
3763 *
3764 **/
3765enum i40e_status_code
3766i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3767 struct i40e_asq_cmd_details *cmd_details)
3768{
3769 struct i40e_aq_desc desc;
3770 struct i40e_aqc_set_dcb_parameters *cmd =
3771 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3772 i40e_status status;
3773
3774 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3775 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3776
3777 i40e_fill_default_direct_cmd_desc(&desc,
3778 i40e_aqc_opc_set_dcb_parameters);
3779
3780 if (dcb_enable) {
3781 cmd->valid_flags = I40E_DCB_VALID;
3782 cmd->command = I40E_AQ_DCB_SET_AGENT;
3783 }
3784 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3785
3786 return status;
3787}
3788
3789/**
3790 * i40e_aq_get_cee_dcb_config
3791 * @hw: pointer to the hw struct
3792 * @buff: response buffer that stores CEE operational configuration
3793 * @buff_size: size of the buffer passed
3794 * @cmd_details: pointer to command details structure or NULL
3795 *
3796 * Get CEE DCBX mode operational configuration from firmware
3797 **/
3798i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3799 void *buff, u16 buff_size,
3800 struct i40e_asq_cmd_details *cmd_details)
3801{
3802 struct i40e_aq_desc desc;
3803 i40e_status status;
3804
3805 if (buff_size == 0 || !buff)
3806 return I40E_ERR_PARAM;
3807
3808 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3809
3810 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3811 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3812 cmd_details);
3813
3814 return status;
3815}
3816
3817/**
3818 * i40e_aq_add_udp_tunnel
3819 * @hw: pointer to the hw struct
3820 * @udp_port: the UDP port to add in Host byte order
3821 * @protocol_index: protocol index type
3822 * @filter_index: pointer to filter index
3823 * @cmd_details: pointer to command details structure or NULL
3824 *
3825 * Note: Firmware expects the udp_port value to be in Little Endian format,
3826 * and this function will call cpu_to_le16 to convert from Host byte order to
3827 * Little Endian order.
3828 **/
3829i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3830 u16 udp_port, u8 protocol_index,
3831 u8 *filter_index,
3832 struct i40e_asq_cmd_details *cmd_details)
3833{
3834 struct i40e_aq_desc desc;
3835 struct i40e_aqc_add_udp_tunnel *cmd =
3836 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3837 struct i40e_aqc_del_udp_tunnel_completion *resp =
3838 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3839 i40e_status status;
3840
3841 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3842
3843 cmd->udp_port = cpu_to_le16(udp_port);
3844 cmd->protocol_type = protocol_index;
3845
3846 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3847
3848 if (!status && filter_index)
3849 *filter_index = resp->index;
3850
3851 return status;
3852}
3853
3854/**
3855 * i40e_aq_del_udp_tunnel
3856 * @hw: pointer to the hw struct
3857 * @index: filter index
3858 * @cmd_details: pointer to command details structure or NULL
3859 **/
3860i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3861 struct i40e_asq_cmd_details *cmd_details)
3862{
3863 struct i40e_aq_desc desc;
3864 struct i40e_aqc_remove_udp_tunnel *cmd =
3865 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3866 i40e_status status;
3867
3868 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3869
3870 cmd->index = index;
3871
3872 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3873
3874 return status;
3875}
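
/* Usage sketch (not part of the driver): offload a VXLAN UDP port and later
 * remove it using the filter index returned by firmware. The port number and
 * tunnel-type constant used here are illustrative.
 *
 *	u8 filter_idx = 0;
 *
 *	i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *			       &filter_idx, NULL);
 *	// ...
 *	i40e_aq_del_udp_tunnel(hw, filter_idx, NULL);
 */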
3876
3877/**
3878 * i40e_aq_delete_element - Delete switch element
3879 * @hw: pointer to the hw struct
3880 * @seid: the SEID to delete from the switch
3881 * @cmd_details: pointer to command details structure or NULL
3882 *
3883 * This deletes a switch element from the switch.
3884 **/
3885i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3886 struct i40e_asq_cmd_details *cmd_details)
3887{
3888 struct i40e_aq_desc desc;
3889 struct i40e_aqc_switch_seid *cmd =
3890 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3891 i40e_status status;
3892
3893 if (seid == 0)
3894 return I40E_ERR_PARAM;
3895
3896 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3897
3898 cmd->seid = cpu_to_le16(seid);
3899
3900 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3901
3902 return status;
3903}
3904
3905/**
3906 * i40e_aq_dcb_updated - DCB Updated Command
3907 * @hw: pointer to the hw struct
3908 * @cmd_details: pointer to command details structure or NULL
3909 *
3910 * EMP will return when the shared RPB settings have been
3911 * recomputed and modified. The retval field in the descriptor
3912 * will be set to 0 when RPB is modified.
3913 **/
3914i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
3915 struct i40e_asq_cmd_details *cmd_details)
3916{
3917 struct i40e_aq_desc desc;
3918 i40e_status status;
3919
3920 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3921
3922 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3923
3924 return status;
3925}
3926
3927/**
3928 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3929 * @hw: pointer to the hw struct
3930 * @seid: seid for the physical port/switching component/vsi
3931 * @buff: Indirect buffer to hold data parameters and response
3932 * @buff_size: Indirect buffer size
3933 * @opcode: Tx scheduler AQ command opcode
3934 * @cmd_details: pointer to command details structure or NULL
3935 *
3936 * Generic command handler for Tx scheduler AQ commands
3937 **/
3938static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3939 void *buff, u16 buff_size,
3940 enum i40e_admin_queue_opc opcode,
3941 struct i40e_asq_cmd_details *cmd_details)
3942{
3943 struct i40e_aq_desc desc;
3944 struct i40e_aqc_tx_sched_ind *cmd =
3945 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3946 i40e_status status;
3947 bool cmd_param_flag = false;
3948
3949 switch (opcode) {
3950 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3951 case i40e_aqc_opc_configure_vsi_tc_bw:
3952 case i40e_aqc_opc_enable_switching_comp_ets:
3953 case i40e_aqc_opc_modify_switching_comp_ets:
3954 case i40e_aqc_opc_disable_switching_comp_ets:
3955 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3956 case i40e_aqc_opc_configure_switching_comp_bw_config:
3957 cmd_param_flag = true;
3958 break;
3959 case i40e_aqc_opc_query_vsi_bw_config:
3960 case i40e_aqc_opc_query_vsi_ets_sla_config:
3961 case i40e_aqc_opc_query_switching_comp_ets_config:
3962 case i40e_aqc_opc_query_port_ets_config:
3963 case i40e_aqc_opc_query_switching_comp_bw_config:
3964 cmd_param_flag = false;
3965 break;
3966 default:
3967 return I40E_ERR_PARAM;
3968 }
3969
3970 i40e_fill_default_direct_cmd_desc(&desc, opcode);
3971
3972 /* Indirect command */
3973 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3974 if (cmd_param_flag)
3975 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3976 if (buff_size > I40E_AQ_LARGE_BUF)
3977 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3978
3979 desc.datalen = cpu_to_le16(buff_size);
3980
3981 cmd->vsi_seid = cpu_to_le16(seid);
3982
3983 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3984
3985 return status;
3986}
3987
3988/**
3989 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3990 * @hw: pointer to the hw struct
3991 * @seid: VSI seid
3992 * @credit: BW limit credits (0 = disabled)
3993 * @max_credit: Max BW limit credits
3994 * @cmd_details: pointer to command details structure or NULL
3995 **/
3996i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3997 u16 seid, u16 credit, u8 max_credit,
3998 struct i40e_asq_cmd_details *cmd_details)
3999{
4000 struct i40e_aq_desc desc;
4001 struct i40e_aqc_configure_vsi_bw_limit *cmd =
4002 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
4003 i40e_status status;
4004
4005 i40e_fill_default_direct_cmd_desc(&desc,
4006 i40e_aqc_opc_configure_vsi_bw_limit);
4007
4008 cmd->vsi_seid = cpu_to_le16(seid);
4009 cmd->credit = cpu_to_le16(credit);
4010 cmd->max_credit = max_credit;
4011
4012 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4013
4014 return status;
4015}
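
/* Usage sketch (not part of the driver): elsewhere in this driver the credit
 * unit is 50 Mbps, so capping a VSI at roughly 1 Gbps looks like the call
 * below. The divisor, the zero max_credit (burst accumulation) and 'vsi_seid'
 * are assumptions of this sketch; the driver normally passes a small non-zero
 * max_credit.
 *
 *	u16 credits = 1000 / 50;	// 1000 Mbps expressed in 50 Mbps credits
 *
 *	i40e_aq_config_vsi_bw_limit(hw, vsi_seid, credits, 0, NULL);
 */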
4016
4017/**
4018 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
4019 * @hw: pointer to the hw struct
4020 * @seid: VSI seid
4021 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
4022 * @cmd_details: pointer to command details structure or NULL
4023 **/
4024i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
4025 u16 seid,
4026 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
4027 struct i40e_asq_cmd_details *cmd_details)
4028{
4029 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4030 i40e_aqc_opc_configure_vsi_tc_bw,
4031 cmd_details);
4032}
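
/* Usage sketch (not part of the driver): split bandwidth evenly between two
 * enabled TCs. Field names follow struct i40e_aqc_configure_vsi_tc_bw_data;
 * the relative credit values and 'vsi_seid' are illustrative assumptions.
 *
 *	struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {0};
 *
 *	bw_data.tc_valid_bits = 0x3;		// TC0 and TC1
 *	bw_data.tc_bw_credits[0] = 50;
 *	bw_data.tc_bw_credits[1] = 50;
 *	i40e_aq_config_vsi_tc_bw(hw, vsi_seid, &bw_data, NULL);
 */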
4033
4034/**
4035 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4036 * @hw: pointer to the hw struct
4037 * @seid: seid of the switching component connected to Physical Port
4038 * @ets_data: Buffer holding ETS parameters
4039 * @opcode: Tx scheduler AQ command opcode
4040 * @cmd_details: pointer to command details structure or NULL
4041 **/
4042i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4043 u16 seid,
4044 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4045 enum i40e_admin_queue_opc opcode,
4046 struct i40e_asq_cmd_details *cmd_details)
4047{
4048 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4049 sizeof(*ets_data), opcode, cmd_details);
4050}
4051
4052/**
4053 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4054 * @hw: pointer to the hw struct
4055 * @seid: seid of the switching component
4056 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4057 * @cmd_details: pointer to command details structure or NULL
4058 **/
4059i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4060 u16 seid,
4061 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4062 struct i40e_asq_cmd_details *cmd_details)
4063{
4064 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4065 i40e_aqc_opc_configure_switching_comp_bw_config,
4066 cmd_details);
4067}
4068
4069/**
4070 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4071 * @hw: pointer to the hw struct
4072 * @seid: seid of the VSI
4073 * @bw_data: Buffer to hold VSI BW configuration
4074 * @cmd_details: pointer to command details structure or NULL
4075 **/
4076i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4077 u16 seid,
4078 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4079 struct i40e_asq_cmd_details *cmd_details)
4080{
4081 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4082 i40e_aqc_opc_query_vsi_bw_config,
4083 cmd_details);
4084}
4085
4086/**
4087 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4088 * @hw: pointer to the hw struct
4089 * @seid: seid of the VSI
4090 * @bw_data: Buffer to hold VSI BW configuration per TC
4091 * @cmd_details: pointer to command details structure or NULL
4092 **/
4093i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4094 u16 seid,
4095 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4096 struct i40e_asq_cmd_details *cmd_details)
4097{
4098 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4099 i40e_aqc_opc_query_vsi_ets_sla_config,
4100 cmd_details);
4101}
4102
4103/**
4104 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4105 * @hw: pointer to the hw struct
4106 * @seid: seid of the switching component
4107 * @bw_data: Buffer to hold switching component's per TC BW config
4108 * @cmd_details: pointer to command details structure or NULL
4109 **/
4110i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4111 u16 seid,
4112 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4113 struct i40e_asq_cmd_details *cmd_details)
4114{
4115 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4116 i40e_aqc_opc_query_switching_comp_ets_config,
4117 cmd_details);
4118}
4119
4120/**
4121 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4122 * @hw: pointer to the hw struct
4123 * @seid: seid of the VSI or switching component connected to Physical Port
4124 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4125 * @cmd_details: pointer to command details structure or NULL
4126 **/
4127i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4128 u16 seid,
4129 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4130 struct i40e_asq_cmd_details *cmd_details)
4131{
4132 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4133 i40e_aqc_opc_query_port_ets_config,
4134 cmd_details);
4135}
4136
4137/**
4138 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4139 * @hw: pointer to the hw struct
4140 * @seid: seid of the switching component
4141 * @bw_data: Buffer to hold switching component's BW configuration
4142 * @cmd_details: pointer to command details structure or NULL
4143 **/
4144i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4145 u16 seid,
4146 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4147 struct i40e_asq_cmd_details *cmd_details)
4148{
4149 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4150 i40e_aqc_opc_query_switching_comp_bw_config,
4151 cmd_details);
4152}
4153
4154/**
4155 * i40e_validate_filter_settings
4156 * @hw: pointer to the hardware structure
4157 * @settings: Filter control settings
4158 *
4159 * Check and validate the filter control settings passed.
4160 * The function checks for the valid filter/context sizes being
4161 * passed for FCoE and PE.
4162 *
4163 * Returns 0 if the values passed are valid and within
4164 * range, otherwise returns an error.
4165 **/
4166static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4167 struct i40e_filter_control_settings *settings)
4168{
4169 u32 fcoe_cntx_size, fcoe_filt_size;
4170 u32 pe_cntx_size, pe_filt_size;
4171 u32 fcoe_fmax;
4172 u32 val;
4173
4174 /* Validate FCoE settings passed */
4175 switch (settings->fcoe_filt_num) {
4176 case I40E_HASH_FILTER_SIZE_1K:
4177 case I40E_HASH_FILTER_SIZE_2K:
4178 case I40E_HASH_FILTER_SIZE_4K:
4179 case I40E_HASH_FILTER_SIZE_8K:
4180 case I40E_HASH_FILTER_SIZE_16K:
4181 case I40E_HASH_FILTER_SIZE_32K:
4182 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4183 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4184 break;
4185 default:
4186 return I40E_ERR_PARAM;
4187 }
4188
4189 switch (settings->fcoe_cntx_num) {
4190 case I40E_DMA_CNTX_SIZE_512:
4191 case I40E_DMA_CNTX_SIZE_1K:
4192 case I40E_DMA_CNTX_SIZE_2K:
4193 case I40E_DMA_CNTX_SIZE_4K:
4194 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4195 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4196 break;
4197 default:
4198 return I40E_ERR_PARAM;
4199 }
4200
4201 /* Validate PE settings passed */
4202 switch (settings->pe_filt_num) {
4203 case I40E_HASH_FILTER_SIZE_1K:
4204 case I40E_HASH_FILTER_SIZE_2K:
4205 case I40E_HASH_FILTER_SIZE_4K:
4206 case I40E_HASH_FILTER_SIZE_8K:
4207 case I40E_HASH_FILTER_SIZE_16K:
4208 case I40E_HASH_FILTER_SIZE_32K:
4209 case I40E_HASH_FILTER_SIZE_64K:
4210 case I40E_HASH_FILTER_SIZE_128K:
4211 case I40E_HASH_FILTER_SIZE_256K:
4212 case I40E_HASH_FILTER_SIZE_512K:
4213 case I40E_HASH_FILTER_SIZE_1M:
4214 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4215 pe_filt_size <<= (u32)settings->pe_filt_num;
4216 break;
4217 default:
4218 return I40E_ERR_PARAM;
4219 }
4220
4221 switch (settings->pe_cntx_num) {
4222 case I40E_DMA_CNTX_SIZE_512:
4223 case I40E_DMA_CNTX_SIZE_1K:
4224 case I40E_DMA_CNTX_SIZE_2K:
4225 case I40E_DMA_CNTX_SIZE_4K:
4226 case I40E_DMA_CNTX_SIZE_8K:
4227 case I40E_DMA_CNTX_SIZE_16K:
4228 case I40E_DMA_CNTX_SIZE_32K:
4229 case I40E_DMA_CNTX_SIZE_64K:
4230 case I40E_DMA_CNTX_SIZE_128K:
4231 case I40E_DMA_CNTX_SIZE_256K:
4232 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4233 pe_cntx_size <<= (u32)settings->pe_cntx_num;
4234 break;
4235 default:
4236 return I40E_ERR_PARAM;
4237 }
4238
4239 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4240 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4241 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4242 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4243 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4244 return I40E_ERR_INVALID_SIZE;
4245
4246 return 0;
4247}
4248
4249/**
4250 * i40e_set_filter_control
4251 * @hw: pointer to the hardware structure
4252 * @settings: Filter control settings
4253 *
4254 * Set the Queue Filters for PE/FCoE and enable filters required
4255 * for a single PF. It is expected that these settings are programmed
4256 * at the driver initialization time.
4257 **/
4258i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4259 struct i40e_filter_control_settings *settings)
4260{
4261 i40e_status ret = 0;
4262 u32 hash_lut_size = 0;
4263 u32 val;
4264
4265 if (!settings)
4266 return I40E_ERR_PARAM;
4267
4268 /* Validate the input settings */
4269 ret = i40e_validate_filter_settings(hw, settings);
4270 if (ret)
4271 return ret;
4272
4273 /* Read the PF Queue Filter control register */
4274 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4275
4276 /* Program required PE hash buckets for the PF */
4277 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4278 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4279 I40E_PFQF_CTL_0_PEHSIZE_MASK;
4280 /* Program required PE contexts for the PF */
4281 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4282 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4283 I40E_PFQF_CTL_0_PEDSIZE_MASK;
4284
4285 /* Program required FCoE hash buckets for the PF */
4286 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4287 val |= ((u32)settings->fcoe_filt_num <<
4288 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4289 I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4290 /* Program required FCoE DDP contexts for the PF */
4291 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4292 val |= ((u32)settings->fcoe_cntx_num <<
4293 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4294 I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4295
4296 /* Program Hash LUT size for the PF */
4297 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4298 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4299 hash_lut_size = 1;
4300 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4301 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4302
4303 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4304 if (settings->enable_fdir)
4305 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4306 if (settings->enable_ethtype)
4307 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4308 if (settings->enable_macvlan)
4309 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4310
4311 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4312
4313 return 0;
4314}
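
/* Usage sketch (not part of the driver): the PF typically programs this once
 * at init time. Assuming the zero values of the size enums above correspond
 * to the smallest valid FCoE/PE sizes, a zeroed structure only needs the hash
 * LUT size and the filter enables set; the values below are illustrative.
 *
 *	struct i40e_filter_control_settings settings = {0};
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	i40e_set_filter_control(hw, &settings);
 */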
4315
4316/**
4317 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4318 * @hw: pointer to the hw struct
4319 * @mac_addr: MAC address to use in the filter
4320 * @ethtype: Ethertype to use in the filter
4321 * @flags: Flags that need to be applied to the filter
4322 * @vsi_seid: seid of the control VSI
4323 * @queue: VSI queue number to send the packet to
4324 * @is_add: Add control packet filter if True else remove
4325 * @stats: Structure to hold information on control filter counts
4326 * @cmd_details: pointer to command details structure or NULL
4327 *
4328 * This command adds or removes a control packet filter for a control VSI.
4329 * On return it updates the perfect filter counts reported in the stats
4330 * member.
4331 **/
4332i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4333 u8 *mac_addr, u16 ethtype, u16 flags,
4334 u16 vsi_seid, u16 queue, bool is_add,
4335 struct i40e_control_filter_stats *stats,
4336 struct i40e_asq_cmd_details *cmd_details)
4337{
4338 struct i40e_aq_desc desc;
4339 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4340 (struct i40e_aqc_add_remove_control_packet_filter *)
4341 &desc.params.raw;
4342 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4343 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4344 &desc.params.raw;
4345 i40e_status status;
4346
4347 if (vsi_seid == 0)
4348 return I40E_ERR_PARAM;
4349
4350 if (is_add) {
4351 i40e_fill_default_direct_cmd_desc(&desc,
4352 i40e_aqc_opc_add_control_packet_filter);
4353 cmd->queue = cpu_to_le16(queue);
4354 } else {
4355 i40e_fill_default_direct_cmd_desc(&desc,
4356 i40e_aqc_opc_remove_control_packet_filter);
4357 }
4358
4359 if (mac_addr)
4360 ether_addr_copy(cmd->mac, mac_addr);
4361
4362 cmd->etype = cpu_to_le16(ethtype);
4363 cmd->flags = cpu_to_le16(flags);
4364 cmd->seid = cpu_to_le16(vsi_seid);
4365
4366 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4367
4368 if (!status && stats) {
4369 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4370 stats->etype_used = le16_to_cpu(resp->etype_used);
4371 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4372 stats->etype_free = le16_to_cpu(resp->etype_free);
4373 }
4374
4375 return status;
4376}
4377
4378/**
4379 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4380 * @hw: pointer to the hw struct
4381 * @seid: VSI seid on which to add the ethertype filter
4382 **/
4383void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4384 u16 seid)
4385{
4386#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4387 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4388 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4389 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4390 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4391 i40e_status status;
4392
4393 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4394 seid, 0, true, NULL,
4395 NULL);
4396 if (status)
4397 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4398}
4399
4400/**
4401 * i40e_aq_alternate_read
4402 * @hw: pointer to the hardware structure
4403 * @reg_addr0: address of first dword to be read
4404 * @reg_val0: pointer for data read from 'reg_addr0'
4405 * @reg_addr1: address of second dword to be read
4406 * @reg_val1: pointer for data read from 'reg_addr1'
4407 *
4408 * Read one or two dwords from alternate structure. Fields are indicated
4409 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4410 * is not passed then only register at 'reg_addr0' is read.
4411 *
4412 **/
4413static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4414 u32 reg_addr0, u32 *reg_val0,
4415 u32 reg_addr1, u32 *reg_val1)
4416{
4417 struct i40e_aq_desc desc;
4418 struct i40e_aqc_alternate_write *cmd_resp =
4419 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4420 i40e_status status;
4421
4422 if (!reg_val0)
4423 return I40E_ERR_PARAM;
4424
4425 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4426 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4427 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4428
4429 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4430
4431 if (!status) {
4432 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4433
4434 if (reg_val1)
4435 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4436 }
4437
4438 return status;
4439}
4440
4441/**
4442 * i40e_aq_resume_port_tx
4443 * @hw: pointer to the hardware structure
4444 * @cmd_details: pointer to command details structure or NULL
4445 *
4446 * Resume port's Tx traffic
4447 **/
4448i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4449 struct i40e_asq_cmd_details *cmd_details)
4450{
4451 struct i40e_aq_desc desc;
4452 i40e_status status;
4453
4454 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4455
4456 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4457
4458 return status;
4459}
4460
4461/**
4462 * i40e_set_pci_config_data - store PCI bus info
4463 * @hw: pointer to hardware structure
4464 * @link_status: the link status word from PCI config space
4465 *
4466 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4467 **/
4468void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4469{
4470 hw->bus.type = i40e_bus_type_pci_express;
4471
4472 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4473 case PCI_EXP_LNKSTA_NLW_X1:
4474 hw->bus.width = i40e_bus_width_pcie_x1;
4475 break;
4476 case PCI_EXP_LNKSTA_NLW_X2:
4477 hw->bus.width = i40e_bus_width_pcie_x2;
4478 break;
4479 case PCI_EXP_LNKSTA_NLW_X4:
4480 hw->bus.width = i40e_bus_width_pcie_x4;
4481 break;
4482 case PCI_EXP_LNKSTA_NLW_X8:
4483 hw->bus.width = i40e_bus_width_pcie_x8;
4484 break;
4485 default:
4486 hw->bus.width = i40e_bus_width_unknown;
4487 break;
4488 }
4489
4490 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4491 case PCI_EXP_LNKSTA_CLS_2_5GB:
4492 hw->bus.speed = i40e_bus_speed_2500;
4493 break;
4494 case PCI_EXP_LNKSTA_CLS_5_0GB:
4495 hw->bus.speed = i40e_bus_speed_5000;
4496 break;
4497 case PCI_EXP_LNKSTA_CLS_8_0GB:
4498 hw->bus.speed = i40e_bus_speed_8000;
4499 break;
4500 default:
4501 hw->bus.speed = i40e_bus_speed_unknown;
4502 break;
4503 }
4504}
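
/* Usage sketch (not part of the driver): the caller reads the PCIe link
 * status word from config space and hands it to this helper; 'pdev' is the
 * PCI device backing 'hw' and is an assumption of this sketch.
 *
 *	u16 link_status;
 *
 *	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
 *	i40e_set_pci_config_data(hw, link_status);
 */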
4505
4506/**
4507 * i40e_aq_debug_dump
4508 * @hw: pointer to the hardware structure
4509 * @cluster_id: specific cluster to dump
4510 * @table_id: table id within cluster
4511 * @start_index: index of line in the block to read
4512 * @buff_size: dump buffer size
4513 * @buff: dump buffer
4514 * @ret_buff_size: actual buffer size returned
4515 * @ret_next_table: next block to read
4516 * @ret_next_index: next index to read
4517 * @cmd_details: pointer to command details structure or NULL
4518 *
4519 * Dump internal FW/HW data for debug purposes.
4520 *
4521 **/
4522i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4523 u8 table_id, u32 start_index, u16 buff_size,
4524 void *buff, u16 *ret_buff_size,
4525 u8 *ret_next_table, u32 *ret_next_index,
4526 struct i40e_asq_cmd_details *cmd_details)
4527{
4528 struct i40e_aq_desc desc;
4529 struct i40e_aqc_debug_dump_internals *cmd =
4530 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4531 struct i40e_aqc_debug_dump_internals *resp =
4532 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4533 i40e_status status;
4534
4535 if (buff_size == 0 || !buff)
4536 return I40E_ERR_PARAM;
4537
4538 i40e_fill_default_direct_cmd_desc(&desc,
4539 i40e_aqc_opc_debug_dump_internals);
4540 /* Indirect Command */
4541 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4542 if (buff_size > I40E_AQ_LARGE_BUF)
4543 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4544
4545 cmd->cluster_id = cluster_id;
4546 cmd->table_id = table_id;
4547 cmd->idx = cpu_to_le32(start_index);
4548
4549 desc.datalen = cpu_to_le16(buff_size);
4550
4551 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4552 if (!status) {
4553 if (ret_buff_size)
4554 *ret_buff_size = le16_to_cpu(desc.datalen);
4555 if (ret_next_table)
4556 *ret_next_table = resp->table_id;
4557 if (ret_next_index)
4558 *ret_next_index = le32_to_cpu(resp->idx);
4559 }
4560
4561 return status;
4562}
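
/* Usage sketch (not part of the driver): read one block of a debug cluster.
 * Firmware reports how much it wrote and where the next block starts, which
 * the caller can feed back in to walk the whole table. 'cluster_id' and
 * 'table_id' are placeholders and the buffer size is an assumption.
 *
 *	u8 next_table;
 *	u32 next_index;
 *	u16 ret_len;
 *	u8 *buf = kzalloc(4096, GFP_KERNEL);
 *
 *	i40e_aq_debug_dump(hw, cluster_id, table_id, 0, 4096, buf, &ret_len,
 *			   &next_table, &next_index, NULL);
 *	kfree(buf);
 */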
4563
4564/**
4565 * i40e_read_bw_from_alt_ram
4566 * @hw: pointer to the hardware structure
4567 * @max_bw: pointer for max_bw read
4568 * @min_bw: pointer for min_bw read
4569 * @min_valid: pointer for bool that is true if min_bw is a valid value
4570 * @max_valid: pointer for bool that is true if max_bw is a valid value
4571 *
4572 * Read bw from the alternate ram for the given pf
4573 **/
4574i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4575 u32 *max_bw, u32 *min_bw,
4576 bool *min_valid, bool *max_valid)
4577{
4578 i40e_status status;
4579 u32 max_bw_addr, min_bw_addr;
4580
4581 /* Calculate the address of the min/max bw registers */
4582 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4583 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4584 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4585 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4586 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4587 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4588
4589 /* Read the bandwidths from alt ram */
4590 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4591 min_bw_addr, min_bw);
4592
4593 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4594 *min_valid = true;
4595 else
4596 *min_valid = false;
4597
4598 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4599 *max_valid = true;
4600 else
4601 *max_valid = false;
4602
4603 return status;
4604}
4605
4606/**
4607 * i40e_aq_configure_partition_bw
4608 * @hw: pointer to the hardware structure
4609 * @bw_data: Buffer holding valid pfs and bw limits
4610 * @cmd_details: pointer to command details
4611 *
4612 * Configure partitions guaranteed/max bw
4613 **/
4614i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4615 struct i40e_aqc_configure_partition_bw_data *bw_data,
4616 struct i40e_asq_cmd_details *cmd_details)
4617{
4618 i40e_status status;
4619 struct i40e_aq_desc desc;
4620 u16 bwd_size = sizeof(*bw_data);
4621
4622 i40e_fill_default_direct_cmd_desc(&desc,
4623 i40e_aqc_opc_configure_partition_bw);
4624
4625 /* Indirect command */
4626 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4627 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4628
4629 if (bwd_size > I40E_AQ_LARGE_BUF)
4630 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4631
4632 desc.datalen = cpu_to_le16(bwd_size);
4633
4634 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4635 cmd_details);
4636
4637 return status;
4638}
4639
4640/**
4641 * i40e_read_phy_register_clause22
4642 * @hw: pointer to the HW structure
4643 * @reg: register address in the page
4644 * @phy_addr: PHY address on MDIO interface
4645 * @value: PHY register value
4646 *
4647 * Reads specified PHY register value
4648 **/
4649i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4650 u16 reg, u8 phy_addr, u16 *value)
4651{
4652 i40e_status status = I40E_ERR_TIMEOUT;
4653 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4654 u32 command = 0;
4655 u16 retry = 1000;
4656
4657 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4658 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4659 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4660 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4661 (I40E_GLGEN_MSCA_MDICMD_MASK);
4662 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4663 do {
4664 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4665 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4666 status = 0;
4667 break;
4668 }
4669 udelay(10);
4670 retry--;
4671 } while (retry);
4672
4673 if (status) {
4674 i40e_debug(hw, I40E_DEBUG_PHY,
4675 "PHY: Can't write command to external PHY.\n");
4676 } else {
4677 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4678 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4679 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4680 }
4681
4682 return status;
4683}
4684
4685/**
4686 * i40e_write_phy_register_clause22
4687 * @hw: pointer to the HW structure
4688 * @reg: register address in the page
4689 * @phy_addr: PHY address on MDIO interface
4690 * @value: PHY register value
4691 *
4692 * Writes specified PHY register value
4693 **/
4694i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4695 u16 reg, u8 phy_addr, u16 value)
4696{
4697 i40e_status status = I40E_ERR_TIMEOUT;
4698 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4699 u32 command = 0;
4700 u16 retry = 1000;
4701
4702 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4703 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4704
4705 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4706 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4707 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4708 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4709 (I40E_GLGEN_MSCA_MDICMD_MASK);
4710
4711 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4712 do {
4713 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4714 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4715 status = 0;
4716 break;
4717 }
4718 udelay(10);
4719 retry--;
4720 } while (retry);
4721
4722 return status;
4723}
4724
4725/**
4726 * i40e_read_phy_register_clause45
4727 * @hw: pointer to the HW structure
4728 * @page: registers page number
4729 * @reg: register address in the page
4730 * @phy_addr: PHY address on MDIO interface
4731 * @value: PHY register value
4732 *
4733 * Reads specified PHY register value
4734 **/
4735i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4736 u8 page, u16 reg, u8 phy_addr, u16 *value)
4737{
4738 i40e_status status = I40E_ERR_TIMEOUT;
4739 u32 command = 0;
4740 u16 retry = 1000;
4741 u8 port_num = hw->func_caps.mdio_port_num;
4742
4743 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4744 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4745 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4746 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4747 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4748 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4749 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4750 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4751 do {
4752 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4753 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4754 status = 0;
4755 break;
4756 }
4757 usleep_range(10, 20);
4758 retry--;
4759 } while (retry);
4760
4761 if (status) {
4762 i40e_debug(hw, I40E_DEBUG_PHY,
4763 "PHY: Can't write command to external PHY.\n");
4764 goto phy_read_end;
4765 }
4766
4767 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4768 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4769 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4770 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4771 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4772 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4773 status = I40E_ERR_TIMEOUT;
4774 retry = 1000;
4775 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4776 do {
4777 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4778 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4779 status = 0;
4780 break;
4781 }
4782 usleep_range(10, 20);
4783 retry--;
4784 } while (retry);
4785
4786 if (!status) {
4787 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4788 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4789 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4790 } else {
4791 i40e_debug(hw, I40E_DEBUG_PHY,
4792 "PHY: Can't read register value from external PHY.\n");
4793 }
4794
4795phy_read_end:
4796 return status;
4797}
4798
4799/**
4800 * i40e_write_phy_register_clause45
4801 * @hw: pointer to the HW structure
4802 * @page: registers page number
4803 * @reg: register address in the page
4804 * @phy_addr: PHY address on MDIO interface
4805 * @value: PHY register value
4806 *
4807 * Writes value to specified PHY register
4808 **/
4809i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4810 u8 page, u16 reg, u8 phy_addr, u16 value)
4811{
4812 i40e_status status = I40E_ERR_TIMEOUT;
4813 u32 command = 0;
4814 u16 retry = 1000;
4815 u8 port_num = hw->func_caps.mdio_port_num;
4816
4817 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4818 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4819 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4820 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4821 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4822 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4823 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4824 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4825 do {
4826 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4827 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4828 status = 0;
4829 break;
4830 }
4831 usleep_range(10, 20);
4832 retry--;
4833 } while (retry);
4834 if (status) {
4835 i40e_debug(hw, I40E_DEBUG_PHY,
4836 "PHY: Can't write command to external PHY.\n");
4837 goto phy_write_end;
4838 }
4839
4840 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4841 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4842
4843 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4844 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4845 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4846 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4847 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4848 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4849 status = I40E_ERR_TIMEOUT;
4850 retry = 1000;
4851 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4852 do {
4853 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4854 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4855 status = 0;
4856 break;
4857 }
4858 usleep_range(10, 20);
4859 retry--;
4860 } while (retry);
4861
4862phy_write_end:
4863 return status;
4864}
4865
4866/**
4867 * i40e_write_phy_register
4868 * @hw: pointer to the HW structure
4869 * @page: registers page number
4870 * @reg: register address in the page
4871 * @phy_addr: PHY address on MDIO interface
4872 * @value: PHY register value
4873 *
4874 * Writes value to specified PHY register
4875 **/
4876i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4877 u8 page, u16 reg, u8 phy_addr, u16 value)
4878{
4879 i40e_status status;
4880
4881 switch (hw->device_id) {
4882 case I40E_DEV_ID_1G_BASE_T_X722:
4883 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4884 value);
4885 break;
4886 case I40E_DEV_ID_10G_BASE_T:
4887 case I40E_DEV_ID_10G_BASE_T4:
4888 case I40E_DEV_ID_10G_BASE_T_X722:
4889 case I40E_DEV_ID_25G_B:
4890 case I40E_DEV_ID_25G_SFP28:
4891 status = i40e_write_phy_register_clause45(hw, page, reg,
4892 phy_addr, value);
4893 break;
4894 default:
4895 status = I40E_ERR_UNKNOWN_PHY;
4896 break;
4897 }
4898
4899 return status;
4900}
4901
4902/**
4903 * i40e_read_phy_register
4904 * @hw: pointer to the HW structure
4905 * @page: registers page number
4906 * @reg: register address in the page
4907 * @phy_addr: PHY address on MDIO interface
4908 * @value: PHY register value
4909 *
4910 * Reads specified PHY register value
4911 **/
4912i40e_status i40e_read_phy_register(struct i40e_hw *hw,
4913 u8 page, u16 reg, u8 phy_addr, u16 *value)
4914{
4915 i40e_status status;
4916
4917 switch (hw->device_id) {
4918 case I40E_DEV_ID_1G_BASE_T_X722:
4919 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4920 value);
4921 break;
4922 case I40E_DEV_ID_10G_BASE_T:
4923 case I40E_DEV_ID_10G_BASE_T4:
4924 case I40E_DEV_ID_10G_BASE_T_BC:
4925 case I40E_DEV_ID_10G_BASE_T_X722:
4926 case I40E_DEV_ID_25G_B:
4927 case I40E_DEV_ID_25G_SFP28:
4928 status = i40e_read_phy_register_clause45(hw, page, reg,
4929 phy_addr, value);
4930 break;
4931 default:
4932 status = I40E_ERR_UNKNOWN_PHY;
4933 break;
4934 }
4935
4936 return status;
4937}
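
/* Usage sketch (not part of the driver): read-modify-write of an external PHY
 * register through the clause dispatchers above; 'port_num', 'reg' and the
 * bit being set here are placeholders.
 *
 *	u8 phy_addr = i40e_get_phy_address(hw, port_num);
 *	u16 val;
 *
 *	if (!i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, reg, phy_addr,
 *				    &val)) {
 *		val |= BIT(0);
 *		i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, reg,
 *					phy_addr, val);
 *	}
 */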
4938
4939/**
4940 * i40e_get_phy_address
4941 * @hw: pointer to the HW structure
4942 * @dev_num: PHY device number whose address we want
4943 *
4944 * Gets PHY address for current port
4945 **/
4946u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4947{
4948 u8 port_num = hw->func_caps.mdio_port_num;
4949 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4950
4951 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4952}
4953
4954/**
4955 * i40e_blink_phy_link_led
4956 * @hw: pointer to the HW structure
4957 * @time: how long the LED should blink, in seconds
4958 * @interval: gap between LED on and off, in milliseconds
4959 *
4960 * Blinks PHY link LED
4961 **/
4962i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
4963 u32 time, u32 interval)
4964{
4965 i40e_status status = 0;
4966 u32 i;
4967 u16 led_ctl;
4968 u16 gpio_led_port;
4969 u16 led_reg;
4970 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4971 u8 phy_addr = 0;
4972 u8 port_num;
4973
4974 i = rd32(hw, I40E_PFGEN_PORTNUM);
4975 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4976 phy_addr = i40e_get_phy_address(hw, port_num);
4977
4978 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4979 led_addr++) {
4980 status = i40e_read_phy_register_clause45(hw,
4981 I40E_PHY_COM_REG_PAGE,
4982 led_addr, phy_addr,
4983 &led_reg);
4984 if (status)
4985 goto phy_blinking_end;
4986 led_ctl = led_reg;
4987 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4988 led_reg = 0;
4989 status = i40e_write_phy_register_clause45(hw,
4990 I40E_PHY_COM_REG_PAGE,
4991 led_addr, phy_addr,
4992 led_reg);
4993 if (status)
4994 goto phy_blinking_end;
4995 break;
4996 }
4997 }
4998
4999 if (time > 0 && interval > 0) {
5000 for (i = 0; i < time * 1000; i += interval) {
5001 status = i40e_read_phy_register_clause45(hw,
5002 I40E_PHY_COM_REG_PAGE,
5003 led_addr, phy_addr, &led_reg);
5004 if (status)
5005 goto restore_config;
5006 if (led_reg & I40E_PHY_LED_MANUAL_ON)
5007 led_reg = 0;
5008 else
5009 led_reg = I40E_PHY_LED_MANUAL_ON;
5010 status = i40e_write_phy_register_clause45(hw,
5011 I40E_PHY_COM_REG_PAGE,
5012 led_addr, phy_addr, led_reg);
5013 if (status)
5014 goto restore_config;
5015 msleep(interval);
5016 }
5017 }
5018
5019restore_config:
5020 status = i40e_write_phy_register_clause45(hw,
5021 I40E_PHY_COM_REG_PAGE,
5022 led_addr, phy_addr, led_ctl);
5023
5024phy_blinking_end:
5025 return status;
5026}
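
/* Usage sketch (not part of the driver): blink the link LED for 5 seconds,
 * toggling every 500 ms; the helper restores the original LED mode when done.
 *
 *	i40e_blink_phy_link_led(hw, 5, 500);
 */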
5027
5028/**
5029 * i40e_led_get_reg - read LED register
5030 * @hw: pointer to the HW structure
5031 * @led_addr: LED register address
5032 * @reg_val: read register value
5033 **/
5034static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5035 u32 *reg_val)
5036{
5037 enum i40e_status_code status;
5038 u8 phy_addr = 0;
5039 u8 port_num;
5040 u32 i;
5041
5042 *reg_val = 0;
5043 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5044 status =
5045 i40e_aq_get_phy_register(hw,
5046 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5047 I40E_PHY_COM_REG_PAGE,
5048 I40E_PHY_LED_PROV_REG_1,
5049 reg_val, NULL);
5050 } else {
5051 i = rd32(hw, I40E_PFGEN_PORTNUM);
5052 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5053 phy_addr = i40e_get_phy_address(hw, port_num);
5054 status = i40e_read_phy_register_clause45(hw,
5055 I40E_PHY_COM_REG_PAGE,
5056 led_addr, phy_addr,
5057 (u16 *)reg_val);
5058 }
5059 return status;
5060}
5061
5062/**
5063 * i40e_led_set_reg - write LED register
5064 * @hw: pointer to the HW structure
5065 * @led_addr: LED register address
5066 * @reg_val: register value to write
5067 **/
5068static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5069 u32 reg_val)
5070{
5071 enum i40e_status_code status;
5072 u8 phy_addr = 0;
5073 u8 port_num;
5074 u32 i;
5075
5076 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5077 status =
5078 i40e_aq_set_phy_register(hw,
5079 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5080 I40E_PHY_COM_REG_PAGE,
5081 I40E_PHY_LED_PROV_REG_1,
5082 reg_val, NULL);
5083 } else {
5084 i = rd32(hw, I40E_PFGEN_PORTNUM);
5085 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5086 phy_addr = i40e_get_phy_address(hw, port_num);
5087 status = i40e_write_phy_register_clause45(hw,
5088 I40E_PHY_COM_REG_PAGE,
5089 led_addr, phy_addr,
5090 (u16)reg_val);
5091 }
5092
5093 return status;
5094}
5095
5096/**
5097 * i40e_led_get_phy - return current on/off mode
5098 * @hw: pointer to the hw struct
5099 * @led_addr: address of led register to use
5100 * @val: original value of register to use
5101 *
5102 **/
5103i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5104 u16 *val)
5105{
5106 i40e_status status = 0;
5107 u16 gpio_led_port;
5108 u8 phy_addr = 0;
5109 u16 reg_val;
5110 u16 temp_addr;
5111 u8 port_num;
5112 u32 i;
5113 u32 reg_val_aq;
5114
5115 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5116 status =
5117 i40e_aq_get_phy_register(hw,
5118 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5119 I40E_PHY_COM_REG_PAGE,
5120 I40E_PHY_LED_PROV_REG_1,
5121						 &reg_val_aq, NULL);
5122 if (status == I40E_SUCCESS)
5123 *val = (u16)reg_val_aq;
5124 return status;
5125 }
5126 temp_addr = I40E_PHY_LED_PROV_REG_1;
5127 i = rd32(hw, I40E_PFGEN_PORTNUM);
5128 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5129 phy_addr = i40e_get_phy_address(hw, port_num);
5130
5131 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5132 temp_addr++) {
5133 status = i40e_read_phy_register_clause45(hw,
5134 I40E_PHY_COM_REG_PAGE,
5135 temp_addr, phy_addr,
5136							 &reg_val);
5137 if (status)
5138 return status;
5139 *val = reg_val;
5140 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5141 *led_addr = temp_addr;
5142 break;
5143 }
5144 }
5145 return status;
5146}
5147
5148/**
5149 * i40e_led_set_phy
5150 * @hw: pointer to the HW structure
5151 * @on: true to turn the LED on, false to turn it off
5152 * @led_addr: address of led register to use
5153 * @mode: original val plus bit for set or ignore
5154 *
5155 * Set the LED on or off when it is controlled by the PHY
5156 *
5157 **/
5158i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5159 u16 led_addr, u32 mode)
5160{
5161 i40e_status status = 0;
5162 u32 led_ctl = 0;
5163 u32 led_reg = 0;
5164
5165 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5166 if (status)
5167 return status;
5168 led_ctl = led_reg;
5169 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5170 led_reg = 0;
5171 status = i40e_led_set_reg(hw, led_addr, led_reg);
5172 if (status)
5173 return status;
5174 }
5175 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5176 if (status)
5177 goto restore_config;
5178 if (on)
5179 led_reg = I40E_PHY_LED_MANUAL_ON;
5180 else
5181 led_reg = 0;
5182
5183 status = i40e_led_set_reg(hw, led_addr, led_reg);
5184 if (status)
5185 goto restore_config;
5186 if (mode & I40E_PHY_LED_MODE_ORIG) {
5187 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5188 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5189 }
5190 return status;
5191
5192restore_config:
5193 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5194 return status;
5195}
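
/* Illustrative usage sketch (not a verbatim copy of the in-tree caller,
 * which at the time of writing is the ethtool LED "identify" path, and
 * may differ): a caller that wants to force the port LED on and later
 * restore the PHY-controlled behaviour could do something along these
 * lines; "led_addr" and "orig_val" are local placeholders.
 *
 *	u16 led_addr = 0, orig_val = 0;
 *
 *	i40e_led_get_phy(hw, &led_addr, &orig_val);
 *	i40e_led_set_phy(hw, true, led_addr, 0);
 *	...
 *	i40e_led_set_phy(hw, false, led_addr,
 *			 orig_val | I40E_PHY_LED_MODE_ORIG);
 */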
5196
5197/**
5198 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5199 * @hw: pointer to the hw struct
5200 * @reg_addr: register address
5201 * @reg_val: ptr to register value
5202 * @cmd_details: pointer to command details structure or NULL
5203 *
5204 * Use the firmware to read the Rx control register,
5205 * especially useful if the Rx unit is under heavy pressure
5206 **/
5207i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5208 u32 reg_addr, u32 *reg_val,
5209 struct i40e_asq_cmd_details *cmd_details)
5210{
5211 struct i40e_aq_desc desc;
5212 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5213 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5214 i40e_status status;
5215
5216 if (!reg_val)
5217 return I40E_ERR_PARAM;
5218
5219 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5220
5221 cmd_resp->address = cpu_to_le32(reg_addr);
5222
5223 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5224
5225 if (status == 0)
5226 *reg_val = le32_to_cpu(cmd_resp->value);
5227
5228 return status;
5229}
5230
5231/**
5232 * i40e_read_rx_ctl - read from an Rx control register
5233 * @hw: pointer to the hw struct
5234 * @reg_addr: register address
5235 **/
5236u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5237{
5238 i40e_status status = 0;
5239 bool use_register;
5240 int retry = 5;
5241 u32 val = 0;
5242
5243 use_register = (((hw->aq.api_maj_ver == 1) &&
5244 (hw->aq.api_min_ver < 5)) ||
5245 (hw->mac.type == I40E_MAC_X722));
5246 if (!use_register) {
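		/* The firmware may answer EAGAIN while the admin queue is
		 * busy; retry a few times with a short sleep before giving
		 * up and falling back to a direct register read below.
		 */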
5247do_retry:
5248 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5249 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5250 usleep_range(1000, 2000);
5251 retry--;
5252 goto do_retry;
5253 }
5254 }
5255
5256 /* if the AQ access failed, try the old-fashioned way */
5257 if (status || use_register)
5258 val = rd32(hw, reg_addr);
5259
5260 return val;
5261}
5262
5263/**
5264 * i40e_aq_rx_ctl_write_register
5265 * @hw: pointer to the hw struct
5266 * @reg_addr: register address
5267 * @reg_val: register value
5268 * @cmd_details: pointer to command details structure or NULL
5269 *
5270 * Use the firmware to write to an Rx control register,
5271 * especially useful if the Rx unit is under heavy pressure
5272 **/
5273i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5274 u32 reg_addr, u32 reg_val,
5275 struct i40e_asq_cmd_details *cmd_details)
5276{
5277 struct i40e_aq_desc desc;
5278 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5279 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5280 i40e_status status;
5281
5282 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5283
5284 cmd->address = cpu_to_le32(reg_addr);
5285 cmd->value = cpu_to_le32(reg_val);
5286
5287 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5288
5289 return status;
5290}
5291
5292/**
5293 * i40e_write_rx_ctl - write to an Rx control register
5294 * @hw: pointer to the hw struct
5295 * @reg_addr: register address
5296 * @reg_val: register value
5297 **/
5298void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5299{
5300 i40e_status status = 0;
5301 bool use_register;
5302 int retry = 5;
5303
5304 use_register = (((hw->aq.api_maj_ver == 1) &&
5305 (hw->aq.api_min_ver < 5)) ||
5306 (hw->mac.type == I40E_MAC_X722));
5307 if (!use_register) {
5308do_retry:
5309 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5310 reg_val, NULL);
5311 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5312 usleep_range(1000, 2000);
5313 retry--;
5314 goto do_retry;
5315 }
5316 }
5317
5318 /* if the AQ access failed, try the old-fashioned way */
5319 if (status || use_register)
5320 wr32(hw, reg_addr, reg_val);
5321}
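
/* Both wrappers prefer the firmware-proxied access on firmware that
 * supports it and silently fall back to plain MMIO otherwise, so callers
 * can use them unconditionally. A minimal sketch (the register name here
 * is just an example of an Rx control register):
 *
 *	u32 hena = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), hena);
 */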
5322
5323/**
5324 * i40e_aq_set_phy_register
5325 * @hw: pointer to the hw struct
5326 * @phy_select: select which phy should be accessed
5327 * @dev_addr: PHY device address
5328 * @reg_addr: PHY register address
5329 * @reg_val: new register value
5330 * @cmd_details: pointer to command details structure or NULL
5331 *
5332 * Write the external PHY register.
5333 **/
5334i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
5335 u8 phy_select, u8 dev_addr,
5336 u32 reg_addr, u32 reg_val,
5337 struct i40e_asq_cmd_details *cmd_details)
5338{
5339 struct i40e_aq_desc desc;
5340 struct i40e_aqc_phy_register_access *cmd =
5341 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5342 i40e_status status;
5343
5344 i40e_fill_default_direct_cmd_desc(&desc,
5345 i40e_aqc_opc_set_phy_register);
5346
5347 cmd->phy_interface = phy_select;
5348 cmd->dev_address = dev_addr;
5349 cmd->reg_address = cpu_to_le32(reg_addr);
5350 cmd->reg_value = cpu_to_le32(reg_val);
5351
5352 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5353
5354 return status;
5355}
5356
5357/**
5358 * i40e_aq_get_phy_register
5359 * @hw: pointer to the hw struct
5360 * @phy_select: select which phy should be accessed
5361 * @dev_addr: PHY device address
5362 * @reg_addr: PHY register address
5363 * @reg_val: read register value
5364 * @cmd_details: pointer to command details structure or NULL
5365 *
5366 * Read the external PHY register.
5367 **/
5368i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
5369 u8 phy_select, u8 dev_addr,
5370 u32 reg_addr, u32 *reg_val,
5371 struct i40e_asq_cmd_details *cmd_details)
5372{
5373 struct i40e_aq_desc desc;
5374 struct i40e_aqc_phy_register_access *cmd =
5375 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5376 i40e_status status;
5377
5378 i40e_fill_default_direct_cmd_desc(&desc,
5379 i40e_aqc_opc_get_phy_register);
5380
5381 cmd->phy_interface = phy_select;
5382 cmd->dev_address = dev_addr;
5383 cmd->reg_address = cpu_to_le32(reg_addr);
5384
5385 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5386 if (!status)
5387 *reg_val = le32_to_cpu(cmd->reg_value);
5388
5389 return status;
5390}
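
/* Sketch of a firmware-proxied read-modify-write of an external PHY
 * register, mirroring what the LED helpers above do (the register offset
 * "reg" and bit "some_bit" are placeholders, error handling omitted):
 *
 *	u32 val = 0;
 *
 *	i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *				 I40E_PHY_COM_REG_PAGE, reg, &val, NULL);
 *	val |= some_bit;
 *	i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *				 I40E_PHY_COM_REG_PAGE, reg, val, NULL);
 */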
5391
5392/**
5393 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5394 * @hw: pointer to the hw struct
5395 * @buff: command buffer (size in bytes = buff_size)
5396 * @buff_size: buffer size in bytes
5397 * @track_id: package tracking id
5398 * @error_offset: returns error offset
5399 * @error_info: returns error information
5400 * @cmd_details: pointer to command details structure or NULL
5401 **/
5402enum
5403i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5404 u16 buff_size, u32 track_id,
5405 u32 *error_offset, u32 *error_info,
5406 struct i40e_asq_cmd_details *cmd_details)
5407{
5408 struct i40e_aq_desc desc;
5409 struct i40e_aqc_write_personalization_profile *cmd =
5410 (struct i40e_aqc_write_personalization_profile *)
5411 &desc.params.raw;
5412 struct i40e_aqc_write_ddp_resp *resp;
5413 i40e_status status;
5414
5415 i40e_fill_default_direct_cmd_desc(&desc,
5416 i40e_aqc_opc_write_personalization_profile);
5417
5418 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5419 if (buff_size > I40E_AQ_LARGE_BUF)
5420 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5421
5422 desc.datalen = cpu_to_le16(buff_size);
5423
5424 cmd->profile_track_id = cpu_to_le32(track_id);
5425
5426 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5427 if (!status) {
5428 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5429 if (error_offset)
5430 *error_offset = le32_to_cpu(resp->error_offset);
5431 if (error_info)
5432 *error_info = le32_to_cpu(resp->error_info);
5433 }
5434
5435 return status;
5436}
5437
5438/**
5439 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5440 * @hw: pointer to the hw struct
5441 * @buff: command buffer (size in bytes = buff_size)
5442 * @buff_size: buffer size in bytes
5443 * @flags: AdminQ command flags
5444 * @cmd_details: pointer to command details structure or NULL
5445 **/
5446enum
5447i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5448 u16 buff_size, u8 flags,
5449 struct i40e_asq_cmd_details *cmd_details)
5450{
5451 struct i40e_aq_desc desc;
5452 struct i40e_aqc_get_applied_profiles *cmd =
5453 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5454 i40e_status status;
5455
5456 i40e_fill_default_direct_cmd_desc(&desc,
5457 i40e_aqc_opc_get_personalization_profile_list);
5458
5459 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5460 if (buff_size > I40E_AQ_LARGE_BUF)
5461 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5462 desc.datalen = cpu_to_le16(buff_size);
5463
5464 cmd->flags = flags;
5465
5466 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5467
5468 return status;
5469}
5470
5471/**
5472 * i40e_find_segment_in_package
5473 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5474 * @pkg_hdr: pointer to the package header to be searched
5475 *
5476 * This function searches a package file for a particular segment type. On
5477 * success it returns a pointer to the segment header, otherwise it will
5478 * return NULL.
5479 **/
5480struct i40e_generic_seg_header *
5481i40e_find_segment_in_package(u32 segment_type,
5482 struct i40e_package_header *pkg_hdr)
5483{
5484 struct i40e_generic_seg_header *segment;
5485 u32 i;
5486
5487 /* Search all package segments for the requested segment type */
5488 for (i = 0; i < pkg_hdr->segment_count; i++) {
5489 segment =
5490 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5491 pkg_hdr->segment_offset[i]);
5492
5493 if (segment->type == segment_type)
5494 return segment;
5495 }
5496
5497 return NULL;
5498}
5499
5500/* Get section table in profile */
5501#define I40E_SECTION_TABLE(profile, sec_tbl) \
5502 do { \
5503 struct i40e_profile_segment *p = (profile); \
5504 u32 count; \
5505 u32 *nvm; \
5506 count = p->device_table_count; \
5507 nvm = (u32 *)&p->device_table[count]; \
5508 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5509 } while (0)
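
/* Layout assumed by I40E_SECTION_TABLE above (derived from the pointer
 * arithmetic, not an authoritative format description): the profile
 * segment header is followed by a device table of device_table_count
 * entries; immediately after that table sits a u32 array whose first
 * element is its own entry count, and the section table starts right
 * after that array, i.e. at &nvm[nvm[0] + 1].
 */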
5510
5511/* Get section header in profile */
5512#define I40E_SECTION_HEADER(profile, offset) \
5513 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5514
5515/**
5516 * i40e_find_section_in_profile
5517 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5518 * @profile: pointer to the i40e segment header to be searched
5519 *
5520 * This function searches the i40e segment for a particular section type. On
5521 * success it returns a pointer to the section header, otherwise it will
5522 * return NULL.
5523 **/
5524struct i40e_profile_section_header *
5525i40e_find_section_in_profile(u32 section_type,
5526 struct i40e_profile_segment *profile)
5527{
5528 struct i40e_profile_section_header *sec;
5529 struct i40e_section_table *sec_tbl;
5530 u32 sec_off;
5531 u32 i;
5532
5533 if (profile->header.type != SEGMENT_TYPE_I40E)
5534 return NULL;
5535
5536 I40E_SECTION_TABLE(profile, sec_tbl);
5537
5538 for (i = 0; i < sec_tbl->section_count; i++) {
5539 sec_off = sec_tbl->section_offset[i];
5540 sec = I40E_SECTION_HEADER(profile, sec_off);
5541 if (sec->section.type == section_type)
5542 return sec;
5543 }
5544
5545 return NULL;
5546}
5547
5548/**
5549 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5550 * @hw: pointer to the hw struct
5551 * @aq: command buffer containing all data to execute AQ
5552 **/
5553static enum
5554i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5555 struct i40e_profile_aq_section *aq)
5556{
5557 i40e_status status;
5558 struct i40e_aq_desc desc;
5559 u8 *msg = NULL;
5560 u16 msglen;
5561
5562 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5563 desc.flags |= cpu_to_le16(aq->flags);
5564 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5565
5566 msglen = aq->datalen;
5567 if (msglen) {
5568 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5569 I40E_AQ_FLAG_RD));
5570 if (msglen > I40E_AQ_LARGE_BUF)
5571 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5572 desc.datalen = cpu_to_le16(msglen);
5573 msg = &aq->data[0];
5574 }
5575
5576 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5577
5578 if (status) {
5579 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5580 "unable to exec DDP AQ opcode %u, error %d\n",
5581 aq->opcode, status);
5582 return status;
5583 }
5584
5585 /* copy returned desc to aq_buf */
5586 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5587
5588 return 0;
5589}
5590
5591/**
5592 * i40e_validate_profile
5593 * @hw: pointer to the hardware structure
5594 * @profile: pointer to the profile segment of the package to be validated
5595 * @track_id: package tracking id
5596 * @rollback: flag indicating whether the profile is being validated for rollback
5597 *
5598 * Validates supported devices and profile's sections.
5599 */
5600static enum i40e_status_code
5601i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5602 u32 track_id, bool rollback)
5603{
5604 struct i40e_profile_section_header *sec = NULL;
5605 i40e_status status = 0;
5606 struct i40e_section_table *sec_tbl;
5607 u32 vendor_dev_id;
5608 u32 dev_cnt;
5609 u32 sec_off;
5610 u32 i;
5611
5612 if (track_id == I40E_DDP_TRACKID_INVALID) {
5613 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5614 return I40E_NOT_SUPPORTED;
5615 }
5616
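	/* vendor_dev_id packs the PCI vendor ID in its upper 16 bits and the
	 * device ID in its lower 16 bits, so e.g. 0x80861572 would match an
	 * Intel (0x8086) XL710 SFP+ (0x1572) adapter (example value only).
	 */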
5617 dev_cnt = profile->device_table_count;
5618 for (i = 0; i < dev_cnt; i++) {
5619 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5620 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5621 hw->device_id == (vendor_dev_id & 0xFFFF))
5622 break;
5623 }
5624 if (dev_cnt && i == dev_cnt) {
5625 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5626 "Device doesn't support DDP\n");
5627 return I40E_ERR_DEVICE_NOT_SUPPORTED;
5628 }
5629
5630 I40E_SECTION_TABLE(profile, sec_tbl);
5631
5632 /* Validate sections types */
5633 for (i = 0; i < sec_tbl->section_count; i++) {
5634 sec_off = sec_tbl->section_offset[i];
5635 sec = I40E_SECTION_HEADER(profile, sec_off);
5636 if (rollback) {
5637 if (sec->section.type == SECTION_TYPE_MMIO ||
5638 sec->section.type == SECTION_TYPE_AQ ||
5639 sec->section.type == SECTION_TYPE_RB_AQ) {
5640 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5641 "Not a roll-back package\n");
5642 return I40E_NOT_SUPPORTED;
5643 }
5644 } else {
5645 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5646 sec->section.type == SECTION_TYPE_RB_MMIO) {
5647 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5648 "Not an original package\n");
5649 return I40E_NOT_SUPPORTED;
5650 }
5651 }
5652 }
5653
5654 return status;
5655}
5656
5657/**
5658 * i40e_write_profile
5659 * @hw: pointer to the hardware structure
5660 * @profile: pointer to the profile segment of the package to be downloaded
5661 * @track_id: package tracking id
5662 *
5663 * Handles the download of a complete package.
5664 */
5665enum i40e_status_code
5666i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5667 u32 track_id)
5668{
5669 i40e_status status = 0;
5670 struct i40e_section_table *sec_tbl;
5671 struct i40e_profile_section_header *sec = NULL;
5672 struct i40e_profile_aq_section *ddp_aq;
5673 u32 section_size = 0;
5674 u32 offset = 0, info = 0;
5675 u32 sec_off;
5676 u32 i;
5677
5678 status = i40e_validate_profile(hw, profile, track_id, false);
5679 if (status)
5680 return status;
5681
5682 I40E_SECTION_TABLE(profile, sec_tbl);
5683
5684 for (i = 0; i < sec_tbl->section_count; i++) {
5685 sec_off = sec_tbl->section_offset[i];
5686 sec = I40E_SECTION_HEADER(profile, sec_off);
5687 /* Process generic admin command */
5688 if (sec->section.type == SECTION_TYPE_AQ) {
5689 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5690 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5691 if (status) {
5692 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5693 "Failed to execute aq: section %d, opcode %u\n",
5694 i, ddp_aq->opcode);
5695 break;
5696 }
5697 sec->section.type = SECTION_TYPE_RB_AQ;
5698 }
5699
5700 /* Skip any non-mmio sections */
5701 if (sec->section.type != SECTION_TYPE_MMIO)
5702 continue;
5703
5704 section_size = sec->section.size +
5705 sizeof(struct i40e_profile_section_header);
5706
5707 /* Write MMIO section */
5708 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5709 track_id, &offset, &info, NULL);
5710 if (status) {
5711 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5712 "Failed to write profile: section %d, offset %d, info %d\n",
5713 i, offset, info);
5714 break;
5715 }
5716 }
5717 return status;
5718}
5719
5720/**
5721 * i40e_rollback_profile
5722 * @hw: pointer to the hardware structure
5723 * @profile: pointer to the profile segment of the package to be removed
5724 * @track_id: package tracking id
5725 *
5726 * Rolls back previously loaded package.
5727 */
5728enum i40e_status_code
5729i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5730 u32 track_id)
5731{
5732 struct i40e_profile_section_header *sec = NULL;
5733 i40e_status status = 0;
5734 struct i40e_section_table *sec_tbl;
5735 u32 offset = 0, info = 0;
5736 u32 section_size = 0;
5737 u32 sec_off;
5738 int i;
5739
5740 status = i40e_validate_profile(hw, profile, track_id, true);
5741 if (status)
5742 return status;
5743
5744 I40E_SECTION_TABLE(profile, sec_tbl);
5745
5746 /* For rollback write sections in reverse */
5747 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5748 sec_off = sec_tbl->section_offset[i];
5749 sec = I40E_SECTION_HEADER(profile, sec_off);
5750
5751 /* Skip any non-rollback sections */
5752 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5753 continue;
5754
5755 section_size = sec->section.size +
5756 sizeof(struct i40e_profile_section_header);
5757
5758 /* Write roll-back MMIO section */
5759 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5760 track_id, &offset, &info, NULL);
5761 if (status) {
5762 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5763 "Failed to write profile: section %d, offset %d, info %d\n",
5764 i, offset, info);
5765 break;
5766 }
5767 }
5768 return status;
5769}
5770
5771/**
5772 * i40e_add_pinfo_to_list
5773 * @hw: pointer to the hardware structure
5774 * @profile: pointer to the profile segment of the package
5775 * @profile_info_sec: buffer for information section
5776 * @track_id: package tracking id
5777 *
5778 * Register a profile to the list of loaded profiles.
5779 */
5780enum i40e_status_code
5781i40e_add_pinfo_to_list(struct i40e_hw *hw,
5782 struct i40e_profile_segment *profile,
5783 u8 *profile_info_sec, u32 track_id)
5784{
5785 i40e_status status = 0;
5786 struct i40e_profile_section_header *sec = NULL;
5787 struct i40e_profile_info *pinfo;
5788 u32 offset = 0, info = 0;
5789
5790 sec = (struct i40e_profile_section_header *)profile_info_sec;
5791 sec->tbl_size = 1;
5792 sec->data_end = sizeof(struct i40e_profile_section_header) +
5793 sizeof(struct i40e_profile_info);
5794 sec->section.type = SECTION_TYPE_INFO;
5795 sec->section.offset = sizeof(struct i40e_profile_section_header);
5796 sec->section.size = sizeof(struct i40e_profile_info);
5797 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5798 sec->section.offset);
5799 pinfo->track_id = track_id;
5800 pinfo->version = profile->version;
5801 pinfo->op = I40E_DDP_ADD_TRACKID;
5802 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5803
5804 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5805 track_id, &offset, &info, NULL);
5806
5807 return status;
5808}
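
/* Sketch of the overall DDP load flow these helpers are built for (the
 * real caller lives elsewhere in the driver and may differ; "pkg_data",
 * "track_id" and "sec_buf" are placeholders):
 *
 *	struct i40e_package_header *pkg =
 *		(struct i40e_package_header *)pkg_data;
 *	struct i40e_profile_segment *seg;
 *
 *	seg = (struct i40e_profile_segment *)
 *		i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg);
 *	if (seg && !i40e_write_profile(hw, seg, track_id))
 *		i40e_add_pinfo_to_list(hw, seg, sec_buf, track_id);
 */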
5809
5810/**
5811 * i40e_aq_add_cloud_filters
5812 * @hw: pointer to the hardware structure
5813 * @seid: VSI seid to add cloud filters to
5814 * @filters: Buffer which contains the filters to be added
5815 * @filter_count: number of filters contained in the buffer
5816 *
5817 * Set the cloud filters for a given VSI. The contents of the
5818 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5819 * of the function.
5820 *
5821 **/
5822enum i40e_status_code
5823i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5824 struct i40e_aqc_cloud_filters_element_data *filters,
5825 u8 filter_count)
5826{
5827 struct i40e_aq_desc desc;
5828 struct i40e_aqc_add_remove_cloud_filters *cmd =
5829 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5830 enum i40e_status_code status;
5831 u16 buff_len;
5832
5833 i40e_fill_default_direct_cmd_desc(&desc,
5834 i40e_aqc_opc_add_cloud_filters);
5835
5836 buff_len = filter_count * sizeof(*filters);
5837 desc.datalen = cpu_to_le16(buff_len);
5838 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5839 cmd->num_filters = filter_count;
5840 cmd->seid = cpu_to_le16(seid);
5841
5842 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5843
5844 return status;
5845}
5846
5847/**
5848 * i40e_aq_add_cloud_filters_bb
5849 * @hw: pointer to the hardware structure
5850 * @seid: VSI seid to add cloud filters to
5851 * @filters: Buffer which contains the filters in big buffer to be added
5852 * @filter_count: number of filters contained in the buffer
5853 *
5854 * Set the big buffer cloud filters for a given VSI. The contents of the
5855 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5856 * function.
5857 *
5858 **/
5859enum i40e_status_code
5860i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5861 struct i40e_aqc_cloud_filters_element_bb *filters,
5862 u8 filter_count)
5863{
5864 struct i40e_aq_desc desc;
5865 struct i40e_aqc_add_remove_cloud_filters *cmd =
5866 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5867 i40e_status status;
5868 u16 buff_len;
5869 int i;
5870
5871 i40e_fill_default_direct_cmd_desc(&desc,
5872 i40e_aqc_opc_add_cloud_filters);
5873
5874 buff_len = filter_count * sizeof(*filters);
5875 desc.datalen = cpu_to_le16(buff_len);
5876 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5877 cmd->num_filters = filter_count;
5878 cmd->seid = cpu_to_le16(seid);
5879 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5880
5881 for (i = 0; i < filter_count; i++) {
5882 u16 tnl_type;
5883 u32 ti;
5884
5885 tnl_type = (le16_to_cpu(filters[i].element.flags) &
5886 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5887 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5888
5889 /* Due to hardware eccentricities, the VNI for Geneve is stored
5890 * shifted one byte further up than the Tenant ID of other
5891 * tunnel types.
5892 */
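		/* E.g. a Geneve VNI of 0x000012, passed in as
		 * tenant_id 0x00000012, is stored in the filter as
		 * 0x00001200 (illustrative values only).
		 */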
5893 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5894 ti = le32_to_cpu(filters[i].element.tenant_id);
5895 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5896 }
5897 }
5898
5899 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5900
5901 return status;
5902}
5903
5904/**
5905 * i40e_aq_rem_cloud_filters
5906 * @hw: pointer to the hardware structure
5907 * @seid: VSI seid to remove cloud filters from
5908 * @filters: Buffer which contains the filters to be removed
5909 * @filter_count: number of filters contained in the buffer
5910 *
5911 * Remove the cloud filters for a given VSI. The contents of the
5912 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5913 * of the function.
5914 *
5915 **/
5916enum i40e_status_code
5917i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5918 struct i40e_aqc_cloud_filters_element_data *filters,
5919 u8 filter_count)
5920{
5921 struct i40e_aq_desc desc;
5922 struct i40e_aqc_add_remove_cloud_filters *cmd =
5923 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5924 enum i40e_status_code status;
5925 u16 buff_len;
5926
5927 i40e_fill_default_direct_cmd_desc(&desc,
5928 i40e_aqc_opc_remove_cloud_filters);
5929
5930 buff_len = filter_count * sizeof(*filters);
5931 desc.datalen = cpu_to_le16(buff_len);
5932 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5933 cmd->num_filters = filter_count;
5934 cmd->seid = cpu_to_le16(seid);
5935
5936 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5937
5938 return status;
5939}
5940
5941/**
5942 * i40e_aq_rem_cloud_filters_bb
5943 * @hw: pointer to the hardware structure
5944 * @seid: VSI seid to remove cloud filters from
5945 * @filters: Buffer which contains the filters in big buffer to be removed
5946 * @filter_count: number of filters contained in the buffer
5947 *
5948 * Remove the big buffer cloud filters for a given VSI. The contents of the
5949 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5950 * function.
5951 *
5952 **/
5953enum i40e_status_code
5954i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5955 struct i40e_aqc_cloud_filters_element_bb *filters,
5956 u8 filter_count)
5957{
5958 struct i40e_aq_desc desc;
5959 struct i40e_aqc_add_remove_cloud_filters *cmd =
5960 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5961 i40e_status status;
5962 u16 buff_len;
5963 int i;
5964
5965 i40e_fill_default_direct_cmd_desc(&desc,
5966 i40e_aqc_opc_remove_cloud_filters);
5967
5968 buff_len = filter_count * sizeof(*filters);
5969 desc.datalen = cpu_to_le16(buff_len);
5970 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5971 cmd->num_filters = filter_count;
5972 cmd->seid = cpu_to_le16(seid);
5973 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5974
5975 for (i = 0; i < filter_count; i++) {
5976 u16 tnl_type;
5977 u32 ti;
5978
5979 tnl_type = (le16_to_cpu(filters[i].element.flags) &
5980 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5981 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5982
5983 /* Due to hardware eccentricities, the VNI for Geneve is stored
5984 * shifted one byte further up than the Tenant ID of other
5985 * tunnel types.
5986 */
5987 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5988 ti = le32_to_cpu(filters[i].element.tenant_id);
5989 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5990 }
5991 }
5992
5993 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5994
5995 return status;
5996}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2021 Intel Corporation. */
3
4#include "i40e.h"
5#include "i40e_type.h"
6#include "i40e_adminq.h"
7#include "i40e_prototype.h"
8#include <linux/avf/virtchnl.h>
9
10/**
11 * i40e_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure
13 *
14 * This function sets the mac type of the adapter based on the
15 * vendor ID and device ID stored in the hw structure.
16 **/
17i40e_status i40e_set_mac_type(struct i40e_hw *hw)
18{
19 i40e_status status = 0;
20
21 if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
22 switch (hw->device_id) {
23 case I40E_DEV_ID_SFP_XL710:
24 case I40E_DEV_ID_QEMU:
25 case I40E_DEV_ID_KX_B:
26 case I40E_DEV_ID_KX_C:
27 case I40E_DEV_ID_QSFP_A:
28 case I40E_DEV_ID_QSFP_B:
29 case I40E_DEV_ID_QSFP_C:
30 case I40E_DEV_ID_5G_BASE_T_BC:
31 case I40E_DEV_ID_10G_BASE_T:
32 case I40E_DEV_ID_10G_BASE_T4:
33 case I40E_DEV_ID_10G_BASE_T_BC:
34 case I40E_DEV_ID_10G_B:
35 case I40E_DEV_ID_10G_SFP:
36 case I40E_DEV_ID_20G_KR2:
37 case I40E_DEV_ID_20G_KR2_A:
38 case I40E_DEV_ID_25G_B:
39 case I40E_DEV_ID_25G_SFP28:
40 case I40E_DEV_ID_X710_N3000:
41 case I40E_DEV_ID_XXV710_N3000:
42 hw->mac.type = I40E_MAC_XL710;
43 break;
44 case I40E_DEV_ID_KX_X722:
45 case I40E_DEV_ID_QSFP_X722:
46 case I40E_DEV_ID_SFP_X722:
47 case I40E_DEV_ID_1G_BASE_T_X722:
48 case I40E_DEV_ID_10G_BASE_T_X722:
49 case I40E_DEV_ID_SFP_I_X722:
50 hw->mac.type = I40E_MAC_X722;
51 break;
52 default:
53 hw->mac.type = I40E_MAC_GENERIC;
54 break;
55 }
56 } else {
57 status = I40E_ERR_DEVICE_NOT_SUPPORTED;
58 }
59
60 hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
61 hw->mac.type, status);
62 return status;
63}
64
65/**
66 * i40e_aq_str - convert AQ err code to a string
67 * @hw: pointer to the HW structure
68 * @aq_err: the AQ error code to convert
69 **/
70const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
71{
72 switch (aq_err) {
73 case I40E_AQ_RC_OK:
74 return "OK";
75 case I40E_AQ_RC_EPERM:
76 return "I40E_AQ_RC_EPERM";
77 case I40E_AQ_RC_ENOENT:
78 return "I40E_AQ_RC_ENOENT";
79 case I40E_AQ_RC_ESRCH:
80 return "I40E_AQ_RC_ESRCH";
81 case I40E_AQ_RC_EINTR:
82 return "I40E_AQ_RC_EINTR";
83 case I40E_AQ_RC_EIO:
84 return "I40E_AQ_RC_EIO";
85 case I40E_AQ_RC_ENXIO:
86 return "I40E_AQ_RC_ENXIO";
87 case I40E_AQ_RC_E2BIG:
88 return "I40E_AQ_RC_E2BIG";
89 case I40E_AQ_RC_EAGAIN:
90 return "I40E_AQ_RC_EAGAIN";
91 case I40E_AQ_RC_ENOMEM:
92 return "I40E_AQ_RC_ENOMEM";
93 case I40E_AQ_RC_EACCES:
94 return "I40E_AQ_RC_EACCES";
95 case I40E_AQ_RC_EFAULT:
96 return "I40E_AQ_RC_EFAULT";
97 case I40E_AQ_RC_EBUSY:
98 return "I40E_AQ_RC_EBUSY";
99 case I40E_AQ_RC_EEXIST:
100 return "I40E_AQ_RC_EEXIST";
101 case I40E_AQ_RC_EINVAL:
102 return "I40E_AQ_RC_EINVAL";
103 case I40E_AQ_RC_ENOTTY:
104 return "I40E_AQ_RC_ENOTTY";
105 case I40E_AQ_RC_ENOSPC:
106 return "I40E_AQ_RC_ENOSPC";
107 case I40E_AQ_RC_ENOSYS:
108 return "I40E_AQ_RC_ENOSYS";
109 case I40E_AQ_RC_ERANGE:
110 return "I40E_AQ_RC_ERANGE";
111 case I40E_AQ_RC_EFLUSHED:
112 return "I40E_AQ_RC_EFLUSHED";
113 case I40E_AQ_RC_BAD_ADDR:
114 return "I40E_AQ_RC_BAD_ADDR";
115 case I40E_AQ_RC_EMODE:
116 return "I40E_AQ_RC_EMODE";
117 case I40E_AQ_RC_EFBIG:
118 return "I40E_AQ_RC_EFBIG";
119 }
120
121 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
122 return hw->err_str;
123}
124
125/**
126 * i40e_stat_str - convert status err code to a string
127 * @hw: pointer to the HW structure
128 * @stat_err: the status error code to convert
129 **/
130const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
131{
132 switch (stat_err) {
133 case 0:
134 return "OK";
135 case I40E_ERR_NVM:
136 return "I40E_ERR_NVM";
137 case I40E_ERR_NVM_CHECKSUM:
138 return "I40E_ERR_NVM_CHECKSUM";
139 case I40E_ERR_PHY:
140 return "I40E_ERR_PHY";
141 case I40E_ERR_CONFIG:
142 return "I40E_ERR_CONFIG";
143 case I40E_ERR_PARAM:
144 return "I40E_ERR_PARAM";
145 case I40E_ERR_MAC_TYPE:
146 return "I40E_ERR_MAC_TYPE";
147 case I40E_ERR_UNKNOWN_PHY:
148 return "I40E_ERR_UNKNOWN_PHY";
149 case I40E_ERR_LINK_SETUP:
150 return "I40E_ERR_LINK_SETUP";
151 case I40E_ERR_ADAPTER_STOPPED:
152 return "I40E_ERR_ADAPTER_STOPPED";
153 case I40E_ERR_INVALID_MAC_ADDR:
154 return "I40E_ERR_INVALID_MAC_ADDR";
155 case I40E_ERR_DEVICE_NOT_SUPPORTED:
156 return "I40E_ERR_DEVICE_NOT_SUPPORTED";
157 case I40E_ERR_MASTER_REQUESTS_PENDING:
158 return "I40E_ERR_MASTER_REQUESTS_PENDING";
159 case I40E_ERR_INVALID_LINK_SETTINGS:
160 return "I40E_ERR_INVALID_LINK_SETTINGS";
161 case I40E_ERR_AUTONEG_NOT_COMPLETE:
162 return "I40E_ERR_AUTONEG_NOT_COMPLETE";
163 case I40E_ERR_RESET_FAILED:
164 return "I40E_ERR_RESET_FAILED";
165 case I40E_ERR_SWFW_SYNC:
166 return "I40E_ERR_SWFW_SYNC";
167 case I40E_ERR_NO_AVAILABLE_VSI:
168 return "I40E_ERR_NO_AVAILABLE_VSI";
169 case I40E_ERR_NO_MEMORY:
170 return "I40E_ERR_NO_MEMORY";
171 case I40E_ERR_BAD_PTR:
172 return "I40E_ERR_BAD_PTR";
173 case I40E_ERR_RING_FULL:
174 return "I40E_ERR_RING_FULL";
175 case I40E_ERR_INVALID_PD_ID:
176 return "I40E_ERR_INVALID_PD_ID";
177 case I40E_ERR_INVALID_QP_ID:
178 return "I40E_ERR_INVALID_QP_ID";
179 case I40E_ERR_INVALID_CQ_ID:
180 return "I40E_ERR_INVALID_CQ_ID";
181 case I40E_ERR_INVALID_CEQ_ID:
182 return "I40E_ERR_INVALID_CEQ_ID";
183 case I40E_ERR_INVALID_AEQ_ID:
184 return "I40E_ERR_INVALID_AEQ_ID";
185 case I40E_ERR_INVALID_SIZE:
186 return "I40E_ERR_INVALID_SIZE";
187 case I40E_ERR_INVALID_ARP_INDEX:
188 return "I40E_ERR_INVALID_ARP_INDEX";
189 case I40E_ERR_INVALID_FPM_FUNC_ID:
190 return "I40E_ERR_INVALID_FPM_FUNC_ID";
191 case I40E_ERR_QP_INVALID_MSG_SIZE:
192 return "I40E_ERR_QP_INVALID_MSG_SIZE";
193 case I40E_ERR_QP_TOOMANY_WRS_POSTED:
194 return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
195 case I40E_ERR_INVALID_FRAG_COUNT:
196 return "I40E_ERR_INVALID_FRAG_COUNT";
197 case I40E_ERR_QUEUE_EMPTY:
198 return "I40E_ERR_QUEUE_EMPTY";
199 case I40E_ERR_INVALID_ALIGNMENT:
200 return "I40E_ERR_INVALID_ALIGNMENT";
201 case I40E_ERR_FLUSHED_QUEUE:
202 return "I40E_ERR_FLUSHED_QUEUE";
203 case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
204 return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
205 case I40E_ERR_INVALID_IMM_DATA_SIZE:
206 return "I40E_ERR_INVALID_IMM_DATA_SIZE";
207 case I40E_ERR_TIMEOUT:
208 return "I40E_ERR_TIMEOUT";
209 case I40E_ERR_OPCODE_MISMATCH:
210 return "I40E_ERR_OPCODE_MISMATCH";
211 case I40E_ERR_CQP_COMPL_ERROR:
212 return "I40E_ERR_CQP_COMPL_ERROR";
213 case I40E_ERR_INVALID_VF_ID:
214 return "I40E_ERR_INVALID_VF_ID";
215 case I40E_ERR_INVALID_HMCFN_ID:
216 return "I40E_ERR_INVALID_HMCFN_ID";
217 case I40E_ERR_BACKING_PAGE_ERROR:
218 return "I40E_ERR_BACKING_PAGE_ERROR";
219 case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
220 return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
221 case I40E_ERR_INVALID_PBLE_INDEX:
222 return "I40E_ERR_INVALID_PBLE_INDEX";
223 case I40E_ERR_INVALID_SD_INDEX:
224 return "I40E_ERR_INVALID_SD_INDEX";
225 case I40E_ERR_INVALID_PAGE_DESC_INDEX:
226 return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
227 case I40E_ERR_INVALID_SD_TYPE:
228 return "I40E_ERR_INVALID_SD_TYPE";
229 case I40E_ERR_MEMCPY_FAILED:
230 return "I40E_ERR_MEMCPY_FAILED";
231 case I40E_ERR_INVALID_HMC_OBJ_INDEX:
232 return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
233 case I40E_ERR_INVALID_HMC_OBJ_COUNT:
234 return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
235 case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
236 return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
237 case I40E_ERR_SRQ_ENABLED:
238 return "I40E_ERR_SRQ_ENABLED";
239 case I40E_ERR_ADMIN_QUEUE_ERROR:
240 return "I40E_ERR_ADMIN_QUEUE_ERROR";
241 case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
242 return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
243 case I40E_ERR_BUF_TOO_SHORT:
244 return "I40E_ERR_BUF_TOO_SHORT";
245 case I40E_ERR_ADMIN_QUEUE_FULL:
246 return "I40E_ERR_ADMIN_QUEUE_FULL";
247 case I40E_ERR_ADMIN_QUEUE_NO_WORK:
248 return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
249 case I40E_ERR_BAD_IWARP_CQE:
250 return "I40E_ERR_BAD_IWARP_CQE";
251 case I40E_ERR_NVM_BLANK_MODE:
252 return "I40E_ERR_NVM_BLANK_MODE";
253 case I40E_ERR_NOT_IMPLEMENTED:
254 return "I40E_ERR_NOT_IMPLEMENTED";
255 case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
256 return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
257 case I40E_ERR_DIAG_TEST_FAILED:
258 return "I40E_ERR_DIAG_TEST_FAILED";
259 case I40E_ERR_NOT_READY:
260 return "I40E_ERR_NOT_READY";
261 case I40E_NOT_SUPPORTED:
262 return "I40E_NOT_SUPPORTED";
263 case I40E_ERR_FIRMWARE_API_VERSION:
264 return "I40E_ERR_FIRMWARE_API_VERSION";
265 case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
266 return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
267 }
268
269 snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
270 return hw->err_str;
271}
272
273/**
274 * i40e_debug_aq
275 * @hw: debug mask related to admin queue
276 * @mask: debug mask
277 * @desc: pointer to admin queue descriptor
278 * @buffer: pointer to command buffer
279 * @buf_len: max length of buffer
280 *
281 * Dumps debug log about adminq command with descriptor contents.
282 **/
283void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
284 void *buffer, u16 buf_len)
285{
286 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
287 u32 effective_mask = hw->debug_mask & mask;
288 char prefix[27];
289 u16 len;
290 u8 *buf = (u8 *)buffer;
291
292 if (!effective_mask || !desc)
293 return;
294
295 len = le16_to_cpu(aq_desc->datalen);
296
297 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
298 "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
299 le16_to_cpu(aq_desc->opcode),
300 le16_to_cpu(aq_desc->flags),
301 le16_to_cpu(aq_desc->datalen),
302 le16_to_cpu(aq_desc->retval));
303 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
304 "\tcookie (h,l) 0x%08X 0x%08X\n",
305 le32_to_cpu(aq_desc->cookie_high),
306 le32_to_cpu(aq_desc->cookie_low));
307 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
308 "\tparam (0,1) 0x%08X 0x%08X\n",
309 le32_to_cpu(aq_desc->params.internal.param0),
310 le32_to_cpu(aq_desc->params.internal.param1));
311 i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
312 "\taddr (h,l) 0x%08X 0x%08X\n",
313 le32_to_cpu(aq_desc->params.external.addr_high),
314 le32_to_cpu(aq_desc->params.external.addr_low));
315
316 if (buffer && buf_len != 0 && len != 0 &&
317 (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
318 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
319 if (buf_len < len)
320 len = buf_len;
321
322 snprintf(prefix, sizeof(prefix),
323 "i40e %02x:%02x.%x: \t0x",
324 hw->bus.bus_id,
325 hw->bus.device,
326 hw->bus.func);
327
328 print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
329 16, 1, buf, len, false);
330 }
331}
332
333/**
334 * i40e_check_asq_alive
335 * @hw: pointer to the hw struct
336 *
337 * Returns true if Queue is enabled else false.
338 **/
339bool i40e_check_asq_alive(struct i40e_hw *hw)
340{
341 if (hw->aq.asq.len)
342 return !!(rd32(hw, hw->aq.asq.len) &
343 I40E_PF_ATQLEN_ATQENABLE_MASK);
344 else
345 return false;
346}
347
348/**
349 * i40e_aq_queue_shutdown
350 * @hw: pointer to the hw struct
351 * @unloading: is the driver unloading itself
352 *
353 * Tell the Firmware that we're shutting down the AdminQ and whether
354 * or not the driver is unloading as well.
355 **/
356i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
357 bool unloading)
358{
359 struct i40e_aq_desc desc;
360 struct i40e_aqc_queue_shutdown *cmd =
361 (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
362 i40e_status status;
363
364 i40e_fill_default_direct_cmd_desc(&desc,
365 i40e_aqc_opc_queue_shutdown);
366
367 if (unloading)
368 cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
369 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
370
371 return status;
372}
373
374/**
375 * i40e_aq_get_set_rss_lut
376 * @hw: pointer to the hardware structure
377 * @vsi_id: vsi fw index
378 * @pf_lut: for PF table set true, for VSI table set false
379 * @lut: pointer to the lut buffer provided by the caller
380 * @lut_size: size of the lut buffer
381 * @set: set true to set the table, false to get the table
382 *
383 * Internal function to get or set RSS look up table
384 **/
385static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
386 u16 vsi_id, bool pf_lut,
387 u8 *lut, u16 lut_size,
388 bool set)
389{
390 i40e_status status;
391 struct i40e_aq_desc desc;
392 struct i40e_aqc_get_set_rss_lut *cmd_resp =
393 (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
394
395 if (set)
396 i40e_fill_default_direct_cmd_desc(&desc,
397 i40e_aqc_opc_set_rss_lut);
398 else
399 i40e_fill_default_direct_cmd_desc(&desc,
400 i40e_aqc_opc_get_rss_lut);
401
402 /* Indirect command */
403 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
404 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
405
406 cmd_resp->vsi_id =
407 cpu_to_le16((u16)((vsi_id <<
408 I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
409 I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
410 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
411
412 if (pf_lut)
413 cmd_resp->flags |= cpu_to_le16((u16)
414 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
415 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
416 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
417 else
418 cmd_resp->flags |= cpu_to_le16((u16)
419 ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
420 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
421 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
422
423 status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
424
425 return status;
426}
427
428/**
429 * i40e_aq_get_rss_lut
430 * @hw: pointer to the hardware structure
431 * @vsi_id: vsi fw index
432 * @pf_lut: for PF table set true, for VSI table set false
433 * @lut: pointer to the lut buffer provided by the caller
434 * @lut_size: size of the lut buffer
435 *
436 * get the RSS lookup table, PF or VSI type
437 **/
438i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
439 bool pf_lut, u8 *lut, u16 lut_size)
440{
441 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
442 false);
443}
444
445/**
446 * i40e_aq_set_rss_lut
447 * @hw: pointer to the hardware structure
448 * @vsi_id: vsi fw index
449 * @pf_lut: for PF table set true, for VSI table set false
450 * @lut: pointer to the lut buffer provided by the caller
451 * @lut_size: size of the lut buffer
452 *
453 * set the RSS lookup table, PF or VSI type
454 **/
455i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
456 bool pf_lut, u8 *lut, u16 lut_size)
457{
458 return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
459}
460
461/**
462 * i40e_aq_get_set_rss_key
463 * @hw: pointer to the hw struct
464 * @vsi_id: vsi fw index
465 * @key: pointer to key info struct
466 * @set: set true to set the key, false to get the key
467 *
468 * get the RSS key per VSI
469 **/
470static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
471 u16 vsi_id,
472 struct i40e_aqc_get_set_rss_key_data *key,
473 bool set)
474{
475 i40e_status status;
476 struct i40e_aq_desc desc;
477 struct i40e_aqc_get_set_rss_key *cmd_resp =
478 (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
479 u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
480
481 if (set)
482 i40e_fill_default_direct_cmd_desc(&desc,
483 i40e_aqc_opc_set_rss_key);
484 else
485 i40e_fill_default_direct_cmd_desc(&desc,
486 i40e_aqc_opc_get_rss_key);
487
488 /* Indirect command */
489 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
490 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
491
492 cmd_resp->vsi_id =
493 cpu_to_le16((u16)((vsi_id <<
494 I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
495 I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
496 cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
497
498 status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
499
500 return status;
501}
502
503/**
504 * i40e_aq_get_rss_key
505 * @hw: pointer to the hw struct
506 * @vsi_id: vsi fw index
507 * @key: pointer to key info struct
508 *
509 **/
510i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
511 u16 vsi_id,
512 struct i40e_aqc_get_set_rss_key_data *key)
513{
514 return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
515}
516
517/**
518 * i40e_aq_set_rss_key
519 * @hw: pointer to the hw struct
520 * @vsi_id: vsi fw index
521 * @key: pointer to key info struct
522 *
523 * set the RSS key per VSI
524 **/
525i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
526 u16 vsi_id,
527 struct i40e_aqc_get_set_rss_key_data *key)
528{
529 return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
530}
531
532/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
533 * hardware to a bit-field that can be used by SW to more easily determine the
534 * packet type.
535 *
536 * Macros are used to shorten the table lines and make this table human
537 * readable.
538 *
539 * We store the PTYPE in the top byte of the bit field - this is just so that
540 * we can check that the table doesn't have a row missing, as the index into
541 * the table should be the PTYPE.
542 *
543 * Typical work flow:
544 *
545 * IF NOT i40e_ptype_lookup[ptype].known
546 * THEN
547 * Packet is unknown
548 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
549 * Use the rest of the fields to look at the tunnels, inner protocols, etc
550 * ELSE
551 * Use the enum i40e_rx_l2_ptype to decode the packet type
552 * ENDIF
553 */
554
555/* macro to make the table lines short, use explicit indexing with [PTYPE] */
556#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
557 [PTYPE] = { \
558 1, \
559 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
560 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
561 I40E_RX_PTYPE_##OUTER_FRAG, \
562 I40E_RX_PTYPE_TUNNEL_##T, \
563 I40E_RX_PTYPE_TUNNEL_END_##TE, \
564 I40E_RX_PTYPE_##TEF, \
565 I40E_RX_PTYPE_INNER_PROT_##I, \
566 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
567
568#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
569
570/* shorter macros makes the table fit but are terse */
571#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
572#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
573#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
574
575/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
576struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
577 /* L2 Packet types */
578 I40E_PTT_UNUSED_ENTRY(0),
579 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
580 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
581 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
582 I40E_PTT_UNUSED_ENTRY(4),
583 I40E_PTT_UNUSED_ENTRY(5),
584 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
585 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
586 I40E_PTT_UNUSED_ENTRY(8),
587 I40E_PTT_UNUSED_ENTRY(9),
588 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
589 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
590 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
591 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
592 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
593 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
594 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
595 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
596 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
597 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
598 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
599 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
600
601 /* Non Tunneled IPv4 */
602 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
603 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
604 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
605 I40E_PTT_UNUSED_ENTRY(25),
606 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
607 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
608 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
609
610 /* IPv4 --> IPv4 */
611 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
612 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
613 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
614 I40E_PTT_UNUSED_ENTRY(32),
615 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
616 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
617 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
618
619 /* IPv4 --> IPv6 */
620 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
621 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
622 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
623 I40E_PTT_UNUSED_ENTRY(39),
624 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
625 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
626 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
627
628 /* IPv4 --> GRE/NAT */
629 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
630
631 /* IPv4 --> GRE/NAT --> IPv4 */
632 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
633 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
634 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
635 I40E_PTT_UNUSED_ENTRY(47),
636 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
637 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
638 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
639
640 /* IPv4 --> GRE/NAT --> IPv6 */
641 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
642 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
643 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
644 I40E_PTT_UNUSED_ENTRY(54),
645 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
646 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
647 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
648
649 /* IPv4 --> GRE/NAT --> MAC */
650 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
651
652 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
653 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
654 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
655 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
656 I40E_PTT_UNUSED_ENTRY(62),
657 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
658 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
659 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
660
661 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
662 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
663 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
664 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
665 I40E_PTT_UNUSED_ENTRY(69),
666 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
667 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
668 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
669
670 /* IPv4 --> GRE/NAT --> MAC/VLAN */
671 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
672
673 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
674 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
675 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
676 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
677 I40E_PTT_UNUSED_ENTRY(77),
678 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
679 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
680 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
681
682 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
683 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
684 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
685 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
686 I40E_PTT_UNUSED_ENTRY(84),
687 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
688 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
689 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
690
691 /* Non Tunneled IPv6 */
692 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
693 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
694 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
695 I40E_PTT_UNUSED_ENTRY(91),
696 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
697 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
698 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
699
700 /* IPv6 --> IPv4 */
701 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
702 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
703 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
704 I40E_PTT_UNUSED_ENTRY(98),
705 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
706 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
707 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
708
709 /* IPv6 --> IPv6 */
710 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
711 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
712 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
713 I40E_PTT_UNUSED_ENTRY(105),
714 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
715 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
716 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
717
718 /* IPv6 --> GRE/NAT */
719 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
720
721 /* IPv6 --> GRE/NAT -> IPv4 */
722 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
723 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
724 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
725 I40E_PTT_UNUSED_ENTRY(113),
726 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
727 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
728 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
729
730 /* IPv6 --> GRE/NAT -> IPv6 */
731 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
732 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
733 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
734 I40E_PTT_UNUSED_ENTRY(120),
735 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
736 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
737 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
738
739 /* IPv6 --> GRE/NAT -> MAC */
740 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
741
742 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
743 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
744 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
745 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
746 I40E_PTT_UNUSED_ENTRY(128),
747 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
748 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
749 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
750
751 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
752 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
753 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
754 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
755 I40E_PTT_UNUSED_ENTRY(135),
756 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
757 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
758 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
759
760 /* IPv6 --> GRE/NAT -> MAC/VLAN */
761 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
762
763 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
764 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
765 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
766 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
767 I40E_PTT_UNUSED_ENTRY(143),
768 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
769 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
770 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
771
772 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
773 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
774 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
775 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
776 I40E_PTT_UNUSED_ENTRY(150),
777 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
778 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
779 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
780
781 /* unused entries */
782 [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
783};
784
785/**
786 * i40e_init_shared_code - Initialize the shared code
787 * @hw: pointer to hardware structure
788 *
789 * This assigns the MAC type and PHY code and inits the NVM.
790 * Does not touch the hardware. This function must be called prior to any
791 * other function in the shared code. The i40e_hw structure should be
792 * memset to 0 prior to calling this function. The following fields in
793 * hw structure should be filled in prior to calling this function:
794 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
795 * subsystem_vendor_id, and revision_id
796 **/
797i40e_status i40e_init_shared_code(struct i40e_hw *hw)
798{
799 i40e_status status = 0;
800 u32 port, ari, func_rid;
801
802 i40e_set_mac_type(hw);
803
804 switch (hw->mac.type) {
805 case I40E_MAC_XL710:
806 case I40E_MAC_X722:
807 break;
808 default:
809 return I40E_ERR_DEVICE_NOT_SUPPORTED;
810 }
811
812 hw->phy.get_link_info = true;
813
814 /* Determine port number and PF number*/
815 port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
816 >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
817 hw->port = (u8)port;
818 ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
819 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
820 func_rid = rd32(hw, I40E_PF_FUNC_RID);
821 if (ari)
822 hw->pf_id = (u8)(func_rid & 0xff);
823 else
824 hw->pf_id = (u8)(func_rid & 0x7);
825
826 status = i40e_init_nvm(hw);
827 return status;
828}
829
830/**
831 * i40e_aq_mac_address_read - Retrieve the MAC addresses
832 * @hw: pointer to the hw struct
833 * @flags: a return indicator of what addresses were added to the addr store
834 * @addrs: the requestor's mac addr store
835 * @cmd_details: pointer to command details structure or NULL
836 **/
837static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
838 u16 *flags,
839 struct i40e_aqc_mac_address_read_data *addrs,
840 struct i40e_asq_cmd_details *cmd_details)
841{
842 struct i40e_aq_desc desc;
843 struct i40e_aqc_mac_address_read *cmd_data =
844 (struct i40e_aqc_mac_address_read *)&desc.params.raw;
845 i40e_status status;
846
847 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
848 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
849
850 status = i40e_asq_send_command(hw, &desc, addrs,
851 sizeof(*addrs), cmd_details);
852 *flags = le16_to_cpu(cmd_data->command_flags);
853
854 return status;
855}
856
857/**
858 * i40e_aq_mac_address_write - Change the MAC addresses
859 * @hw: pointer to the hw struct
860 * @flags: indicates which MAC to be written
861 * @mac_addr: address to write
862 * @cmd_details: pointer to command details structure or NULL
863 **/
864i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
865 u16 flags, u8 *mac_addr,
866 struct i40e_asq_cmd_details *cmd_details)
867{
868 struct i40e_aq_desc desc;
869 struct i40e_aqc_mac_address_write *cmd_data =
870 (struct i40e_aqc_mac_address_write *)&desc.params.raw;
871 i40e_status status;
872
873 i40e_fill_default_direct_cmd_desc(&desc,
874 i40e_aqc_opc_mac_address_write);
875 cmd_data->command_flags = cpu_to_le16(flags);
876 cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
877 cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
878 ((u32)mac_addr[3] << 16) |
879 ((u32)mac_addr[4] << 8) |
880 mac_addr[5]);
881
882 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
883
884 return status;
885}
886
887/**
888 * i40e_get_mac_addr - get MAC address
889 * @hw: pointer to the HW structure
890 * @mac_addr: pointer to MAC address
891 *
 * Reads the adapter's LAN MAC address from firmware via the admin queue
893 **/
894i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
895{
896 struct i40e_aqc_mac_address_read_data addrs;
897 i40e_status status;
898 u16 flags = 0;
899
900 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
901
902 if (flags & I40E_AQC_LAN_ADDR_VALID)
903 ether_addr_copy(mac_addr, addrs.pf_lan_mac);
904
905 return status;
906}
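
/*
 * Example (illustrative sketch, not part of the driver): fetch the PF LAN
 * MAC address and sanity-check it before use. The helper name is
 * hypothetical; is_valid_ether_addr() comes from <linux/etherdevice.h>,
 * which is assumed to be available via i40e.h.
 */
static int i40e_example_get_hw_mac(struct i40e_hw *hw, u8 *mac /* ETH_ALEN */)
{
	i40e_status status;

	status = i40e_get_mac_addr(hw, mac);
	if (status)
		return -EIO;

	/* firmware may report an all-zero or multicast address on bad NVM */
	if (!is_valid_ether_addr(mac))
		return -ENXIO;

	return 0;
}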
907
908/**
909 * i40e_get_port_mac_addr - get Port MAC address
910 * @hw: pointer to the HW structure
911 * @mac_addr: pointer to Port MAC address
912 *
913 * Reads the adapter's Port MAC address
914 **/
915i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
916{
917 struct i40e_aqc_mac_address_read_data addrs;
918 i40e_status status;
919 u16 flags = 0;
920
921 status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
922 if (status)
923 return status;
924
925 if (flags & I40E_AQC_PORT_ADDR_VALID)
926 ether_addr_copy(mac_addr, addrs.port_mac);
927 else
928 status = I40E_ERR_INVALID_MAC_ADDR;
929
930 return status;
931}
932
933/**
934 * i40e_pre_tx_queue_cfg - pre tx queue configure
935 * @hw: pointer to the HW structure
936 * @queue: target PF queue index
937 * @enable: state change request
938 *
939 * Handles hw requirement to indicate intention to enable
940 * or disable target queue.
941 **/
942void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
943{
944 u32 abs_queue_idx = hw->func_caps.base_queue + queue;
945 u32 reg_block = 0;
946 u32 reg_val;
947
948 if (abs_queue_idx >= 128) {
949 reg_block = abs_queue_idx / 128;
950 abs_queue_idx %= 128;
951 }
952
953 reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
954 reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
955 reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
956
957 if (enable)
958 reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
959 else
960 reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
961
962 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
963}
964
965/**
966 * i40e_read_pba_string - Reads part number string from EEPROM
967 * @hw: pointer to hardware structure
968 * @pba_num: stores the part number string from the EEPROM
969 * @pba_num_size: part number string buffer length
970 *
971 * Reads the part number string from the EEPROM.
972 **/
973i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
974 u32 pba_num_size)
975{
976 i40e_status status = 0;
977 u16 pba_word = 0;
978 u16 pba_size = 0;
979 u16 pba_ptr = 0;
980 u16 i = 0;
981
982 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
983 if (status || (pba_word != 0xFAFA)) {
984 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
985 return status;
986 }
987
988 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
989 if (status) {
990 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
991 return status;
992 }
993
994 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
995 if (status) {
996 hw_dbg(hw, "Failed to read PBA Block size.\n");
997 return status;
998 }
999
1000 /* Subtract one to get PBA word count (PBA Size word is included in
1001 * total size)
1002 */
1003 pba_size--;
1004 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1005 hw_dbg(hw, "Buffer too small for PBA data.\n");
1006 return I40E_ERR_PARAM;
1007 }
1008
1009 for (i = 0; i < pba_size; i++) {
1010 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1011 if (status) {
1012 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1013 return status;
1014 }
1015
1016 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1017 pba_num[(i * 2) + 1] = pba_word & 0xFF;
1018 }
1019 pba_num[(pba_size * 2)] = '\0';
1020
1021 return status;
1022}
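
/*
 * Example (illustrative sketch, not part of the driver): read the PBA string
 * into a fixed buffer. The 32-byte size is an assumption chosen to cover the
 * 2 * pba_size + 1 bytes required above; a too-small buffer makes
 * i40e_read_pba_string() return I40E_ERR_PARAM rather than truncating. The
 * buffer is zero-initialized because the function leaves it untouched when
 * the PBA flags word is not present.
 */
static void i40e_example_print_pba(struct i40e_hw *hw)
{
	u8 pba[32] = {};

	if (i40e_read_pba_string(hw, pba, sizeof(pba)))
		return;

	hw_dbg(hw, "PBA: %s\n", pba);
}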
1023
1024/**
1025 * i40e_get_media_type - Gets media type
1026 * @hw: pointer to the hardware structure
1027 **/
1028static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1029{
1030 enum i40e_media_type media;
1031
1032 switch (hw->phy.link_info.phy_type) {
1033 case I40E_PHY_TYPE_10GBASE_SR:
1034 case I40E_PHY_TYPE_10GBASE_LR:
1035 case I40E_PHY_TYPE_1000BASE_SX:
1036 case I40E_PHY_TYPE_1000BASE_LX:
1037 case I40E_PHY_TYPE_40GBASE_SR4:
1038 case I40E_PHY_TYPE_40GBASE_LR4:
1039 case I40E_PHY_TYPE_25GBASE_LR:
1040 case I40E_PHY_TYPE_25GBASE_SR:
1041 media = I40E_MEDIA_TYPE_FIBER;
1042 break;
1043 case I40E_PHY_TYPE_100BASE_TX:
1044 case I40E_PHY_TYPE_1000BASE_T:
1045 case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1046 case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1047 case I40E_PHY_TYPE_10GBASE_T:
1048 media = I40E_MEDIA_TYPE_BASET;
1049 break;
1050 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1051 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1052 case I40E_PHY_TYPE_10GBASE_CR1:
1053 case I40E_PHY_TYPE_40GBASE_CR4:
1054 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1055 case I40E_PHY_TYPE_40GBASE_AOC:
1056 case I40E_PHY_TYPE_10GBASE_AOC:
1057 case I40E_PHY_TYPE_25GBASE_CR:
1058 case I40E_PHY_TYPE_25GBASE_AOC:
1059 case I40E_PHY_TYPE_25GBASE_ACC:
1060 media = I40E_MEDIA_TYPE_DA;
1061 break;
1062 case I40E_PHY_TYPE_1000BASE_KX:
1063 case I40E_PHY_TYPE_10GBASE_KX4:
1064 case I40E_PHY_TYPE_10GBASE_KR:
1065 case I40E_PHY_TYPE_40GBASE_KR4:
1066 case I40E_PHY_TYPE_20GBASE_KR2:
1067 case I40E_PHY_TYPE_25GBASE_KR:
1068 media = I40E_MEDIA_TYPE_BACKPLANE;
1069 break;
1070 case I40E_PHY_TYPE_SGMII:
1071 case I40E_PHY_TYPE_XAUI:
1072 case I40E_PHY_TYPE_XFI:
1073 case I40E_PHY_TYPE_XLAUI:
1074 case I40E_PHY_TYPE_XLPPI:
1075 default:
1076 media = I40E_MEDIA_TYPE_UNKNOWN;
1077 break;
1078 }
1079
1080 return media;
1081}
1082
1083/**
1084 * i40e_poll_globr - Poll for Global Reset completion
1085 * @hw: pointer to the hardware structure
1086 * @retry_limit: how many times to retry before failure
1087 **/
1088static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1089 u32 retry_limit)
1090{
1091 u32 cnt, reg = 0;
1092
1093 for (cnt = 0; cnt < retry_limit; cnt++) {
1094 reg = rd32(hw, I40E_GLGEN_RSTAT);
1095 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1096 return 0;
1097 msleep(100);
1098 }
1099
1100 hw_dbg(hw, "Global reset failed.\n");
1101 hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1102
1103 return I40E_ERR_RESET_FAILED;
1104}
1105
1106#define I40E_PF_RESET_WAIT_COUNT_A0 200
1107#define I40E_PF_RESET_WAIT_COUNT 200
1108/**
1109 * i40e_pf_reset - Reset the PF
1110 * @hw: pointer to the hardware structure
1111 *
1112 * Assuming someone else has triggered a global reset,
 * ensure the global reset is complete and then reset the PF
1114 **/
1115i40e_status i40e_pf_reset(struct i40e_hw *hw)
1116{
1117 u32 cnt = 0;
1118 u32 cnt1 = 0;
1119 u32 reg = 0;
1120 u32 grst_del;
1121
1122 /* Poll for Global Reset steady state in case of recent GRST.
1123 * The grst delay value is in 100ms units, and we'll wait a
1124 * couple counts longer to be sure we don't just miss the end.
1125 */
1126 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1127 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1128 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1129
	/* It can take up to 15 secs for GRST steady state.
1131 * Bump it to 16 secs max to be safe.
1132 */
1133 grst_del = grst_del * 20;
1134
1135 for (cnt = 0; cnt < grst_del; cnt++) {
1136 reg = rd32(hw, I40E_GLGEN_RSTAT);
1137 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1138 break;
1139 msleep(100);
1140 }
1141 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1142 hw_dbg(hw, "Global reset polling failed to complete.\n");
1143 return I40E_ERR_RESET_FAILED;
1144 }
1145
1146 /* Now Wait for the FW to be ready */
1147 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1148 reg = rd32(hw, I40E_GLNVM_ULD);
1149 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1150 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1151 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1152 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1153 hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1154 break;
1155 }
1156 usleep_range(10000, 20000);
1157 }
1158 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1159 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
1161 hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1162 return I40E_ERR_RESET_FAILED;
1163 }
1164
1165 /* If there was a Global Reset in progress when we got here,
1166 * we don't need to do the PF Reset
1167 */
1168 if (!cnt) {
1169 u32 reg2 = 0;
1170 if (hw->revision_id == 0)
1171 cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1172 else
1173 cnt = I40E_PF_RESET_WAIT_COUNT;
1174 reg = rd32(hw, I40E_PFGEN_CTRL);
1175 wr32(hw, I40E_PFGEN_CTRL,
1176 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1177 for (; cnt; cnt--) {
1178 reg = rd32(hw, I40E_PFGEN_CTRL);
1179 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1180 break;
1181 reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1182 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1183 break;
1184 usleep_range(1000, 2000);
1185 }
1186 if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1187 if (i40e_poll_globr(hw, grst_del))
1188 return I40E_ERR_RESET_FAILED;
1189 } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1190 hw_dbg(hw, "PF reset polling failed to complete.\n");
1191 return I40E_ERR_RESET_FAILED;
1192 }
1193 }
1194
1195 i40e_clear_pxe_mode(hw);
1196
1197 return 0;
1198}
1199
1200/**
1201 * i40e_clear_hw - clear out any left over hw state
1202 * @hw: pointer to the hw struct
1203 *
1204 * Clear queues and interrupts, typically called at init time,
1205 * but after the capabilities have been found so we know how many
1206 * queues and msix vectors have been allocated.
1207 **/
1208void i40e_clear_hw(struct i40e_hw *hw)
1209{
1210 u32 num_queues, base_queue;
1211 u32 num_pf_int;
1212 u32 num_vf_int;
1213 u32 num_vfs;
1214 u32 i, j;
1215 u32 val;
1216 u32 eol = 0x7ff;
1217
1218 /* get number of interrupts, queues, and VFs */
1219 val = rd32(hw, I40E_GLPCI_CNF2);
1220 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1221 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1222 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1223 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1224
1225 val = rd32(hw, I40E_PFLAN_QALLOC);
1226 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1227 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1228 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1229 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1230 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
1231 num_queues = (j - base_queue) + 1;
1232 else
1233 num_queues = 0;
1234
1235 val = rd32(hw, I40E_PF_VT_PFALLOC);
1236 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1237 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1238 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1239 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1240 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
1241 num_vfs = (j - i) + 1;
1242 else
1243 num_vfs = 0;
1244
1245 /* stop all the interrupts */
1246 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1247 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1248 for (i = 0; i < num_pf_int - 2; i++)
1249 wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1250
1251 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1252 val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1253 wr32(hw, I40E_PFINT_LNKLST0, val);
1254 for (i = 0; i < num_pf_int - 2; i++)
1255 wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1256 val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1257 for (i = 0; i < num_vfs; i++)
1258 wr32(hw, I40E_VPINT_LNKLST0(i), val);
1259 for (i = 0; i < num_vf_int - 2; i++)
1260 wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1261
1262 /* warn the HW of the coming Tx disables */
1263 for (i = 0; i < num_queues; i++) {
1264 u32 abs_queue_idx = base_queue + i;
1265 u32 reg_block = 0;
1266
1267 if (abs_queue_idx >= 128) {
1268 reg_block = abs_queue_idx / 128;
1269 abs_queue_idx %= 128;
1270 }
1271
1272 val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1273 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1274 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1275 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1276
1277 wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1278 }
1279 udelay(400);
1280
1281 /* stop all the queues */
1282 for (i = 0; i < num_queues; i++) {
1283 wr32(hw, I40E_QINT_TQCTL(i), 0);
1284 wr32(hw, I40E_QTX_ENA(i), 0);
1285 wr32(hw, I40E_QINT_RQCTL(i), 0);
1286 wr32(hw, I40E_QRX_ENA(i), 0);
1287 }
1288
1289 /* short wait for all queue disables to settle */
1290 udelay(50);
1291}
1292
1293/**
1294 * i40e_clear_pxe_mode - clear pxe operations mode
1295 * @hw: pointer to the hw struct
1296 *
1297 * Make sure all PXE mode settings are cleared, including things
1298 * like descriptor fetch/write-back mode.
1299 **/
1300void i40e_clear_pxe_mode(struct i40e_hw *hw)
1301{
1302 u32 reg;
1303
1304 if (i40e_check_asq_alive(hw))
1305 i40e_aq_clear_pxe_mode(hw, NULL);
1306
1307 /* Clear single descriptor fetch/write-back mode */
1308 reg = rd32(hw, I40E_GLLAN_RCTL_0);
1309
1310 if (hw->revision_id == 0) {
1311 /* As a work around clear PXE_MODE instead of setting it */
1312 wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1313 } else {
1314 wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1315 }
1316}
1317
1318/**
1319 * i40e_led_is_mine - helper to find matching led
1320 * @hw: pointer to the hw struct
1321 * @idx: index into GPIO registers
1322 *
1323 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1324 */
1325static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1326{
1327 u32 gpio_val = 0;
1328 u32 port;
1329
1330 if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1331 !hw->func_caps.led[idx])
1332 return 0;
1333 gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1334 port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1335 I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1336
1337 /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1338 * if it is not our port then ignore
1339 */
1340 if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1341 (port != hw->port))
1342 return 0;
1343
1344 return gpio_val;
1345}
1346
1347#define I40E_FW_LED BIT(4)
1348#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1349 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1350
1351#define I40E_LED0 22
1352
1353#define I40E_PIN_FUNC_SDP 0x0
1354#define I40E_PIN_FUNC_LED 0x1
1355
1356/**
1357 * i40e_led_get - return current on/off mode
1358 * @hw: pointer to the hw struct
1359 *
1360 * The value returned is the 'mode' field as defined in the
1361 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1362 * values are variations of possible behaviors relating to
1363 * blink, link, and wire.
1364 **/
1365u32 i40e_led_get(struct i40e_hw *hw)
1366{
1367 u32 mode = 0;
1368 int i;
1369
1370 /* as per the documentation GPIO 22-29 are the LED
1371 * GPIO pins named LED0..LED7
1372 */
1373 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1374 u32 gpio_val = i40e_led_is_mine(hw, i);
1375
1376 if (!gpio_val)
1377 continue;
1378
1379 mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1380 I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1381 break;
1382 }
1383
1384 return mode;
1385}
1386
1387/**
1388 * i40e_led_set - set new on/off mode
1389 * @hw: pointer to the hw struct
1390 * @mode: 0=off, 0xf=on (else see manual for mode details)
1391 * @blink: true if the LED should blink when on, false if steady
1392 *
 * If this function is used to turn on the blink, it should also
 * be used to disable the blink when restoring the original state.
1395 **/
1396void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1397{
1398 int i;
1399
1400 if (mode & ~I40E_LED_MODE_VALID) {
1401 hw_dbg(hw, "invalid mode passed in %X\n", mode);
1402 return;
1403 }
1404
1405 /* as per the documentation GPIO 22-29 are the LED
1406 * GPIO pins named LED0..LED7
1407 */
1408 for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1409 u32 gpio_val = i40e_led_is_mine(hw, i);
1410
1411 if (!gpio_val)
1412 continue;
1413
1414 if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1415 u32 pin_func = 0;
1416
1417 if (mode & I40E_FW_LED)
1418 pin_func = I40E_PIN_FUNC_SDP;
1419 else
1420 pin_func = I40E_PIN_FUNC_LED;
1421
1422 gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1423 gpio_val |= ((pin_func <<
1424 I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1425 I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1426 }
1427 gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1428 /* this & is a bit of paranoia, but serves as a range check */
1429 gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1430 I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1431
1432 if (blink)
1433 gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1434 else
1435 gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1436
1437 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1438 break;
1439 }
1440}
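
/*
 * Example (illustrative sketch, not part of the driver): blink the port LED
 * for identification and then restore the original state, following the
 * usage rule in the i40e_led_set() comment above. The helper name and the
 * caller-provided saved_mode storage are hypothetical.
 */
static void i40e_example_identify(struct i40e_hw *hw, u32 *saved_mode,
				  bool start)
{
	if (start) {
		/* remember the current mode, then blink "on" (0xf) */
		*saved_mode = i40e_led_get(hw);
		i40e_led_set(hw, 0xf, true);
	} else {
		/* stop blinking and restore the remembered mode */
		i40e_led_set(hw, *saved_mode, false);
	}
}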
1441
1442/* Admin command wrappers */
1443
1444/**
1445 * i40e_aq_get_phy_capabilities
1446 * @hw: pointer to the hw struct
1447 * @abilities: structure for PHY capabilities to be filled
1448 * @qualified_modules: report Qualified Modules
1449 * @report_init: report init capabilities (active are default)
1450 * @cmd_details: pointer to command details structure or NULL
1451 *
1452 * Returns the various PHY abilities supported on the Port.
1453 **/
1454i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1455 bool qualified_modules, bool report_init,
1456 struct i40e_aq_get_phy_abilities_resp *abilities,
1457 struct i40e_asq_cmd_details *cmd_details)
1458{
1459 struct i40e_aq_desc desc;
1460 i40e_status status;
1461 u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1462 u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1463
1464 if (!abilities)
1465 return I40E_ERR_PARAM;
1466
1467 do {
1468 i40e_fill_default_direct_cmd_desc(&desc,
1469 i40e_aqc_opc_get_phy_abilities);
1470
1471 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1472 if (abilities_size > I40E_AQ_LARGE_BUF)
1473 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1474
1475 if (qualified_modules)
1476 desc.params.external.param0 |=
1477 cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1478
1479 if (report_init)
1480 desc.params.external.param0 |=
1481 cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1482
1483 status = i40e_asq_send_command(hw, &desc, abilities,
1484 abilities_size, cmd_details);
1485
1486 switch (hw->aq.asq_last_status) {
1487 case I40E_AQ_RC_EIO:
1488 status = I40E_ERR_UNKNOWN_PHY;
1489 break;
1490 case I40E_AQ_RC_EAGAIN:
1491 usleep_range(1000, 2000);
1492 total_delay++;
1493 status = I40E_ERR_TIMEOUT;
1494 break;
1495 /* also covers I40E_AQ_RC_OK */
1496 default:
1497 break;
1498 }
1499
1500 } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1501 (total_delay < max_delay));
1502
1503 if (status)
1504 return status;
1505
1506 if (report_init) {
1507 if (hw->mac.type == I40E_MAC_XL710 &&
1508 hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1509 hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1510 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1511 } else {
1512 hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1513 hw->phy.phy_types |=
1514 ((u64)abilities->phy_type_ext << 32);
1515 }
1516 }
1517
1518 return status;
1519}
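
/*
 * Example (illustrative sketch, not part of the driver): query the currently
 * active PHY abilities and log the raw PHY type, link speed and ability
 * flags. The helper name is hypothetical; the fields are those of
 * struct i40e_aq_get_phy_abilities_resp used above.
 */
static void i40e_example_dump_phy_caps(struct i40e_hw *hw)
{
	struct i40e_aq_get_phy_abilities_resp abilities;

	if (i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL))
		return;

	hw_dbg(hw, "phy_type 0x%08x link_speed 0x%02x abilities 0x%02x\n",
	       le32_to_cpu(abilities.phy_type), abilities.link_speed,
	       abilities.abilities);
}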
1520
1521/**
1522 * i40e_aq_set_phy_config
1523 * @hw: pointer to the hw struct
1524 * @config: structure with PHY configuration to be set
1525 * @cmd_details: pointer to command details structure or NULL
1526 *
1527 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in MFP mode as the PF may not have the privilege to set some
1530 * of the PHY Config parameters. This status will be indicated by the
1531 * command response.
1532 **/
1533enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1534 struct i40e_aq_set_phy_config *config,
1535 struct i40e_asq_cmd_details *cmd_details)
1536{
1537 struct i40e_aq_desc desc;
1538 struct i40e_aq_set_phy_config *cmd =
1539 (struct i40e_aq_set_phy_config *)&desc.params.raw;
1540 enum i40e_status_code status;
1541
1542 if (!config)
1543 return I40E_ERR_PARAM;
1544
1545 i40e_fill_default_direct_cmd_desc(&desc,
1546 i40e_aqc_opc_set_phy_config);
1547
1548 *cmd = *config;
1549
1550 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1551
1552 return status;
1553}
1554
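/**
 * i40e_set_fc_status - apply the requested flow control mode to the PHY
 * @hw: pointer to the hw struct
 * @abilities: current PHY abilities as returned by get_phy_capabilities
 * @atomic_restart: whether to request an automatic link restart
 *
 * Builds a set_phy_config request from the current abilities with the
 * pause bits replaced according to hw->fc.requested_mode, and sends it
 * only if the resulting abilities differ from the current ones.
 **/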
1555static noinline_for_stack enum i40e_status_code
1556i40e_set_fc_status(struct i40e_hw *hw,
1557 struct i40e_aq_get_phy_abilities_resp *abilities,
1558 bool atomic_restart)
1559{
1560 struct i40e_aq_set_phy_config config;
1561 enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1562 u8 pause_mask = 0x0;
1563
1564 switch (fc_mode) {
1565 case I40E_FC_FULL:
1566 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1567 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1568 break;
1569 case I40E_FC_RX_PAUSE:
1570 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1571 break;
1572 case I40E_FC_TX_PAUSE:
1573 pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1574 break;
1575 default:
1576 break;
1577 }
1578
1579 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1580 /* clear the old pause settings */
1581 config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1582 ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1583 /* set the new abilities */
1584 config.abilities |= pause_mask;
	/* if the pause bits are already as requested there is nothing to do */
1586 if (config.abilities == abilities->abilities)
1587 return 0;
1588
1589 /* Auto restart link so settings take effect */
1590 if (atomic_restart)
1591 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1592 /* Copy over all the old settings */
1593 config.phy_type = abilities->phy_type;
1594 config.phy_type_ext = abilities->phy_type_ext;
1595 config.link_speed = abilities->link_speed;
1596 config.eee_capability = abilities->eee_capability;
1597 config.eeer = abilities->eeer_val;
1598 config.low_power_ctrl = abilities->d3_lpan;
1599 config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1600 I40E_AQ_PHY_FEC_CONFIG_MASK;
1601
1602 return i40e_aq_set_phy_config(hw, &config, NULL);
1603}
1604
1605/**
1606 * i40e_set_fc
1607 * @hw: pointer to the hw struct
1608 * @aq_failures: buffer to return AdminQ failure information
1609 * @atomic_restart: whether to enable atomic link restart
1610 *
1611 * Set the requested flow control mode using set_phy_config.
1612 **/
1613enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1614 bool atomic_restart)
1615{
1616 struct i40e_aq_get_phy_abilities_resp abilities;
1617 enum i40e_status_code status;
1618
1619 *aq_failures = 0x0;
1620
1621 /* Get the current phy config */
1622 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1623 NULL);
1624 if (status) {
1625 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1626 return status;
1627 }
1628
1629 status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1630 if (status)
1631 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1632
1633 /* Update the link info */
1634 status = i40e_update_link_info(hw);
1635 if (status) {
1636 /* Wait a little bit (on 40G cards it sometimes takes a really
1637 * long time for link to come back from the atomic reset)
1638 * and try once more
1639 */
1640 msleep(1000);
1641 status = i40e_update_link_info(hw);
1642 }
1643 if (status)
1644 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1645
1646 return status;
1647}
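
/*
 * Example (illustrative sketch, not part of the driver): request symmetric
 * flow control and report which stage failed. The helper name is
 * hypothetical; the failure bits are the ones set by i40e_set_fc() above.
 */
static void i40e_example_enable_pause(struct i40e_hw *hw)
{
	u8 aq_failures = 0;

	hw->fc.requested_mode = I40E_FC_FULL;

	if (i40e_set_fc(hw, &aq_failures, true)) {
		if (aq_failures & I40E_SET_FC_AQ_FAIL_GET)
			hw_dbg(hw, "get PHY abilities failed\n");
		if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
			hw_dbg(hw, "set PHY config failed\n");
		if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE)
			hw_dbg(hw, "link info update failed\n");
	}
}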
1648
1649/**
1650 * i40e_aq_clear_pxe_mode
1651 * @hw: pointer to the hw struct
1652 * @cmd_details: pointer to command details structure or NULL
1653 *
1654 * Tell the firmware that the driver is taking over from PXE
1655 **/
1656i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1657 struct i40e_asq_cmd_details *cmd_details)
1658{
1659 i40e_status status;
1660 struct i40e_aq_desc desc;
1661 struct i40e_aqc_clear_pxe *cmd =
1662 (struct i40e_aqc_clear_pxe *)&desc.params.raw;
1663
1664 i40e_fill_default_direct_cmd_desc(&desc,
1665 i40e_aqc_opc_clear_pxe_mode);
1666
1667 cmd->rx_cnt = 0x2;
1668
1669 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1670
1671 wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1672
1673 return status;
1674}
1675
1676/**
1677 * i40e_aq_set_link_restart_an
1678 * @hw: pointer to the hw struct
1679 * @enable_link: if true: enable link, if false: disable link
1680 * @cmd_details: pointer to command details structure or NULL
1681 *
1682 * Sets up the link and restarts the Auto-Negotiation over the link.
1683 **/
1684i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1685 bool enable_link,
1686 struct i40e_asq_cmd_details *cmd_details)
1687{
1688 struct i40e_aq_desc desc;
1689 struct i40e_aqc_set_link_restart_an *cmd =
1690 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1691 i40e_status status;
1692
1693 i40e_fill_default_direct_cmd_desc(&desc,
1694 i40e_aqc_opc_set_link_restart_an);
1695
1696 cmd->command = I40E_AQ_PHY_RESTART_AN;
1697 if (enable_link)
1698 cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1699 else
1700 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1701
1702 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1703
1704 return status;
1705}
1706
1707/**
1708 * i40e_aq_get_link_info
1709 * @hw: pointer to the hw struct
1710 * @enable_lse: enable/disable LinkStatusEvent reporting
1711 * @link: pointer to link status structure - optional
1712 * @cmd_details: pointer to command details structure or NULL
1713 *
1714 * Returns the link status of the adapter.
1715 **/
1716i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1717 bool enable_lse, struct i40e_link_status *link,
1718 struct i40e_asq_cmd_details *cmd_details)
1719{
1720 struct i40e_aq_desc desc;
1721 struct i40e_aqc_get_link_status *resp =
1722 (struct i40e_aqc_get_link_status *)&desc.params.raw;
1723 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1724 i40e_status status;
1725 bool tx_pause, rx_pause;
1726 u16 command_flags;
1727
1728 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1729
1730 if (enable_lse)
1731 command_flags = I40E_AQ_LSE_ENABLE;
1732 else
1733 command_flags = I40E_AQ_LSE_DISABLE;
1734 resp->command_flags = cpu_to_le16(command_flags);
1735
1736 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1737
1738 if (status)
1739 goto aq_get_link_info_exit;
1740
1741 /* save off old link status information */
1742 hw->phy.link_info_old = *hw_link_info;
1743
1744 /* update link status */
1745 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1746 hw->phy.media_type = i40e_get_media_type(hw);
1747 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1748 hw_link_info->link_info = resp->link_info;
1749 hw_link_info->an_info = resp->an_info;
1750 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1751 I40E_AQ_CONFIG_FEC_RS_ENA);
1752 hw_link_info->ext_info = resp->ext_info;
1753 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1754 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1755 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1756
1757 /* update fc info */
1758 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1759 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
1761 hw->fc.current_mode = I40E_FC_FULL;
1762 else if (tx_pause)
1763 hw->fc.current_mode = I40E_FC_TX_PAUSE;
1764 else if (rx_pause)
1765 hw->fc.current_mode = I40E_FC_RX_PAUSE;
1766 else
1767 hw->fc.current_mode = I40E_FC_NONE;
1768
1769 if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1770 hw_link_info->crc_enable = true;
1771 else
1772 hw_link_info->crc_enable = false;
1773
1774 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1775 hw_link_info->lse_enable = true;
1776 else
1777 hw_link_info->lse_enable = false;
1778
1779 if ((hw->mac.type == I40E_MAC_XL710) &&
1780 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1781 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1782 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1783
1784 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1785 hw->mac.type != I40E_MAC_X722) {
1786 __le32 tmp;
1787
1788 memcpy(&tmp, resp->link_type, sizeof(tmp));
1789 hw->phy.phy_types = le32_to_cpu(tmp);
1790 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1791 }
1792
1793 /* save link status information */
1794 if (link)
1795 *link = *hw_link_info;
1796
1797 /* flag cleared so helper functions don't call AQ again */
1798 hw->phy.get_link_info = false;
1799
1800aq_get_link_info_exit:
1801 return status;
1802}
1803
1804/**
1805 * i40e_aq_set_phy_int_mask
1806 * @hw: pointer to the hw struct
1807 * @mask: interrupt mask to be set
1808 * @cmd_details: pointer to command details structure or NULL
1809 *
1810 * Set link interrupt mask.
1811 **/
1812i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1813 u16 mask,
1814 struct i40e_asq_cmd_details *cmd_details)
1815{
1816 struct i40e_aq_desc desc;
1817 struct i40e_aqc_set_phy_int_mask *cmd =
1818 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1819 i40e_status status;
1820
1821 i40e_fill_default_direct_cmd_desc(&desc,
1822 i40e_aqc_opc_set_phy_int_mask);
1823
1824 cmd->event_mask = cpu_to_le16(mask);
1825
1826 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1827
1828 return status;
1829}
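
/*
 * Example (illustrative sketch, not part of the driver): ask firmware to
 * report only link up/down, media availability and module qualification
 * events; note the negative logic of the mask (set bits are masked off).
 * The helper name is hypothetical and the event bit names are assumed to
 * come from i40e_adminq_cmd.h.
 */
static void i40e_example_mask_phy_events(struct i40e_hw *hw)
{
	if (i40e_aq_set_phy_int_mask(hw,
				     ~(I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MEDIA_NA |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL),
				     NULL))
		hw_dbg(hw, "failed to set PHY interrupt mask\n");
}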
1830
1831/**
1832 * i40e_aq_set_phy_debug
1833 * @hw: pointer to the hw struct
1834 * @cmd_flags: debug command flags
1835 * @cmd_details: pointer to command details structure or NULL
1836 *
 * Set PHY debug flags, e.g. to request a reset of the external PHY.
1838 **/
1839i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1840 struct i40e_asq_cmd_details *cmd_details)
1841{
1842 struct i40e_aq_desc desc;
1843 struct i40e_aqc_set_phy_debug *cmd =
1844 (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1845 i40e_status status;
1846
1847 i40e_fill_default_direct_cmd_desc(&desc,
1848 i40e_aqc_opc_set_phy_debug);
1849
1850 cmd->command_flags = cmd_flags;
1851
1852 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1853
1854 return status;
1855}
1856
1857/**
1858 * i40e_is_aq_api_ver_ge
1859 * @aq: pointer to AdminQ info containing HW API version to compare
1860 * @maj: API major value
1861 * @min: API minor value
1862 *
 * Check whether the current HW API version is greater than or equal to
 * the provided major/minor version.
1864 **/
1865static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1866 u16 min)
1867{
1868 return (aq->api_maj_ver > maj ||
1869 (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1870}
1871
1872/**
1873 * i40e_aq_add_vsi
1874 * @hw: pointer to the hw struct
1875 * @vsi_ctx: pointer to a vsi context struct
1876 * @cmd_details: pointer to command details structure or NULL
1877 *
1878 * Add a VSI context to the hardware.
1879**/
1880i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1881 struct i40e_vsi_context *vsi_ctx,
1882 struct i40e_asq_cmd_details *cmd_details)
1883{
1884 struct i40e_aq_desc desc;
1885 struct i40e_aqc_add_get_update_vsi *cmd =
1886 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1887 struct i40e_aqc_add_get_update_vsi_completion *resp =
1888 (struct i40e_aqc_add_get_update_vsi_completion *)
1889 &desc.params.raw;
1890 i40e_status status;
1891
1892 i40e_fill_default_direct_cmd_desc(&desc,
1893 i40e_aqc_opc_add_vsi);
1894
1895 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
1896 cmd->connection_type = vsi_ctx->connection_type;
1897 cmd->vf_id = vsi_ctx->vf_num;
1898 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
1899
1900 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1901
1902 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
1903 sizeof(vsi_ctx->info), cmd_details);
1904
1905 if (status)
1906 goto aq_add_vsi_exit;
1907
1908 vsi_ctx->seid = le16_to_cpu(resp->seid);
1909 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
1910 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
1911 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
1912
1913aq_add_vsi_exit:
1914 return status;
1915}
1916
1917/**
1918 * i40e_aq_set_default_vsi
1919 * @hw: pointer to the hw struct
1920 * @seid: vsi number
1921 * @cmd_details: pointer to command details structure or NULL
1922 **/
1923i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
1924 u16 seid,
1925 struct i40e_asq_cmd_details *cmd_details)
1926{
1927 struct i40e_aq_desc desc;
1928 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1929 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1930 &desc.params.raw;
1931 i40e_status status;
1932
1933 i40e_fill_default_direct_cmd_desc(&desc,
1934 i40e_aqc_opc_set_vsi_promiscuous_modes);
1935
1936 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1937 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1938 cmd->seid = cpu_to_le16(seid);
1939
1940 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1941
1942 return status;
1943}
1944
1945/**
1946 * i40e_aq_clear_default_vsi
1947 * @hw: pointer to the hw struct
1948 * @seid: vsi number
1949 * @cmd_details: pointer to command details structure or NULL
1950 **/
1951i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
1952 u16 seid,
1953 struct i40e_asq_cmd_details *cmd_details)
1954{
1955 struct i40e_aq_desc desc;
1956 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1957 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1958 &desc.params.raw;
1959 i40e_status status;
1960
1961 i40e_fill_default_direct_cmd_desc(&desc,
1962 i40e_aqc_opc_set_vsi_promiscuous_modes);
1963
1964 cmd->promiscuous_flags = cpu_to_le16(0);
1965 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1966 cmd->seid = cpu_to_le16(seid);
1967
1968 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1969
1970 return status;
1971}
1972
1973/**
1974 * i40e_aq_set_vsi_unicast_promiscuous
1975 * @hw: pointer to the hw struct
1976 * @seid: vsi number
1977 * @set: set unicast promiscuous enable/disable
1978 * @cmd_details: pointer to command details structure or NULL
1979 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
1980 **/
1981i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1982 u16 seid, bool set,
1983 struct i40e_asq_cmd_details *cmd_details,
1984 bool rx_only_promisc)
1985{
1986 struct i40e_aq_desc desc;
1987 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1988 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
1989 i40e_status status;
1990 u16 flags = 0;
1991
1992 i40e_fill_default_direct_cmd_desc(&desc,
1993 i40e_aqc_opc_set_vsi_promiscuous_modes);
1994
1995 if (set) {
1996 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1997 if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1998 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1999 }
2000
2001 cmd->promiscuous_flags = cpu_to_le16(flags);
2002
2003 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2004 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2005 cmd->valid_flags |=
2006 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2007
2008 cmd->seid = cpu_to_le16(seid);
2009 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2010
2011 return status;
2012}
2013
2014/**
2015 * i40e_aq_set_vsi_multicast_promiscuous
2016 * @hw: pointer to the hw struct
2017 * @seid: vsi number
2018 * @set: set multicast promiscuous enable/disable
2019 * @cmd_details: pointer to command details structure or NULL
2020 **/
2021i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2022 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2023{
2024 struct i40e_aq_desc desc;
2025 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2026 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2027 i40e_status status;
2028 u16 flags = 0;
2029
2030 i40e_fill_default_direct_cmd_desc(&desc,
2031 i40e_aqc_opc_set_vsi_promiscuous_modes);
2032
2033 if (set)
2034 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2035
2036 cmd->promiscuous_flags = cpu_to_le16(flags);
2037
2038 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2039
2040 cmd->seid = cpu_to_le16(seid);
2041 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2042
2043 return status;
2044}
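
/*
 * Example (illustrative sketch, not part of the driver): put a VSI into full
 * promiscuous mode by enabling both the unicast and multicast filters. The
 * helper name is hypothetical; "seid" is the switch element ID of the VSI
 * as returned by i40e_aq_add_vsi().
 */
static i40e_status i40e_example_set_promisc(struct i40e_hw *hw, u16 seid,
					    bool on)
{
	i40e_status status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, on, NULL, true);
	if (status)
		return status;

	return i40e_aq_set_vsi_multicast_promiscuous(hw, seid, on, NULL);
}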
2045
2046/**
2047 * i40e_aq_set_vsi_mc_promisc_on_vlan
2048 * @hw: pointer to the hw struct
2049 * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
2051 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2052 * @cmd_details: pointer to command details structure or NULL
2053 **/
2054enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2055 u16 seid, bool enable,
2056 u16 vid,
2057 struct i40e_asq_cmd_details *cmd_details)
2058{
2059 struct i40e_aq_desc desc;
2060 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2061 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2062 enum i40e_status_code status;
2063 u16 flags = 0;
2064
2065 i40e_fill_default_direct_cmd_desc(&desc,
2066 i40e_aqc_opc_set_vsi_promiscuous_modes);
2067
2068 if (enable)
2069 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2070
2071 cmd->promiscuous_flags = cpu_to_le16(flags);
2072 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2073 cmd->seid = cpu_to_le16(seid);
2074 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2075
2076 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2077
2078 return status;
2079}
2080
2081/**
2082 * i40e_aq_set_vsi_uc_promisc_on_vlan
2083 * @hw: pointer to the hw struct
2084 * @seid: vsi number
2085 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2086 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2087 * @cmd_details: pointer to command details structure or NULL
2088 **/
2089enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2090 u16 seid, bool enable,
2091 u16 vid,
2092 struct i40e_asq_cmd_details *cmd_details)
2093{
2094 struct i40e_aq_desc desc;
2095 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2096 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2097 enum i40e_status_code status;
2098 u16 flags = 0;
2099
2100 i40e_fill_default_direct_cmd_desc(&desc,
2101 i40e_aqc_opc_set_vsi_promiscuous_modes);
2102
2103 if (enable) {
2104 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2105 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2106 flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2107 }
2108
2109 cmd->promiscuous_flags = cpu_to_le16(flags);
2110 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2111 if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2112 cmd->valid_flags |=
2113 cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2114 cmd->seid = cpu_to_le16(seid);
2115 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2116
2117 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2118
2119 return status;
2120}
2121
2122/**
2123 * i40e_aq_set_vsi_bc_promisc_on_vlan
2124 * @hw: pointer to the hw struct
2125 * @seid: vsi number
2126 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2127 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2128 * @cmd_details: pointer to command details structure or NULL
2129 **/
2130i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2131 u16 seid, bool enable, u16 vid,
2132 struct i40e_asq_cmd_details *cmd_details)
2133{
2134 struct i40e_aq_desc desc;
2135 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2136 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2137 i40e_status status;
2138 u16 flags = 0;
2139
2140 i40e_fill_default_direct_cmd_desc(&desc,
2141 i40e_aqc_opc_set_vsi_promiscuous_modes);
2142
2143 if (enable)
2144 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2145
2146 cmd->promiscuous_flags = cpu_to_le16(flags);
2147 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2148 cmd->seid = cpu_to_le16(seid);
2149 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2150
2151 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2152
2153 return status;
2154}
2155
2156/**
2157 * i40e_aq_set_vsi_broadcast
2158 * @hw: pointer to the hw struct
2159 * @seid: vsi number
2160 * @set_filter: true to set filter, false to clear filter
2161 * @cmd_details: pointer to command details structure or NULL
2162 *
2163 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2164 **/
2165i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2166 u16 seid, bool set_filter,
2167 struct i40e_asq_cmd_details *cmd_details)
2168{
2169 struct i40e_aq_desc desc;
2170 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2171 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2172 i40e_status status;
2173
2174 i40e_fill_default_direct_cmd_desc(&desc,
2175 i40e_aqc_opc_set_vsi_promiscuous_modes);
2176
2177 if (set_filter)
2178 cmd->promiscuous_flags
2179 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2180 else
2181 cmd->promiscuous_flags
2182 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2183
2184 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2185 cmd->seid = cpu_to_le16(seid);
2186 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2187
2188 return status;
2189}
2190
2191/**
2192 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2193 * @hw: pointer to the hw struct
2194 * @seid: vsi number
 * @enable: set VLAN promiscuous enable/disable for the VSI
2196 * @cmd_details: pointer to command details structure or NULL
2197 **/
2198i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2199 u16 seid, bool enable,
2200 struct i40e_asq_cmd_details *cmd_details)
2201{
2202 struct i40e_aq_desc desc;
2203 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2204 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2205 i40e_status status;
2206 u16 flags = 0;
2207
2208 i40e_fill_default_direct_cmd_desc(&desc,
2209 i40e_aqc_opc_set_vsi_promiscuous_modes);
2210 if (enable)
2211 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2212
2213 cmd->promiscuous_flags = cpu_to_le16(flags);
2214 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2215 cmd->seid = cpu_to_le16(seid);
2216
2217 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2218
2219 return status;
2220}
2221
2222/**
2223 * i40e_aq_get_vsi_params - get VSI configuration info
2224 * @hw: pointer to the hw struct
2225 * @vsi_ctx: pointer to a vsi context struct
2226 * @cmd_details: pointer to command details structure or NULL
2227 **/
2228i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2229 struct i40e_vsi_context *vsi_ctx,
2230 struct i40e_asq_cmd_details *cmd_details)
2231{
2232 struct i40e_aq_desc desc;
2233 struct i40e_aqc_add_get_update_vsi *cmd =
2234 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2235 struct i40e_aqc_add_get_update_vsi_completion *resp =
2236 (struct i40e_aqc_add_get_update_vsi_completion *)
2237 &desc.params.raw;
2238 i40e_status status;
2239
2240 i40e_fill_default_direct_cmd_desc(&desc,
2241 i40e_aqc_opc_get_vsi_parameters);
2242
2243 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2244
2245 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2246
2247 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2248 sizeof(vsi_ctx->info), NULL);
2249
2250 if (status)
2251 goto aq_get_vsi_params_exit;
2252
2253 vsi_ctx->seid = le16_to_cpu(resp->seid);
2254 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2255 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2256 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2257
2258aq_get_vsi_params_exit:
2259 return status;
2260}
2261
2262/**
2263 * i40e_aq_update_vsi_params
2264 * @hw: pointer to the hw struct
2265 * @vsi_ctx: pointer to a vsi context struct
2266 * @cmd_details: pointer to command details structure or NULL
2267 *
2268 * Update a VSI context.
2269 **/
2270i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2271 struct i40e_vsi_context *vsi_ctx,
2272 struct i40e_asq_cmd_details *cmd_details)
2273{
2274 struct i40e_aq_desc desc;
2275 struct i40e_aqc_add_get_update_vsi *cmd =
2276 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2277 struct i40e_aqc_add_get_update_vsi_completion *resp =
2278 (struct i40e_aqc_add_get_update_vsi_completion *)
2279 &desc.params.raw;
2280 i40e_status status;
2281
2282 i40e_fill_default_direct_cmd_desc(&desc,
2283 i40e_aqc_opc_update_vsi_parameters);
2284 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2285
2286 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2287
2288 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2289 sizeof(vsi_ctx->info), cmd_details);
2290
2291 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2292 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2293
2294 return status;
2295}
2296
2297/**
2298 * i40e_aq_get_switch_config
2299 * @hw: pointer to the hardware structure
2300 * @buf: pointer to the result buffer
2301 * @buf_size: length of input buffer
2302 * @start_seid: seid to start for the report, 0 == beginning
2303 * @cmd_details: pointer to command details structure or NULL
2304 *
2305 * Fill the buf with switch configuration returned from AdminQ command
2306 **/
2307i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2308 struct i40e_aqc_get_switch_config_resp *buf,
2309 u16 buf_size, u16 *start_seid,
2310 struct i40e_asq_cmd_details *cmd_details)
2311{
2312 struct i40e_aq_desc desc;
2313 struct i40e_aqc_switch_seid *scfg =
2314 (struct i40e_aqc_switch_seid *)&desc.params.raw;
2315 i40e_status status;
2316
2317 i40e_fill_default_direct_cmd_desc(&desc,
2318 i40e_aqc_opc_get_switch_config);
2319 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2320 if (buf_size > I40E_AQ_LARGE_BUF)
2321 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2322 scfg->seid = cpu_to_le16(*start_seid);
2323
2324 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2325 *start_seid = le16_to_cpu(scfg->seid);
2326
2327 return status;
2328}
2329
2330/**
2331 * i40e_aq_set_switch_config
2332 * @hw: pointer to the hardware structure
 * @flags: bit flag values to set
 * @valid_flags: which bit flags to set
 * @mode: cloud filter mode
2337 * @cmd_details: pointer to command details structure or NULL
2338 *
2339 * Set switch configuration bits
2340 **/
2341enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2342 u16 flags,
2343 u16 valid_flags, u8 mode,
2344 struct i40e_asq_cmd_details *cmd_details)
2345{
2346 struct i40e_aq_desc desc;
2347 struct i40e_aqc_set_switch_config *scfg =
2348 (struct i40e_aqc_set_switch_config *)&desc.params.raw;
2349 enum i40e_status_code status;
2350
2351 i40e_fill_default_direct_cmd_desc(&desc,
2352 i40e_aqc_opc_set_switch_config);
2353 scfg->flags = cpu_to_le16(flags);
2354 scfg->valid_flags = cpu_to_le16(valid_flags);
2355 scfg->mode = mode;
2356 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2357 scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2358 scfg->first_tag = cpu_to_le16(hw->first_tag);
2359 scfg->second_tag = cpu_to_le16(hw->second_tag);
2360 }
2361 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2362
2363 return status;
2364}
2365
2366/**
2367 * i40e_aq_get_firmware_version
2368 * @hw: pointer to the hw struct
2369 * @fw_major_version: firmware major version
2370 * @fw_minor_version: firmware minor version
2371 * @fw_build: firmware build number
 * @api_major_version: AdminQ API major version
 * @api_minor_version: AdminQ API minor version
2374 * @cmd_details: pointer to command details structure or NULL
2375 *
2376 * Get the firmware version from the admin queue commands
2377 **/
2378i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2379 u16 *fw_major_version, u16 *fw_minor_version,
2380 u32 *fw_build,
2381 u16 *api_major_version, u16 *api_minor_version,
2382 struct i40e_asq_cmd_details *cmd_details)
2383{
2384 struct i40e_aq_desc desc;
2385 struct i40e_aqc_get_version *resp =
2386 (struct i40e_aqc_get_version *)&desc.params.raw;
2387 i40e_status status;
2388
2389 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2390
2391 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2392
2393 if (!status) {
2394 if (fw_major_version)
2395 *fw_major_version = le16_to_cpu(resp->fw_major);
2396 if (fw_minor_version)
2397 *fw_minor_version = le16_to_cpu(resp->fw_minor);
2398 if (fw_build)
2399 *fw_build = le32_to_cpu(resp->fw_build);
2400 if (api_major_version)
2401 *api_major_version = le16_to_cpu(resp->api_major);
2402 if (api_minor_version)
2403 *api_minor_version = le16_to_cpu(resp->api_minor);
2404 }
2405
2406 return status;
2407}
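
/*
 * Example (illustrative sketch, not part of the driver): read and log the
 * firmware and AdminQ API versions. The helper name is hypothetical; any of
 * the output pointers may be NULL if the caller does not need that field.
 */
static void i40e_example_log_fw_version(struct i40e_hw *hw)
{
	u16 fw_maj = 0, fw_min = 0, api_maj = 0, api_min = 0;
	u32 fw_build = 0;

	if (i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min, &fw_build,
					 &api_maj, &api_min, NULL))
		return;

	hw_dbg(hw, "fw %u.%u build %u, api %u.%u\n",
	       fw_maj, fw_min, fw_build, api_maj, api_min);
}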
2408
2409/**
2410 * i40e_aq_send_driver_version
2411 * @hw: pointer to the hw struct
2412 * @dv: driver's major, minor version
2413 * @cmd_details: pointer to command details structure or NULL
2414 *
2415 * Send the driver version to the firmware
2416 **/
2417i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2418 struct i40e_driver_version *dv,
2419 struct i40e_asq_cmd_details *cmd_details)
2420{
2421 struct i40e_aq_desc desc;
2422 struct i40e_aqc_driver_version *cmd =
2423 (struct i40e_aqc_driver_version *)&desc.params.raw;
2424 i40e_status status;
2425 u16 len;
2426
2427 if (dv == NULL)
2428 return I40E_ERR_PARAM;
2429
2430 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2431
2432 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2433 cmd->driver_major_ver = dv->major_version;
2434 cmd->driver_minor_ver = dv->minor_version;
2435 cmd->driver_build_ver = dv->build_version;
2436 cmd->driver_subbuild_ver = dv->subbuild_version;
2437
2438 len = 0;
2439 while (len < sizeof(dv->driver_string) &&
2440 (dv->driver_string[len] < 0x80) &&
2441 dv->driver_string[len])
2442 len++;
2443 status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2444 len, cmd_details);
2445
2446 return status;
2447}
2448
2449/**
2450 * i40e_get_link_status - get status of the HW network link
2451 * @hw: pointer to the hw struct
2452 * @link_up: pointer to bool (true/false = linkup/linkdown)
2453 *
2454 * Variable link_up true if link is up, false if link is down.
2455 * The variable link_up is invalid if returned value of status != 0
2456 *
2457 * Side effect: LinkStatusEvent reporting becomes enabled
2458 **/
2459i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2460{
2461 i40e_status status = 0;
2462
2463 if (hw->phy.get_link_info) {
2464 status = i40e_update_link_info(hw);
2465
2466 if (status)
2467 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2468 status);
2469 }
2470
2471 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2472
2473 return status;
2474}
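
/*
 * Example (illustrative sketch, not part of the driver): poll the link and
 * log the negotiated speed. The helper name is hypothetical; the field and
 * enum names are those already used by i40e_aq_get_link_info() above.
 */
static void i40e_example_check_link(struct i40e_hw *hw)
{
	bool link_up = false;

	if (i40e_get_link_status(hw, &link_up))
		return;

	if (link_up)
		hw_dbg(hw, "link up, speed 0x%x\n",
		       hw->phy.link_info.link_speed);
	else
		hw_dbg(hw, "link down\n");
}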
2475
2476/**
2477 * i40e_update_link_info - update status of the HW network link
2478 * @hw: pointer to the hw struct
2479 **/
2480noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2481{
2482 struct i40e_aq_get_phy_abilities_resp abilities;
2483 i40e_status status = 0;
2484
2485 status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2486 if (status)
2487 return status;
2488
2489 /* extra checking needed to ensure link info to user is timely */
2490 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2491 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2492 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2493 status = i40e_aq_get_phy_capabilities(hw, false, false,
2494 &abilities, NULL);
2495 if (status)
2496 return status;
2497
2498 if (abilities.fec_cfg_curr_mod_ext_info &
2499 I40E_AQ_ENABLE_FEC_AUTO)
2500 hw->phy.link_info.req_fec_info =
2501 (I40E_AQ_REQUEST_FEC_KR |
2502 I40E_AQ_REQUEST_FEC_RS);
2503 else
2504 hw->phy.link_info.req_fec_info =
2505 abilities.fec_cfg_curr_mod_ext_info &
2506 (I40E_AQ_REQUEST_FEC_KR |
2507 I40E_AQ_REQUEST_FEC_RS);
2508
2509 memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2510 sizeof(hw->phy.link_info.module_type));
2511 }
2512
2513 return status;
2514}
2515
2516/**
2517 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2518 * @hw: pointer to the hw struct
2519 * @uplink_seid: the MAC or other gizmo SEID
2520 * @downlink_seid: the VSI SEID
2521 * @enabled_tc: bitmap of TCs to be enabled
2522 * @default_port: true for default port VSI, false for control port
2523 * @veb_seid: pointer to where to put the resulting VEB SEID
2524 * @enable_stats: true to turn on VEB stats
2525 * @cmd_details: pointer to command details structure or NULL
2526 *
2527 * This asks the FW to add a VEB between the uplink and downlink
2528 * elements. If the uplink SEID is 0, this will be a floating VEB.
2529 **/
2530i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2531 u16 downlink_seid, u8 enabled_tc,
2532 bool default_port, u16 *veb_seid,
2533 bool enable_stats,
2534 struct i40e_asq_cmd_details *cmd_details)
2535{
2536 struct i40e_aq_desc desc;
2537 struct i40e_aqc_add_veb *cmd =
2538 (struct i40e_aqc_add_veb *)&desc.params.raw;
2539 struct i40e_aqc_add_veb_completion *resp =
2540 (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2541 i40e_status status;
2542 u16 veb_flags = 0;
2543
2544 /* SEIDs need to either both be set or both be 0 for floating VEB */
2545 if (!!uplink_seid != !!downlink_seid)
2546 return I40E_ERR_PARAM;
2547
2548 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2549
2550 cmd->uplink_seid = cpu_to_le16(uplink_seid);
2551 cmd->downlink_seid = cpu_to_le16(downlink_seid);
2552 cmd->enable_tcs = enabled_tc;
2553 if (!uplink_seid)
2554 veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2555 if (default_port)
2556 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2557 else
2558 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2559
2560 /* reverse logic here: set the bitflag to disable the stats */
2561 if (!enable_stats)
2562 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2563
2564 cmd->veb_flags = cpu_to_le16(veb_flags);
2565
2566 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2567
2568 if (!status && veb_seid)
2569 *veb_seid = le16_to_cpu(resp->veb_seid);
2570
2571 return status;
2572}
2573
2574/**
2575 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2576 * @hw: pointer to the hw struct
2577 * @veb_seid: the SEID of the VEB to query
2578 * @switch_id: the uplink switch id
2579 * @floating: set to true if the VEB is floating
2580 * @statistic_index: index of the stats counter block for this VEB
 * @vebs_used: number of VEBs used by this function
 * @vebs_free: total VEBs not reserved by any function
2583 * @cmd_details: pointer to command details structure or NULL
2584 *
 * This retrieves the parameters for a particular VEB, specified by
 * veb_seid, and returns them to the caller.
2587 **/
2588i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2589 u16 veb_seid, u16 *switch_id,
2590 bool *floating, u16 *statistic_index,
2591 u16 *vebs_used, u16 *vebs_free,
2592 struct i40e_asq_cmd_details *cmd_details)
2593{
2594 struct i40e_aq_desc desc;
2595 struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2596 (struct i40e_aqc_get_veb_parameters_completion *)
2597 &desc.params.raw;
2598 i40e_status status;
2599
2600 if (veb_seid == 0)
2601 return I40E_ERR_PARAM;
2602
2603 i40e_fill_default_direct_cmd_desc(&desc,
2604 i40e_aqc_opc_get_veb_parameters);
2605 cmd_resp->seid = cpu_to_le16(veb_seid);
2606
2607 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2608 if (status)
2609 goto get_veb_exit;
2610
2611 if (switch_id)
2612 *switch_id = le16_to_cpu(cmd_resp->switch_id);
2613 if (statistic_index)
2614 *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2615 if (vebs_used)
2616 *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2617 if (vebs_free)
2618 *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2619 if (floating) {
2620 u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2621
2622 if (flags & I40E_AQC_ADD_VEB_FLOATING)
2623 *floating = true;
2624 else
2625 *floating = false;
2626 }
2627
2628get_veb_exit:
2629 return status;
2630}
2631
2632/**
2633 * i40e_aq_add_macvlan
2634 * @hw: pointer to the hw struct
2635 * @seid: VSI for the mac address
2636 * @mv_list: list of macvlans to be added
2637 * @count: length of the list
2638 * @cmd_details: pointer to command details structure or NULL
2639 *
2640 * Add MAC/VLAN addresses to the HW filtering
2641 **/
2642i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2643 struct i40e_aqc_add_macvlan_element_data *mv_list,
2644 u16 count, struct i40e_asq_cmd_details *cmd_details)
2645{
2646 struct i40e_aq_desc desc;
2647 struct i40e_aqc_macvlan *cmd =
2648 (struct i40e_aqc_macvlan *)&desc.params.raw;
2649 i40e_status status;
2650 u16 buf_size;
2651 int i;
2652
2653 if (count == 0 || !mv_list || !hw)
2654 return I40E_ERR_PARAM;
2655
2656 buf_size = count * sizeof(*mv_list);
2657
2658 /* prep the rest of the request */
2659 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
2660 cmd->num_addresses = cpu_to_le16(count);
2661 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2662 cmd->seid[1] = 0;
2663 cmd->seid[2] = 0;
2664
2665 for (i = 0; i < count; i++)
2666 if (is_multicast_ether_addr(mv_list[i].mac_addr))
2667 mv_list[i].flags |=
2668 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2669
2670 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2671 if (buf_size > I40E_AQ_LARGE_BUF)
2672 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2673
2674 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2675 cmd_details);
2676
2677 return status;
2678}
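
/* Usage sketch (illustrative only): adding a single unicast MAC filter to a
 * VSI. The element layout follows struct i40e_aqc_add_macvlan_element_data;
 * the I40E_AQC_MACVLAN_ADD_PERFECT_MATCH flag name is assumed from the admin
 * queue command definitions, and vsi_seid/mac are placeholders.
 *
 *	struct i40e_aqc_add_macvlan_element_data elem = {};
 *	i40e_status status;
 *
 *	ether_addr_copy(elem.mac_addr, mac);
 *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
 *	status = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
 */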
2679
2680/**
2681 * i40e_aq_remove_macvlan
2682 * @hw: pointer to the hw struct
2683 * @seid: VSI for the mac address
2684 * @mv_list: list of macvlans to be removed
2685 * @count: length of the list
2686 * @cmd_details: pointer to command details structure or NULL
2687 *
2688 * Remove MAC/VLAN addresses from the HW filtering
2689 **/
2690i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2691 struct i40e_aqc_remove_macvlan_element_data *mv_list,
2692 u16 count, struct i40e_asq_cmd_details *cmd_details)
2693{
2694 struct i40e_aq_desc desc;
2695 struct i40e_aqc_macvlan *cmd =
2696 (struct i40e_aqc_macvlan *)&desc.params.raw;
2697 i40e_status status;
2698 u16 buf_size;
2699
2700 if (count == 0 || !mv_list || !hw)
2701 return I40E_ERR_PARAM;
2702
2703 buf_size = count * sizeof(*mv_list);
2704
2705 /* prep the rest of the request */
2706 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2707 cmd->num_addresses = cpu_to_le16(count);
2708 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2709 cmd->seid[1] = 0;
2710 cmd->seid[2] = 0;
2711
2712 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2713 if (buf_size > I40E_AQ_LARGE_BUF)
2714 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2715
2716 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2717 cmd_details);
2718
2719 return status;
2720}
2721
2722/**
2723 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2724 * @hw: pointer to the hw struct
2725 * @opcode: AQ opcode for add or delete mirror rule
2726 * @sw_seid: Switch SEID (to which rule refers)
2727 * @rule_type: Rule Type (ingress/egress/VLAN)
2728 * @id: Destination VSI SEID or Rule ID
2729 * @count: length of the list
2730 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2731 * @cmd_details: pointer to command details structure or NULL
2732 * @rule_id: Rule ID returned from FW
2733 * @rules_used: Number of rules used in internal switch
2734 * @rules_free: Number of rules free in internal switch
2735 *
2736 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2737 * VEBs/VEPA elements only
2738 **/
2739static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2740 u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2741 u16 count, __le16 *mr_list,
2742 struct i40e_asq_cmd_details *cmd_details,
2743 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2744{
2745 struct i40e_aq_desc desc;
2746 struct i40e_aqc_add_delete_mirror_rule *cmd =
2747 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2748 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2749 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2750 i40e_status status;
2751 u16 buf_size;
2752
2753 buf_size = count * sizeof(*mr_list);
2754
2755 /* prep the rest of the request */
2756 i40e_fill_default_direct_cmd_desc(&desc, opcode);
2757 cmd->seid = cpu_to_le16(sw_seid);
2758 cmd->rule_type = cpu_to_le16(rule_type &
2759 I40E_AQC_MIRROR_RULE_TYPE_MASK);
2760 cmd->num_entries = cpu_to_le16(count);
2761 /* Dest VSI for add, rule_id for delete */
2762 cmd->destination = cpu_to_le16(id);
2763 if (mr_list) {
2764 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2765 I40E_AQ_FLAG_RD));
2766 if (buf_size > I40E_AQ_LARGE_BUF)
2767 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2768 }
2769
2770 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2771 cmd_details);
2772 if (!status ||
2773 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2774 if (rule_id)
2775 *rule_id = le16_to_cpu(resp->rule_id);
2776 if (rules_used)
2777 *rules_used = le16_to_cpu(resp->mirror_rules_used);
2778 if (rules_free)
2779 *rules_free = le16_to_cpu(resp->mirror_rules_free);
2780 }
2781 return status;
2782}
2783
2784/**
2785 * i40e_aq_add_mirrorrule - add a mirror rule
2786 * @hw: pointer to the hw struct
2787 * @sw_seid: Switch SEID (to which rule refers)
2788 * @rule_type: Rule Type (ingress/egress/VLAN)
2789 * @dest_vsi: SEID of VSI to which packets will be mirrored
2790 * @count: length of the list
2791 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2792 * @cmd_details: pointer to command details structure or NULL
2793 * @rule_id: Rule ID returned from FW
2794 * @rules_used: Number of rules used in internal switch
2795 * @rules_free: Number of rules free in internal switch
2796 *
2797 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2798 **/
2799i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2800 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2801 struct i40e_asq_cmd_details *cmd_details,
2802 u16 *rule_id, u16 *rules_used, u16 *rules_free)
2803{
2804 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2805 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2806 if (count == 0 || !mr_list)
2807 return I40E_ERR_PARAM;
2808 }
2809
2810 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2811 rule_type, dest_vsi, count, mr_list,
2812 cmd_details, rule_id, rules_used, rules_free);
2813}
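
/* Usage sketch (illustrative only): mirroring all ingress traffic of a VEB
 * to a destination VSI. For the ALL_INGRESS/ALL_EGRESS rule types no
 * mr_list is needed, so count is 0 and the list pointer is NULL; veb_seid
 * and dest_vsi_seid are placeholders.
 *
 *	u16 rule_id, rules_used, rules_free;
 *	i40e_status status;
 *
 *	status = i40e_aq_add_mirrorrule(hw, veb_seid,
 *					I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
 *					dest_vsi_seid, 0, NULL, NULL,
 *					&rule_id, &rules_used, &rules_free);
 */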
2814
2815/**
2816 * i40e_aq_delete_mirrorrule - delete a mirror rule
2817 * @hw: pointer to the hw struct
2818 * @sw_seid: Switch SEID (to which rule refers)
2819 * @rule_type: Rule Type (ingress/egress/VLAN)
2820 * @count: length of the list
2821 * @rule_id: Rule ID that is returned in the receive desc as part of
2822 * add_mirrorrule.
2823 * @mr_list: list of mirrored VLAN IDs to be removed
2824 * @cmd_details: pointer to command details structure or NULL
2825 * @rules_used: Number of rules used in internal switch
2826 * @rules_free: Number of rules free in internal switch
2827 *
2828 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2829 **/
2830i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2831 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2832 struct i40e_asq_cmd_details *cmd_details,
2833 u16 *rules_used, u16 *rules_free)
2834{
2835 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2836 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2837 /* count and mr_list shall be valid for rule_type INGRESS VLAN
	 * mirroring. For other rule types, count and mr_list are
	 * ignored.
2840 */
2841 if (count == 0 || !mr_list)
2842 return I40E_ERR_PARAM;
2843 }
2844
2845 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2846 rule_type, rule_id, count, mr_list,
2847 cmd_details, NULL, rules_used, rules_free);
2848}
2849
2850/**
2851 * i40e_aq_send_msg_to_vf
2852 * @hw: pointer to the hardware structure
2853 * @vfid: VF id to send msg
2854 * @v_opcode: opcodes for VF-PF communication
2855 * @v_retval: return error code
2856 * @msg: pointer to the msg buffer
2857 * @msglen: msg length
2858 * @cmd_details: pointer to command details
2859 *
 * Send a message to the specified VF through the admin queue.
2861 **/
2862i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2863 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2864 struct i40e_asq_cmd_details *cmd_details)
2865{
2866 struct i40e_aq_desc desc;
2867 struct i40e_aqc_pf_vf_message *cmd =
2868 (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2869 i40e_status status;
2870
2871 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2872 cmd->id = cpu_to_le32(vfid);
2873 desc.cookie_high = cpu_to_le32(v_opcode);
2874 desc.cookie_low = cpu_to_le32(v_retval);
2875 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2876 if (msglen) {
2877 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2878 I40E_AQ_FLAG_RD));
2879 if (msglen > I40E_AQ_LARGE_BUF)
2880 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2881 desc.datalen = cpu_to_le16(msglen);
2882 }
2883 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2884
2885 return status;
2886}
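
/* Usage sketch (illustrative only): the PF typically acknowledges a VF
 * request by echoing the virtchnl opcode together with a return value and
 * no payload. abs_vf_id and v_opcode are placeholders.
 *
 *	status = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, 0,
 *					NULL, 0, NULL);
 */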
2887
2888/**
2889 * i40e_aq_debug_read_register
2890 * @hw: pointer to the hw struct
2891 * @reg_addr: register address
2892 * @reg_val: register value
2893 * @cmd_details: pointer to command details structure or NULL
2894 *
2895 * Read the register using the admin queue commands
2896 **/
2897i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
2898 u32 reg_addr, u64 *reg_val,
2899 struct i40e_asq_cmd_details *cmd_details)
2900{
2901 struct i40e_aq_desc desc;
2902 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2903 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2904 i40e_status status;
2905
2906 if (reg_val == NULL)
2907 return I40E_ERR_PARAM;
2908
2909 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
2910
2911 cmd_resp->address = cpu_to_le32(reg_addr);
2912
2913 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2914
2915 if (!status) {
2916 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
2917 (u64)le32_to_cpu(cmd_resp->value_low);
2918 }
2919
2920 return status;
2921}
2922
2923/**
2924 * i40e_aq_debug_write_register
2925 * @hw: pointer to the hw struct
2926 * @reg_addr: register address
2927 * @reg_val: register value
2928 * @cmd_details: pointer to command details structure or NULL
2929 *
2930 * Write to a register using the admin queue commands
2931 **/
2932i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
2933 u32 reg_addr, u64 reg_val,
2934 struct i40e_asq_cmd_details *cmd_details)
2935{
2936 struct i40e_aq_desc desc;
2937 struct i40e_aqc_debug_reg_read_write *cmd =
2938 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2939 i40e_status status;
2940
2941 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
2942
2943 cmd->address = cpu_to_le32(reg_addr);
2944 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
2945 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
2946
2947 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2948
2949 return status;
2950}
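
/* Usage sketch (illustrative only): the debug AQ accessors let a register be
 * written and read back by its absolute address through firmware. reg_addr
 * and the written value are placeholders.
 *
 *	u64 val = 0;
 *	i40e_status status;
 *
 *	status = i40e_aq_debug_write_register(hw, reg_addr, 0x1, NULL);
 *	if (!status)
 *		status = i40e_aq_debug_read_register(hw, reg_addr, &val, NULL);
 */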
2951
2952/**
2953 * i40e_aq_request_resource
2954 * @hw: pointer to the hw struct
2955 * @resource: resource id
2956 * @access: access type
2957 * @sdp_number: resource number
2958 * @timeout: the maximum time in ms that the driver may hold the resource
2959 * @cmd_details: pointer to command details structure or NULL
2960 *
2961 * requests common resource using the admin queue commands
2962 **/
2963i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
2964 enum i40e_aq_resources_ids resource,
2965 enum i40e_aq_resource_access_type access,
2966 u8 sdp_number, u64 *timeout,
2967 struct i40e_asq_cmd_details *cmd_details)
2968{
2969 struct i40e_aq_desc desc;
2970 struct i40e_aqc_request_resource *cmd_resp =
2971 (struct i40e_aqc_request_resource *)&desc.params.raw;
2972 i40e_status status;
2973
2974 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
2975
2976 cmd_resp->resource_id = cpu_to_le16(resource);
2977 cmd_resp->access_type = cpu_to_le16(access);
2978 cmd_resp->resource_number = cpu_to_le32(sdp_number);
2979
2980 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2981 /* The completion specifies the maximum time in ms that the driver
2982 * may hold the resource in the Timeout field.
2983 * If the resource is held by someone else, the command completes with
2984 * busy return value and the timeout field indicates the maximum time
2985 * the current owner of the resource has to free it.
2986 */
2987 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
2988 *timeout = le32_to_cpu(cmd_resp->timeout);
2989
2990 return status;
2991}
2992
2993/**
2994 * i40e_aq_release_resource
2995 * @hw: pointer to the hw struct
2996 * @resource: resource id
2997 * @sdp_number: resource number
2998 * @cmd_details: pointer to command details structure or NULL
2999 *
3000 * release common resource using the admin queue commands
3001 **/
3002i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3003 enum i40e_aq_resources_ids resource,
3004 u8 sdp_number,
3005 struct i40e_asq_cmd_details *cmd_details)
3006{
3007 struct i40e_aq_desc desc;
3008 struct i40e_aqc_request_resource *cmd =
3009 (struct i40e_aqc_request_resource *)&desc.params.raw;
3010 i40e_status status;
3011
3012 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3013
3014 cmd->resource_id = cpu_to_le16(resource);
3015 cmd->resource_number = cpu_to_le32(sdp_number);
3016
3017 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3018
3019 return status;
3020}
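
/* Usage sketch (illustrative only): the request/release pair brackets access
 * to a shared resource such as the NVM. On EBUSY the returned timeout
 * reports how long the current owner may still hold the resource. The
 * I40E_NVM_RESOURCE_ID value is assumed from the resource id enum.
 *
 *	u64 timeout = 0;
 *	i40e_status status;
 *
 *	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
 *					  I40E_RESOURCE_READ, 0, &timeout,
 *					  NULL);
 *	if (!status) {
 *		(access the resource here)
 *		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
 *	}
 */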
3021
3022/**
3023 * i40e_aq_read_nvm
3024 * @hw: pointer to the hw struct
3025 * @module_pointer: module pointer location in words from the NVM beginning
3026 * @offset: byte offset from the module beginning
3027 * @length: length of the section to be read (in bytes from the offset)
3028 * @data: command buffer (size [bytes] = length)
3029 * @last_command: tells if this is the last command in a series
3030 * @cmd_details: pointer to command details structure or NULL
3031 *
3032 * Read the NVM using the admin queue commands
3033 **/
3034i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3035 u32 offset, u16 length, void *data,
3036 bool last_command,
3037 struct i40e_asq_cmd_details *cmd_details)
3038{
3039 struct i40e_aq_desc desc;
3040 struct i40e_aqc_nvm_update *cmd =
3041 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3042 i40e_status status;
3043
	/* The highest byte of the offset must be zero. */
3045 if (offset & 0xFF000000) {
3046 status = I40E_ERR_PARAM;
3047 goto i40e_aq_read_nvm_exit;
3048 }
3049
3050 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3051
3052 /* If this is the last command in a series, set the proper flag. */
3053 if (last_command)
3054 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3055 cmd->module_pointer = module_pointer;
3056 cmd->offset = cpu_to_le32(offset);
3057 cmd->length = cpu_to_le16(length);
3058
3059 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3060 if (length > I40E_AQ_LARGE_BUF)
3061 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3062
3063 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3064
3065i40e_aq_read_nvm_exit:
3066 return status;
3067}
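
/* Usage sketch (illustrative only): reading one shadow RAM word from the
 * flat NVM image (module_pointer 0). Offsets are in bytes, so a word offset
 * is multiplied by 2; sr_word_offset is a placeholder, and the NVM should be
 * acquired first (see i40e_acquire_nvm).
 *
 *	u16 word = 0;
 *	i40e_status status;
 *
 *	status = i40e_aq_read_nvm(hw, 0, 2 * sr_word_offset, sizeof(word),
 *				  &word, true, NULL);
 */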
3068
3069/**
3070 * i40e_aq_erase_nvm
3071 * @hw: pointer to the hw struct
3072 * @module_pointer: module pointer location in words from the NVM beginning
3073 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3074 * @length: length of the section to be erased (expressed in 4 KB)
3075 * @last_command: tells if this is the last command in a series
3076 * @cmd_details: pointer to command details structure or NULL
3077 *
3078 * Erase the NVM sector using the admin queue commands
3079 **/
3080i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3081 u32 offset, u16 length, bool last_command,
3082 struct i40e_asq_cmd_details *cmd_details)
3083{
3084 struct i40e_aq_desc desc;
3085 struct i40e_aqc_nvm_update *cmd =
3086 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3087 i40e_status status;
3088
	/* The highest byte of the offset must be zero. */
3090 if (offset & 0xFF000000) {
3091 status = I40E_ERR_PARAM;
3092 goto i40e_aq_erase_nvm_exit;
3093 }
3094
3095 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3096
3097 /* If this is the last command in a series, set the proper flag. */
3098 if (last_command)
3099 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3100 cmd->module_pointer = module_pointer;
3101 cmd->offset = cpu_to_le32(offset);
3102 cmd->length = cpu_to_le16(length);
3103
3104 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3105
3106i40e_aq_erase_nvm_exit:
3107 return status;
3108}
3109
3110/**
3111 * i40e_parse_discover_capabilities
3112 * @hw: pointer to the hw struct
3113 * @buff: pointer to a buffer containing device/function capability records
3114 * @cap_count: number of capability records in the list
3115 * @list_type_opc: type of capabilities list to parse
3116 *
3117 * Parse the device/function capabilities list.
3118 **/
3119static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3120 u32 cap_count,
3121 enum i40e_admin_queue_opc list_type_opc)
3122{
3123 struct i40e_aqc_list_capabilities_element_resp *cap;
3124 u32 valid_functions, num_functions;
3125 u32 number, logical_id, phys_id;
3126 struct i40e_hw_capabilities *p;
3127 u16 id, ocp_cfg_word0;
3128 i40e_status status;
3129 u8 major_rev;
3130 u32 i = 0;
3131
3132 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3133
3134 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3135 p = &hw->dev_caps;
3136 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3137 p = &hw->func_caps;
3138 else
3139 return;
3140
3141 for (i = 0; i < cap_count; i++, cap++) {
3142 id = le16_to_cpu(cap->id);
3143 number = le32_to_cpu(cap->number);
3144 logical_id = le32_to_cpu(cap->logical_id);
3145 phys_id = le32_to_cpu(cap->phys_id);
3146 major_rev = cap->major_rev;
3147
3148 switch (id) {
3149 case I40E_AQ_CAP_ID_SWITCH_MODE:
3150 p->switch_mode = number;
3151 break;
3152 case I40E_AQ_CAP_ID_MNG_MODE:
3153 p->management_mode = number;
3154 if (major_rev > 1) {
3155 p->mng_protocols_over_mctp = logical_id;
3156 i40e_debug(hw, I40E_DEBUG_INIT,
3157 "HW Capability: Protocols over MCTP = %d\n",
3158 p->mng_protocols_over_mctp);
3159 } else {
3160 p->mng_protocols_over_mctp = 0;
3161 }
3162 break;
3163 case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3164 p->npar_enable = number;
3165 break;
3166 case I40E_AQ_CAP_ID_OS2BMC_CAP:
3167 p->os2bmc = number;
3168 break;
3169 case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3170 p->valid_functions = number;
3171 break;
3172 case I40E_AQ_CAP_ID_SRIOV:
3173 if (number == 1)
3174 p->sr_iov_1_1 = true;
3175 break;
3176 case I40E_AQ_CAP_ID_VF:
3177 p->num_vfs = number;
3178 p->vf_base_id = logical_id;
3179 break;
3180 case I40E_AQ_CAP_ID_VMDQ:
3181 if (number == 1)
3182 p->vmdq = true;
3183 break;
3184 case I40E_AQ_CAP_ID_8021QBG:
3185 if (number == 1)
3186 p->evb_802_1_qbg = true;
3187 break;
3188 case I40E_AQ_CAP_ID_8021QBR:
3189 if (number == 1)
3190 p->evb_802_1_qbh = true;
3191 break;
3192 case I40E_AQ_CAP_ID_VSI:
3193 p->num_vsis = number;
3194 break;
3195 case I40E_AQ_CAP_ID_DCB:
3196 if (number == 1) {
3197 p->dcb = true;
3198 p->enabled_tcmap = logical_id;
3199 p->maxtc = phys_id;
3200 }
3201 break;
3202 case I40E_AQ_CAP_ID_FCOE:
3203 if (number == 1)
3204 p->fcoe = true;
3205 break;
3206 case I40E_AQ_CAP_ID_ISCSI:
3207 if (number == 1)
3208 p->iscsi = true;
3209 break;
3210 case I40E_AQ_CAP_ID_RSS:
3211 p->rss = true;
3212 p->rss_table_size = number;
3213 p->rss_table_entry_width = logical_id;
3214 break;
3215 case I40E_AQ_CAP_ID_RXQ:
3216 p->num_rx_qp = number;
3217 p->base_queue = phys_id;
3218 break;
3219 case I40E_AQ_CAP_ID_TXQ:
3220 p->num_tx_qp = number;
3221 p->base_queue = phys_id;
3222 break;
3223 case I40E_AQ_CAP_ID_MSIX:
3224 p->num_msix_vectors = number;
3225 i40e_debug(hw, I40E_DEBUG_INIT,
3226 "HW Capability: MSIX vector count = %d\n",
3227 p->num_msix_vectors);
3228 break;
3229 case I40E_AQ_CAP_ID_VF_MSIX:
3230 p->num_msix_vectors_vf = number;
3231 break;
3232 case I40E_AQ_CAP_ID_FLEX10:
3233 if (major_rev == 1) {
3234 if (number == 1) {
3235 p->flex10_enable = true;
3236 p->flex10_capable = true;
3237 }
3238 } else {
3239 /* Capability revision >= 2 */
3240 if (number & 1)
3241 p->flex10_enable = true;
3242 if (number & 2)
3243 p->flex10_capable = true;
3244 }
3245 p->flex10_mode = logical_id;
3246 p->flex10_status = phys_id;
3247 break;
3248 case I40E_AQ_CAP_ID_CEM:
3249 if (number == 1)
3250 p->mgmt_cem = true;
3251 break;
3252 case I40E_AQ_CAP_ID_IWARP:
3253 if (number == 1)
3254 p->iwarp = true;
3255 break;
3256 case I40E_AQ_CAP_ID_LED:
3257 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3258 p->led[phys_id] = true;
3259 break;
3260 case I40E_AQ_CAP_ID_SDP:
3261 if (phys_id < I40E_HW_CAP_MAX_GPIO)
3262 p->sdp[phys_id] = true;
3263 break;
3264 case I40E_AQ_CAP_ID_MDIO:
3265 if (number == 1) {
3266 p->mdio_port_num = phys_id;
3267 p->mdio_port_mode = logical_id;
3268 }
3269 break;
3270 case I40E_AQ_CAP_ID_1588:
3271 if (number == 1)
3272 p->ieee_1588 = true;
3273 break;
3274 case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3275 p->fd = true;
3276 p->fd_filters_guaranteed = number;
3277 p->fd_filters_best_effort = logical_id;
3278 break;
3279 case I40E_AQ_CAP_ID_WSR_PROT:
3280 p->wr_csr_prot = (u64)number;
3281 p->wr_csr_prot |= (u64)logical_id << 32;
3282 break;
3283 case I40E_AQ_CAP_ID_NVM_MGMT:
3284 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3285 p->sec_rev_disabled = true;
3286 if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3287 p->update_disabled = true;
3288 break;
3289 default:
3290 break;
3291 }
3292 }
3293
3294 if (p->fcoe)
3295 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3296
	/* Software override ensuring FCoE is disabled in NPAR or MFP
	 * mode, because FCoE is not supported in those modes.
	 */
3300 if (p->npar_enable || p->flex10_enable)
3301 p->fcoe = false;
3302
3303 /* count the enabled ports (aka the "not disabled" ports) */
3304 hw->num_ports = 0;
3305 for (i = 0; i < 4; i++) {
3306 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3307 u64 port_cfg = 0;
3308
3309 /* use AQ read to get the physical register offset instead
3310 * of the port relative offset
3311 */
3312 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3313 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3314 hw->num_ports++;
3315 }
3316
	/* OCP cards case: if a mezzanine is removed, the Ethernet port is
	 * left in the disabled state in the PRTGEN_CNF register. An
	 * additional NVM read is needed to check whether we are dealing
	 * with an OCP card. Those cards have at least 4 PFs, so using
	 * PRTGEN_CNF to count physical ports would result in a wrong
	 * partition id calculation and thus break WoL support.
	 */
3324 if (hw->mac.type == I40E_MAC_X722) {
3325 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3326 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3327 2 * I40E_SR_OCP_CFG_WORD0,
3328 sizeof(ocp_cfg_word0),
3329 &ocp_cfg_word0, true, NULL);
3330 if (!status &&
3331 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3332 hw->num_ports = 4;
3333 i40e_release_nvm(hw);
3334 }
3335 }
3336
3337 valid_functions = p->valid_functions;
3338 num_functions = 0;
3339 while (valid_functions) {
3340 if (valid_functions & 1)
3341 num_functions++;
3342 valid_functions >>= 1;
3343 }
3344
3345 /* partition id is 1-based, and functions are evenly spread
3346 * across the ports as partitions
3347 */
3348 if (hw->num_ports != 0) {
3349 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3350 hw->num_partitions = num_functions / hw->num_ports;
3351 }
3352
3353 /* additional HW specific goodies that might
3354 * someday be HW version specific
3355 */
3356 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3357}
3358
3359/**
3360 * i40e_aq_discover_capabilities
3361 * @hw: pointer to the hw struct
3362 * @buff: a virtual buffer to hold the capabilities
3363 * @buff_size: Size of the virtual buffer
3364 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3365 * @list_type_opc: capabilities type to discover - pass in the command opcode
3366 * @cmd_details: pointer to command details structure or NULL
3367 *
3368 * Get the device capabilities descriptions from the firmware
3369 **/
3370i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3371 void *buff, u16 buff_size, u16 *data_size,
3372 enum i40e_admin_queue_opc list_type_opc,
3373 struct i40e_asq_cmd_details *cmd_details)
3374{
3375 struct i40e_aqc_list_capabilites *cmd;
3376 struct i40e_aq_desc desc;
3377 i40e_status status = 0;
3378
3379 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3380
3381 if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3382 list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3383 status = I40E_ERR_PARAM;
3384 goto exit;
3385 }
3386
3387 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3388
3389 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3390 if (buff_size > I40E_AQ_LARGE_BUF)
3391 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3392
3393 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3394 *data_size = le16_to_cpu(desc.datalen);
3395
3396 if (status)
3397 goto exit;
3398
3399 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3400 list_type_opc);
3401
3402exit:
3403 return status;
3404}
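
/* Usage sketch (illustrative only): callers usually issue the command with a
 * starting buffer size and, if firmware answers ENOMEM, retry with the size
 * reported back in data_size. buf and buf_len are placeholders.
 *
 *	u16 needed = 0;
 *	i40e_status status;
 *
 *	status = i40e_aq_discover_capabilities(hw, buf, buf_len, &needed,
 *				i40e_aqc_opc_list_func_capabilities, NULL);
 *	if (status && hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
 *		(reallocate buf to 'needed' bytes and retry)
 */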
3405
3406/**
3407 * i40e_aq_update_nvm
3408 * @hw: pointer to the hw struct
3409 * @module_pointer: module pointer location in words from the NVM beginning
3410 * @offset: byte offset from the module beginning
3411 * @length: length of the section to be written (in bytes from the offset)
3412 * @data: command buffer (size [bytes] = length)
3413 * @last_command: tells if this is the last command in a series
3414 * @preservation_flags: Preservation mode flags
3415 * @cmd_details: pointer to command details structure or NULL
3416 *
3417 * Update the NVM using the admin queue commands
3418 **/
3419i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3420 u32 offset, u16 length, void *data,
3421 bool last_command, u8 preservation_flags,
3422 struct i40e_asq_cmd_details *cmd_details)
3423{
3424 struct i40e_aq_desc desc;
3425 struct i40e_aqc_nvm_update *cmd =
3426 (struct i40e_aqc_nvm_update *)&desc.params.raw;
3427 i40e_status status;
3428
	/* The highest byte of the offset must be zero. */
3430 if (offset & 0xFF000000) {
3431 status = I40E_ERR_PARAM;
3432 goto i40e_aq_update_nvm_exit;
3433 }
3434
3435 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3436
3437 /* If this is the last command in a series, set the proper flag. */
3438 if (last_command)
3439 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3440 if (hw->mac.type == I40E_MAC_X722) {
3441 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3442 cmd->command_flags |=
3443 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3444 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3445 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3446 cmd->command_flags |=
3447 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3448 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3449 }
3450 cmd->module_pointer = module_pointer;
3451 cmd->offset = cpu_to_le32(offset);
3452 cmd->length = cpu_to_le16(length);
3453
3454 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3455 if (length > I40E_AQ_LARGE_BUF)
3456 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3457
3458 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3459
3460i40e_aq_update_nvm_exit:
3461 return status;
3462}
3463
3464/**
3465 * i40e_aq_rearrange_nvm
3466 * @hw: pointer to the hw struct
3467 * @rearrange_nvm: defines direction of rearrangement
3468 * @cmd_details: pointer to command details structure or NULL
3469 *
3470 * Rearrange NVM structure, available only for transition FW
3471 **/
3472i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3473 u8 rearrange_nvm,
3474 struct i40e_asq_cmd_details *cmd_details)
3475{
3476 struct i40e_aqc_nvm_update *cmd;
3477 i40e_status status;
3478 struct i40e_aq_desc desc;
3479
3480 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3481
3482 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3483
3484 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3485 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3486
3487 if (!rearrange_nvm) {
3488 status = I40E_ERR_PARAM;
3489 goto i40e_aq_rearrange_nvm_exit;
3490 }
3491
3492 cmd->command_flags |= rearrange_nvm;
3493 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3494
3495i40e_aq_rearrange_nvm_exit:
3496 return status;
3497}
3498
3499/**
3500 * i40e_aq_get_lldp_mib
3501 * @hw: pointer to the hw struct
3502 * @bridge_type: type of bridge requested
3503 * @mib_type: Local, Remote or both Local and Remote MIBs
3504 * @buff: pointer to a user supplied buffer to store the MIB block
3505 * @buff_size: size of the buffer (in bytes)
 * @local_len: length of the returned Local LLDP MIB
3507 * @remote_len: length of the returned Remote LLDP MIB
3508 * @cmd_details: pointer to command details structure or NULL
3509 *
3510 * Requests the complete LLDP MIB (entire packet).
3511 **/
3512i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3513 u8 mib_type, void *buff, u16 buff_size,
3514 u16 *local_len, u16 *remote_len,
3515 struct i40e_asq_cmd_details *cmd_details)
3516{
3517 struct i40e_aq_desc desc;
3518 struct i40e_aqc_lldp_get_mib *cmd =
3519 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3520 struct i40e_aqc_lldp_get_mib *resp =
3521 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3522 i40e_status status;
3523
3524 if (buff_size == 0 || !buff)
3525 return I40E_ERR_PARAM;
3526
3527 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3528 /* Indirect Command */
3529 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3530
3531 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3532 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3533 I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3534
3535 desc.datalen = cpu_to_le16(buff_size);
3536
3538 if (buff_size > I40E_AQ_LARGE_BUF)
3539 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3540
3541 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3542 if (!status) {
3543 if (local_len != NULL)
3544 *local_len = le16_to_cpu(resp->local_len);
3545 if (remote_len != NULL)
3546 *remote_len = le16_to_cpu(resp->remote_len);
3547 }
3548
3549 return status;
3550}
3551
3552/**
3553 * i40e_aq_set_lldp_mib - Set the LLDP MIB
3554 * @hw: pointer to the hw struct
3555 * @mib_type: Local, Remote or both Local and Remote MIBs
3556 * @buff: pointer to a user supplied buffer to store the MIB block
3557 * @buff_size: size of the buffer (in bytes)
3558 * @cmd_details: pointer to command details structure or NULL
3559 *
3560 * Set the LLDP MIB.
3561 **/
3562enum i40e_status_code
3563i40e_aq_set_lldp_mib(struct i40e_hw *hw,
3564 u8 mib_type, void *buff, u16 buff_size,
3565 struct i40e_asq_cmd_details *cmd_details)
3566{
3567 struct i40e_aqc_lldp_set_local_mib *cmd;
3568 enum i40e_status_code status;
3569 struct i40e_aq_desc desc;
3570
3571 cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
3572 if (buff_size == 0 || !buff)
3573 return I40E_ERR_PARAM;
3574
3575 i40e_fill_default_direct_cmd_desc(&desc,
3576 i40e_aqc_opc_lldp_set_local_mib);
3577 /* Indirect Command */
3578 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3579 if (buff_size > I40E_AQ_LARGE_BUF)
3580 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3581 desc.datalen = cpu_to_le16(buff_size);
3582
3583 cmd->type = mib_type;
3584 cmd->length = cpu_to_le16(buff_size);
3585 cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
3586 cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
3587
3588 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3589 return status;
3590}
3591
3592/**
3593 * i40e_aq_cfg_lldp_mib_change_event
3594 * @hw: pointer to the hw struct
3595 * @enable_update: Enable or Disable event posting
3596 * @cmd_details: pointer to command details structure or NULL
3597 *
 * Enable or disable posting of an event on the ARQ when the LLDP MIB
 * associated with the interface changes.
3600 **/
3601i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3602 bool enable_update,
3603 struct i40e_asq_cmd_details *cmd_details)
3604{
3605 struct i40e_aq_desc desc;
3606 struct i40e_aqc_lldp_update_mib *cmd =
3607 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3608 i40e_status status;
3609
3610 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3611
3612 if (!enable_update)
3613 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3614
3615 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3616
3617 return status;
3618}
3619
3620/**
3621 * i40e_aq_restore_lldp
3622 * @hw: pointer to the hw struct
3623 * @setting: pointer to factory setting variable or NULL
3624 * @restore: True if factory settings should be restored
3625 * @cmd_details: pointer to command details structure or NULL
3626 *
 * Restore the LLDP Agent factory settings if @restore is set to true;
 * otherwise only return the factory setting in the AQ response.
3629 **/
3630enum i40e_status_code
3631i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3632 struct i40e_asq_cmd_details *cmd_details)
3633{
3634 struct i40e_aq_desc desc;
3635 struct i40e_aqc_lldp_restore *cmd =
3636 (struct i40e_aqc_lldp_restore *)&desc.params.raw;
3637 i40e_status status;
3638
3639 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3640 i40e_debug(hw, I40E_DEBUG_ALL,
3641 "Restore LLDP not supported by current FW version.\n");
3642 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3643 }
3644
3645 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3646
3647 if (restore)
3648 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3649
3650 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3651
3652 if (setting)
3653 *setting = cmd->command & 1;
3654
3655 return status;
3656}
3657
3658/**
3659 * i40e_aq_stop_lldp
3660 * @hw: pointer to the hw struct
3661 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3662 * @persist: True if stop of LLDP should be persistent across power cycles
3663 * @cmd_details: pointer to command details structure or NULL
3664 *
3665 * Stop or Shutdown the embedded LLDP Agent
3666 **/
3667i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3668 bool persist,
3669 struct i40e_asq_cmd_details *cmd_details)
3670{
3671 struct i40e_aq_desc desc;
3672 struct i40e_aqc_lldp_stop *cmd =
3673 (struct i40e_aqc_lldp_stop *)&desc.params.raw;
3674 i40e_status status;
3675
3676 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3677
3678 if (shutdown_agent)
3679 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3680
3681 if (persist) {
3682 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3683 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3684 else
3685 i40e_debug(hw, I40E_DEBUG_ALL,
3686 "Persistent Stop LLDP not supported by current FW version.\n");
3687 }
3688
3689 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3690
3691 return status;
3692}
3693
3694/**
3695 * i40e_aq_start_lldp
3696 * @hw: pointer to the hw struct
3697 * @persist: True if start of LLDP should be persistent across power cycles
3698 * @cmd_details: pointer to command details structure or NULL
3699 *
3700 * Start the embedded LLDP Agent on all ports.
3701 **/
3702i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3703 struct i40e_asq_cmd_details *cmd_details)
3704{
3705 struct i40e_aq_desc desc;
3706 struct i40e_aqc_lldp_start *cmd =
3707 (struct i40e_aqc_lldp_start *)&desc.params.raw;
3708 i40e_status status;
3709
3710 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3711
3712 cmd->command = I40E_AQ_LLDP_AGENT_START;
3713
3714 if (persist) {
3715 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3716 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3717 else
3718 i40e_debug(hw, I40E_DEBUG_ALL,
3719 "Persistent Start LLDP not supported by current FW version.\n");
3720 }
3721
3722 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3723
3724 return status;
3725}
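
/* Usage sketch (illustrative only): switching a port to software DCBX
 * typically shuts the FW LLDP agent down, and re-enabling FW DCBX starts it
 * again; persistence is requested only when the FW supports it.
 *
 *	bool persist = !!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT);
 *	i40e_status status;
 *
 *	status = i40e_aq_stop_lldp(hw, true, persist, NULL);
 *	(later, to hand control back to the FW agent)
 *	status = i40e_aq_start_lldp(hw, persist, NULL);
 */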
3726
3727/**
3728 * i40e_aq_set_dcb_parameters
3729 * @hw: pointer to the hw struct
 * @dcb_enable: True if the DCB configuration needs to be applied
 * @cmd_details: pointer to command details structure or NULL
 *
 * Tell the firmware whether or not the DCB configuration should be applied.
 **/
3734enum i40e_status_code
3735i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3736 struct i40e_asq_cmd_details *cmd_details)
3737{
3738 struct i40e_aq_desc desc;
3739 struct i40e_aqc_set_dcb_parameters *cmd =
3740 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3741 i40e_status status;
3742
3743 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3744 return I40E_ERR_DEVICE_NOT_SUPPORTED;
3745
3746 i40e_fill_default_direct_cmd_desc(&desc,
3747 i40e_aqc_opc_set_dcb_parameters);
3748
3749 if (dcb_enable) {
3750 cmd->valid_flags = I40E_DCB_VALID;
3751 cmd->command = I40E_AQ_DCB_SET_AGENT;
3752 }
3753 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3754
3755 return status;
3756}
3757
3758/**
3759 * i40e_aq_get_cee_dcb_config
3760 * @hw: pointer to the hw struct
3761 * @buff: response buffer that stores CEE operational configuration
3762 * @buff_size: size of the buffer passed
3763 * @cmd_details: pointer to command details structure or NULL
3764 *
3765 * Get CEE DCBX mode operational configuration from firmware
3766 **/
3767i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3768 void *buff, u16 buff_size,
3769 struct i40e_asq_cmd_details *cmd_details)
3770{
3771 struct i40e_aq_desc desc;
3772 i40e_status status;
3773
3774 if (buff_size == 0 || !buff)
3775 return I40E_ERR_PARAM;
3776
3777 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3778
3779 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3780 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3781 cmd_details);
3782
3783 return status;
3784}
3785
3786/**
3787 * i40e_aq_add_udp_tunnel
3788 * @hw: pointer to the hw struct
3789 * @udp_port: the UDP port to add in Host byte order
3790 * @protocol_index: protocol index type
3791 * @filter_index: pointer to filter index
3792 * @cmd_details: pointer to command details structure or NULL
3793 *
3794 * Note: Firmware expects the udp_port value to be in Little Endian format,
3795 * and this function will call cpu_to_le16 to convert from Host byte order to
3796 * Little Endian order.
3797 **/
3798i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3799 u16 udp_port, u8 protocol_index,
3800 u8 *filter_index,
3801 struct i40e_asq_cmd_details *cmd_details)
3802{
3803 struct i40e_aq_desc desc;
3804 struct i40e_aqc_add_udp_tunnel *cmd =
3805 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3806 struct i40e_aqc_del_udp_tunnel_completion *resp =
3807 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3808 i40e_status status;
3809
3810 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3811
3812 cmd->udp_port = cpu_to_le16(udp_port);
3813 cmd->protocol_type = protocol_index;
3814
3815 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3816
3817 if (!status && filter_index)
3818 *filter_index = resp->index;
3819
3820 return status;
3821}
3822
3823/**
3824 * i40e_aq_del_udp_tunnel
3825 * @hw: pointer to the hw struct
3826 * @index: filter index
3827 * @cmd_details: pointer to command details structure or NULL
3828 **/
3829i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3830 struct i40e_asq_cmd_details *cmd_details)
3831{
3832 struct i40e_aq_desc desc;
3833 struct i40e_aqc_remove_udp_tunnel *cmd =
3834 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3835 i40e_status status;
3836
3837 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3838
3839 cmd->index = index;
3840
3841 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3842
3843 return status;
3844}
3845
3846/**
3847 * i40e_aq_delete_element - Delete switch element
3848 * @hw: pointer to the hw struct
3849 * @seid: the SEID to delete from the switch
3850 * @cmd_details: pointer to command details structure or NULL
3851 *
3852 * This deletes a switch element from the switch.
3853 **/
3854i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3855 struct i40e_asq_cmd_details *cmd_details)
3856{
3857 struct i40e_aq_desc desc;
3858 struct i40e_aqc_switch_seid *cmd =
3859 (struct i40e_aqc_switch_seid *)&desc.params.raw;
3860 i40e_status status;
3861
3862 if (seid == 0)
3863 return I40E_ERR_PARAM;
3864
3865 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3866
3867 cmd->seid = cpu_to_le16(seid);
3868
3869 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3870
3871 return status;
3872}
3873
3874/**
3875 * i40e_aq_dcb_updated - DCB Updated Command
3876 * @hw: pointer to the hw struct
3877 * @cmd_details: pointer to command details structure or NULL
3878 *
3879 * EMP will return when the shared RPB settings have been
3880 * recomputed and modified. The retval field in the descriptor
3881 * will be set to 0 when RPB is modified.
3882 **/
3883i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
3884 struct i40e_asq_cmd_details *cmd_details)
3885{
3886 struct i40e_aq_desc desc;
3887 i40e_status status;
3888
3889 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3890
3891 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3892
3893 return status;
3894}
3895
3896/**
3897 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3898 * @hw: pointer to the hw struct
3899 * @seid: seid for the physical port/switching component/vsi
3900 * @buff: Indirect buffer to hold data parameters and response
3901 * @buff_size: Indirect buffer size
3902 * @opcode: Tx scheduler AQ command opcode
3903 * @cmd_details: pointer to command details structure or NULL
3904 *
3905 * Generic command handler for Tx scheduler AQ commands
3906 **/
3907static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3908 void *buff, u16 buff_size,
3909 enum i40e_admin_queue_opc opcode,
3910 struct i40e_asq_cmd_details *cmd_details)
3911{
3912 struct i40e_aq_desc desc;
3913 struct i40e_aqc_tx_sched_ind *cmd =
3914 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3915 i40e_status status;
3916 bool cmd_param_flag = false;
3917
3918 switch (opcode) {
3919 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3920 case i40e_aqc_opc_configure_vsi_tc_bw:
3921 case i40e_aqc_opc_enable_switching_comp_ets:
3922 case i40e_aqc_opc_modify_switching_comp_ets:
3923 case i40e_aqc_opc_disable_switching_comp_ets:
3924 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3925 case i40e_aqc_opc_configure_switching_comp_bw_config:
3926 cmd_param_flag = true;
3927 break;
3928 case i40e_aqc_opc_query_vsi_bw_config:
3929 case i40e_aqc_opc_query_vsi_ets_sla_config:
3930 case i40e_aqc_opc_query_switching_comp_ets_config:
3931 case i40e_aqc_opc_query_port_ets_config:
3932 case i40e_aqc_opc_query_switching_comp_bw_config:
3933 cmd_param_flag = false;
3934 break;
3935 default:
3936 return I40E_ERR_PARAM;
3937 }
3938
3939 i40e_fill_default_direct_cmd_desc(&desc, opcode);
3940
3941 /* Indirect command */
3942 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3943 if (cmd_param_flag)
3944 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
3945 if (buff_size > I40E_AQ_LARGE_BUF)
3946 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3947
3948 desc.datalen = cpu_to_le16(buff_size);
3949
3950 cmd->vsi_seid = cpu_to_le16(seid);
3951
3952 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3953
3954 return status;
3955}
3956
3957/**
3958 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
3959 * @hw: pointer to the hw struct
3960 * @seid: VSI seid
3961 * @credit: BW limit credits (0 = disabled)
3962 * @max_credit: Max BW limit credits
3963 * @cmd_details: pointer to command details structure or NULL
3964 **/
3965i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
3966 u16 seid, u16 credit, u8 max_credit,
3967 struct i40e_asq_cmd_details *cmd_details)
3968{
3969 struct i40e_aq_desc desc;
3970 struct i40e_aqc_configure_vsi_bw_limit *cmd =
3971 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
3972 i40e_status status;
3973
3974 i40e_fill_default_direct_cmd_desc(&desc,
3975 i40e_aqc_opc_configure_vsi_bw_limit);
3976
3977 cmd->vsi_seid = cpu_to_le16(seid);
3978 cmd->credit = cpu_to_le16(credit);
3979 cmd->max_credit = max_credit;
3980
3981 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3982
3983 return status;
3984}
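
/* Usage sketch (illustrative only): limiting a VSI to roughly 1 Gbps.
 * Elsewhere in the driver one credit is treated as 50 Mbps and a small
 * max_credit bounds burst accumulation; both values here are illustrative
 * assumptions and vsi_seid is a placeholder.
 *
 *	i40e_status status;
 *
 *	status = i40e_aq_config_vsi_bw_limit(hw, vsi_seid, 1000 / 50, 4, NULL);
 */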
3985
3986/**
3987 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
3988 * @hw: pointer to the hw struct
3989 * @seid: VSI seid
3990 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
3991 * @cmd_details: pointer to command details structure or NULL
3992 **/
3993i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
3994 u16 seid,
3995 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
3996 struct i40e_asq_cmd_details *cmd_details)
3997{
3998 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
3999 i40e_aqc_opc_configure_vsi_tc_bw,
4000 cmd_details);
4001}
4002
4003/**
4004 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4005 * @hw: pointer to the hw struct
4006 * @seid: seid of the switching component connected to Physical Port
4007 * @ets_data: Buffer holding ETS parameters
4008 * @opcode: Tx scheduler AQ command opcode
4009 * @cmd_details: pointer to command details structure or NULL
4010 **/
4011i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4012 u16 seid,
4013 struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4014 enum i40e_admin_queue_opc opcode,
4015 struct i40e_asq_cmd_details *cmd_details)
4016{
4017 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4018 sizeof(*ets_data), opcode, cmd_details);
4019}
4020
4021/**
4022 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4023 * @hw: pointer to the hw struct
4024 * @seid: seid of the switching component
4025 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4026 * @cmd_details: pointer to command details structure or NULL
4027 **/
4028i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4029 u16 seid,
4030 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4031 struct i40e_asq_cmd_details *cmd_details)
4032{
4033 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4034 i40e_aqc_opc_configure_switching_comp_bw_config,
4035 cmd_details);
4036}
4037
4038/**
4039 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4040 * @hw: pointer to the hw struct
4041 * @seid: seid of the VSI
4042 * @bw_data: Buffer to hold VSI BW configuration
4043 * @cmd_details: pointer to command details structure or NULL
4044 **/
4045i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4046 u16 seid,
4047 struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4048 struct i40e_asq_cmd_details *cmd_details)
4049{
4050 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4051 i40e_aqc_opc_query_vsi_bw_config,
4052 cmd_details);
4053}
4054
4055/**
4056 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4057 * @hw: pointer to the hw struct
4058 * @seid: seid of the VSI
4059 * @bw_data: Buffer to hold VSI BW configuration per TC
4060 * @cmd_details: pointer to command details structure or NULL
4061 **/
4062i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4063 u16 seid,
4064 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4065 struct i40e_asq_cmd_details *cmd_details)
4066{
4067 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4068 i40e_aqc_opc_query_vsi_ets_sla_config,
4069 cmd_details);
4070}
4071
4072/**
4073 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4074 * @hw: pointer to the hw struct
4075 * @seid: seid of the switching component
4076 * @bw_data: Buffer to hold switching component's per TC BW config
4077 * @cmd_details: pointer to command details structure or NULL
4078 **/
4079i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4080 u16 seid,
4081 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4082 struct i40e_asq_cmd_details *cmd_details)
4083{
4084 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4085 i40e_aqc_opc_query_switching_comp_ets_config,
4086 cmd_details);
4087}
4088
4089/**
4090 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4091 * @hw: pointer to the hw struct
4092 * @seid: seid of the VSI or switching component connected to Physical Port
4093 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4094 * @cmd_details: pointer to command details structure or NULL
4095 **/
4096i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4097 u16 seid,
4098 struct i40e_aqc_query_port_ets_config_resp *bw_data,
4099 struct i40e_asq_cmd_details *cmd_details)
4100{
4101 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4102 i40e_aqc_opc_query_port_ets_config,
4103 cmd_details);
4104}
4105
4106/**
4107 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4108 * @hw: pointer to the hw struct
4109 * @seid: seid of the switching component
4110 * @bw_data: Buffer to hold switching component's BW configuration
4111 * @cmd_details: pointer to command details structure or NULL
4112 **/
4113i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4114 u16 seid,
4115 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4116 struct i40e_asq_cmd_details *cmd_details)
4117{
4118 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4119 i40e_aqc_opc_query_switching_comp_bw_config,
4120 cmd_details);
4121}
4122
4123/**
4124 * i40e_validate_filter_settings
4125 * @hw: pointer to the hardware structure
4126 * @settings: Filter control settings
4127 *
4128 * Check and validate the filter control settings passed.
 * The function checks that valid filter and context sizes are
 * passed for FCoE and PE.
4131 *
4132 * Returns 0 if the values passed are valid and within
4133 * range else returns an error.
4134 **/
4135static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4136 struct i40e_filter_control_settings *settings)
4137{
4138 u32 fcoe_cntx_size, fcoe_filt_size;
4139 u32 pe_cntx_size, pe_filt_size;
4140 u32 fcoe_fmax;
4141 u32 val;
4142
4143 /* Validate FCoE settings passed */
4144 switch (settings->fcoe_filt_num) {
4145 case I40E_HASH_FILTER_SIZE_1K:
4146 case I40E_HASH_FILTER_SIZE_2K:
4147 case I40E_HASH_FILTER_SIZE_4K:
4148 case I40E_HASH_FILTER_SIZE_8K:
4149 case I40E_HASH_FILTER_SIZE_16K:
4150 case I40E_HASH_FILTER_SIZE_32K:
4151 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4152 fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4153 break;
4154 default:
4155 return I40E_ERR_PARAM;
4156 }
4157
4158 switch (settings->fcoe_cntx_num) {
4159 case I40E_DMA_CNTX_SIZE_512:
4160 case I40E_DMA_CNTX_SIZE_1K:
4161 case I40E_DMA_CNTX_SIZE_2K:
4162 case I40E_DMA_CNTX_SIZE_4K:
4163 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4164 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4165 break;
4166 default:
4167 return I40E_ERR_PARAM;
4168 }
4169
4170 /* Validate PE settings passed */
4171 switch (settings->pe_filt_num) {
4172 case I40E_HASH_FILTER_SIZE_1K:
4173 case I40E_HASH_FILTER_SIZE_2K:
4174 case I40E_HASH_FILTER_SIZE_4K:
4175 case I40E_HASH_FILTER_SIZE_8K:
4176 case I40E_HASH_FILTER_SIZE_16K:
4177 case I40E_HASH_FILTER_SIZE_32K:
4178 case I40E_HASH_FILTER_SIZE_64K:
4179 case I40E_HASH_FILTER_SIZE_128K:
4180 case I40E_HASH_FILTER_SIZE_256K:
4181 case I40E_HASH_FILTER_SIZE_512K:
4182 case I40E_HASH_FILTER_SIZE_1M:
4183 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4184 pe_filt_size <<= (u32)settings->pe_filt_num;
4185 break;
4186 default:
4187 return I40E_ERR_PARAM;
4188 }
4189
4190 switch (settings->pe_cntx_num) {
4191 case I40E_DMA_CNTX_SIZE_512:
4192 case I40E_DMA_CNTX_SIZE_1K:
4193 case I40E_DMA_CNTX_SIZE_2K:
4194 case I40E_DMA_CNTX_SIZE_4K:
4195 case I40E_DMA_CNTX_SIZE_8K:
4196 case I40E_DMA_CNTX_SIZE_16K:
4197 case I40E_DMA_CNTX_SIZE_32K:
4198 case I40E_DMA_CNTX_SIZE_64K:
4199 case I40E_DMA_CNTX_SIZE_128K:
4200 case I40E_DMA_CNTX_SIZE_256K:
4201 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4202 pe_cntx_size <<= (u32)settings->pe_cntx_num;
4203 break;
4204 default:
4205 return I40E_ERR_PARAM;
4206 }
4207
4208 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4209 val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4210 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4211 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4212 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
4213 return I40E_ERR_INVALID_SIZE;
4214
4215 return 0;
4216}
4217
4218/**
4219 * i40e_set_filter_control
4220 * @hw: pointer to the hardware structure
4221 * @settings: Filter control settings
4222 *
4223 * Set the Queue Filters for PE/FCoE and enable filters required
 * for a single PF. These settings are expected to be programmed at
 * driver initialization time.
4226 **/
4227i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4228 struct i40e_filter_control_settings *settings)
4229{
4230 i40e_status ret = 0;
4231 u32 hash_lut_size = 0;
4232 u32 val;
4233
4234 if (!settings)
4235 return I40E_ERR_PARAM;
4236
4237 /* Validate the input settings */
4238 ret = i40e_validate_filter_settings(hw, settings);
4239 if (ret)
4240 return ret;
4241
4242 /* Read the PF Queue Filter control register */
4243 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4244
4245 /* Program required PE hash buckets for the PF */
4246 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4247 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4248 I40E_PFQF_CTL_0_PEHSIZE_MASK;
4249 /* Program required PE contexts for the PF */
4250 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4251 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4252 I40E_PFQF_CTL_0_PEDSIZE_MASK;
4253
4254 /* Program required FCoE hash buckets for the PF */
4255 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4256 val |= ((u32)settings->fcoe_filt_num <<
4257 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4258 I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4259 /* Program required FCoE DDP contexts for the PF */
4260 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4261 val |= ((u32)settings->fcoe_cntx_num <<
4262 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4263 I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4264
4265 /* Program Hash LUT size for the PF */
4266 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4267 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4268 hash_lut_size = 1;
4269 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4270 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4271
4272 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4273 if (settings->enable_fdir)
4274 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4275 if (settings->enable_ethtype)
4276 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4277 if (settings->enable_macvlan)
4278 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4279
4280 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4281
4282 return 0;
4283}
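
/* Usage sketch (illustrative only): a minimal PF setup enabling the Flow
 * Director, ethertype and MACVLAN filters with a 512-entry hash LUT. The
 * zero-initialized FCoE/PE size fields are assumed to map to the smallest
 * size enums accepted by i40e_validate_filter_settings.
 *
 *	struct i40e_filter_control_settings settings = {};
 *	i40e_status ret;
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	ret = i40e_set_filter_control(hw, &settings);
 */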
4284
4285/**
4286 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4287 * @hw: pointer to the hw struct
4288 * @mac_addr: MAC address to use in the filter
4289 * @ethtype: Ethertype to use in the filter
 * @flags: Flags that need to be applied to the filter
4291 * @vsi_seid: seid of the control VSI
4292 * @queue: VSI queue number to send the packet to
 * @is_add: Add the control packet filter if true, otherwise remove it
4294 * @stats: Structure to hold information on control filter counts
4295 * @cmd_details: pointer to command details structure or NULL
4296 *
4297 * This command adds or removes a control packet filter for a control VSI.
4298 * On return it updates the perfect filter usage counts in the
4299 * stats member.
4300 **/
4301i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4302 u8 *mac_addr, u16 ethtype, u16 flags,
4303 u16 vsi_seid, u16 queue, bool is_add,
4304 struct i40e_control_filter_stats *stats,
4305 struct i40e_asq_cmd_details *cmd_details)
4306{
4307 struct i40e_aq_desc desc;
4308 struct i40e_aqc_add_remove_control_packet_filter *cmd =
4309 (struct i40e_aqc_add_remove_control_packet_filter *)
4310 &desc.params.raw;
4311 struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4312 (struct i40e_aqc_add_remove_control_packet_filter_completion *)
4313 &desc.params.raw;
4314 i40e_status status;
4315
4316 if (vsi_seid == 0)
4317 return I40E_ERR_PARAM;
4318
4319 if (is_add) {
4320 i40e_fill_default_direct_cmd_desc(&desc,
4321 i40e_aqc_opc_add_control_packet_filter);
4322 cmd->queue = cpu_to_le16(queue);
4323 } else {
4324 i40e_fill_default_direct_cmd_desc(&desc,
4325 i40e_aqc_opc_remove_control_packet_filter);
4326 }
4327
4328 if (mac_addr)
4329 ether_addr_copy(cmd->mac, mac_addr);
4330
4331 cmd->etype = cpu_to_le16(ethtype);
4332 cmd->flags = cpu_to_le16(flags);
4333 cmd->seid = cpu_to_le16(vsi_seid);
4334
4335 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4336
4337 if (!status && stats) {
4338 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4339 stats->etype_used = le16_to_cpu(resp->etype_used);
4340 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4341 stats->etype_free = le16_to_cpu(resp->etype_free);
4342 }
4343
4344 return status;
4345}
4346
4347/**
4348 * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
4349 * @hw: pointer to the hw struct
4350 * @seid: VSI seid to add the ethertype filter to
4351 **/
4352void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4353 u16 seid)
4354{
4355#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4356 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4357 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4358 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4359 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4360 i40e_status status;
4361
4362 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4363 seid, 0, true, NULL,
4364 NULL);
4365 if (status)
4366 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4367}
4368
4369/**
4370 * i40e_aq_alternate_read
4371 * @hw: pointer to the hardware structure
4372 * @reg_addr0: address of first dword to be read
4373 * @reg_val0: pointer for data read from 'reg_addr0'
4374 * @reg_addr1: address of second dword to be read
4375 * @reg_val1: pointer for data read from 'reg_addr1'
4376 *
4377 * Read one or two dwords from the alternate structure. Fields are indicated
4378 * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
4379 * pointer is not passed, only the register at 'reg_addr0' is read.
4380 *
4381 **/
4382static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4383 u32 reg_addr0, u32 *reg_val0,
4384 u32 reg_addr1, u32 *reg_val1)
4385{
4386 struct i40e_aq_desc desc;
4387 struct i40e_aqc_alternate_write *cmd_resp =
4388 (struct i40e_aqc_alternate_write *)&desc.params.raw;
4389 i40e_status status;
4390
4391 if (!reg_val0)
4392 return I40E_ERR_PARAM;
4393
4394 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4395 cmd_resp->address0 = cpu_to_le32(reg_addr0);
4396 cmd_resp->address1 = cpu_to_le32(reg_addr1);
4397
4398 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4399
4400 if (!status) {
4401 *reg_val0 = le32_to_cpu(cmd_resp->data0);
4402
4403 if (reg_val1)
4404 *reg_val1 = le32_to_cpu(cmd_resp->data1);
4405 }
4406
4407 return status;
4408}
4409
4410/**
4411 * i40e_aq_suspend_port_tx
4412 * @hw: pointer to the hardware structure
4413 * @seid: port seid
4414 * @cmd_details: pointer to command details structure or NULL
4415 *
4416 * Suspend port's Tx traffic
4417 **/
4418i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
4419 struct i40e_asq_cmd_details *cmd_details)
4420{
4421 struct i40e_aqc_tx_sched_ind *cmd;
4422 struct i40e_aq_desc desc;
4423 i40e_status status;
4424
4425 cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
4426 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
4427 cmd->vsi_seid = cpu_to_le16(seid);
4428 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4429
4430 return status;
4431}
4432
4433/**
4434 * i40e_aq_resume_port_tx
4435 * @hw: pointer to the hardware structure
4436 * @cmd_details: pointer to command details structure or NULL
4437 *
4438 * Resume port's Tx traffic
4439 **/
4440i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4441 struct i40e_asq_cmd_details *cmd_details)
4442{
4443 struct i40e_aq_desc desc;
4444 i40e_status status;
4445
4446 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4447
4448 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4449
4450 return status;
4451}
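
/* Illustrative sketch only: pausing and resuming port Tx around a
 * scheduler reconfiguration.  The 'seid' here is assumed to be the
 * SEID of the physical port, tracked by the caller; it is not derived
 * from 'hw' in this file.
 */
static i40e_status i40e_example_pause_port_tx(struct i40e_hw *hw, u16 seid)
{
	i40e_status status;

	status = i40e_aq_suspend_port_tx(hw, seid, NULL);
	if (status)
		return status;

	/* ... reprogram DCB/ETS configuration here ... */

	return i40e_aq_resume_port_tx(hw, NULL);
}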
4452
4453/**
4454 * i40e_set_pci_config_data - store PCI bus info
4455 * @hw: pointer to hardware structure
4456 * @link_status: the link status word from PCI config space
4457 *
4458 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4459 **/
4460void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4461{
4462 hw->bus.type = i40e_bus_type_pci_express;
4463
4464 switch (link_status & PCI_EXP_LNKSTA_NLW) {
4465 case PCI_EXP_LNKSTA_NLW_X1:
4466 hw->bus.width = i40e_bus_width_pcie_x1;
4467 break;
4468 case PCI_EXP_LNKSTA_NLW_X2:
4469 hw->bus.width = i40e_bus_width_pcie_x2;
4470 break;
4471 case PCI_EXP_LNKSTA_NLW_X4:
4472 hw->bus.width = i40e_bus_width_pcie_x4;
4473 break;
4474 case PCI_EXP_LNKSTA_NLW_X8:
4475 hw->bus.width = i40e_bus_width_pcie_x8;
4476 break;
4477 default:
4478 hw->bus.width = i40e_bus_width_unknown;
4479 break;
4480 }
4481
4482 switch (link_status & PCI_EXP_LNKSTA_CLS) {
4483 case PCI_EXP_LNKSTA_CLS_2_5GB:
4484 hw->bus.speed = i40e_bus_speed_2500;
4485 break;
4486 case PCI_EXP_LNKSTA_CLS_5_0GB:
4487 hw->bus.speed = i40e_bus_speed_5000;
4488 break;
4489 case PCI_EXP_LNKSTA_CLS_8_0GB:
4490 hw->bus.speed = i40e_bus_speed_8000;
4491 break;
4492 default:
4493 hw->bus.speed = i40e_bus_speed_unknown;
4494 break;
4495 }
4496}
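
/* Illustrative sketch only: feeding the PCIe Link Status word into
 * i40e_set_pci_config_data().  'pdev' is assumed to be the adapter's
 * struct pci_dev, obtained by the caller; pcie_capability_read_word()
 * is the standard kernel helper for reading PCIe capability registers.
 */
static void i40e_example_cache_pci_info(struct i40e_hw *hw,
					struct pci_dev *pdev)
{
	u16 link_status = 0;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);

	/* hw->bus.speed and hw->bus.width are now populated */
}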
4497
4498/**
4499 * i40e_aq_debug_dump
4500 * @hw: pointer to the hardware structure
4501 * @cluster_id: specific cluster to dump
4502 * @table_id: table id within cluster
4503 * @start_index: index of line in the block to read
4504 * @buff_size: dump buffer size
4505 * @buff: dump buffer
4506 * @ret_buff_size: actual buffer size returned
4507 * @ret_next_table: next block to read
4508 * @ret_next_index: next index to read
4509 * @cmd_details: pointer to command details structure or NULL
4510 *
4511 * Dump internal FW/HW data for debug purposes.
4512 *
4513 **/
4514i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4515 u8 table_id, u32 start_index, u16 buff_size,
4516 void *buff, u16 *ret_buff_size,
4517 u8 *ret_next_table, u32 *ret_next_index,
4518 struct i40e_asq_cmd_details *cmd_details)
4519{
4520 struct i40e_aq_desc desc;
4521 struct i40e_aqc_debug_dump_internals *cmd =
4522 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4523 struct i40e_aqc_debug_dump_internals *resp =
4524 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4525 i40e_status status;
4526
4527 if (buff_size == 0 || !buff)
4528 return I40E_ERR_PARAM;
4529
4530 i40e_fill_default_direct_cmd_desc(&desc,
4531 i40e_aqc_opc_debug_dump_internals);
4532 /* Indirect Command */
4533 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4534 if (buff_size > I40E_AQ_LARGE_BUF)
4535 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4536
4537 cmd->cluster_id = cluster_id;
4538 cmd->table_id = table_id;
4539 cmd->idx = cpu_to_le32(start_index);
4540
4541 desc.datalen = cpu_to_le16(buff_size);
4542
4543 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4544 if (!status) {
4545 if (ret_buff_size)
4546 *ret_buff_size = le16_to_cpu(desc.datalen);
4547 if (ret_next_table)
4548 *ret_next_table = resp->table_id;
4549 if (ret_next_index)
4550 *ret_next_index = le32_to_cpu(resp->idx);
4551 }
4552
4553 return status;
4554}
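
/* Illustrative sketch only: dumping one block of FW/HW debug data.
 * The cluster and table IDs are device/firmware specific; the values
 * used below are placeholders, not meaningful identifiers.
 */
static i40e_status i40e_example_debug_dump_block(struct i40e_hw *hw)
{
	u8 next_table = 0;
	u32 next_index = 0;
	u16 ret_size = 0;
	i40e_status status;
	void *buff;

	buff = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!buff)
		return I40E_ERR_NO_MEMORY;

	/* cluster 0x1, table 0x0, starting at index 0 (placeholders) */
	status = i40e_aq_debug_dump(hw, 0x1, 0x0, 0, I40E_AQ_LARGE_BUF,
				    buff, &ret_size, &next_table,
				    &next_index, NULL);
	if (!status)
		hw_dbg(hw, "dumped %u bytes, next table %u index %u\n",
		       ret_size, next_table, next_index);

	kfree(buff);
	return status;
}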
4555
4556/**
4557 * i40e_read_bw_from_alt_ram
4558 * @hw: pointer to the hardware structure
4559 * @max_bw: pointer for max_bw read
4560 * @min_bw: pointer for min_bw read
4561 * @min_valid: pointer for bool that is true if min_bw is a valid value
4562 * @max_valid: pointer for bool that is true if max_bw is a valid value
4563 *
4564 * Read bandwidth from the alternate RAM for the given PF
4565 **/
4566i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4567 u32 *max_bw, u32 *min_bw,
4568 bool *min_valid, bool *max_valid)
4569{
4570 i40e_status status;
4571 u32 max_bw_addr, min_bw_addr;
4572
4573 /* Calculate the address of the min/max bw registers */
4574 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4575 I40E_ALT_STRUCT_MAX_BW_OFFSET +
4576 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4577 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4578 I40E_ALT_STRUCT_MIN_BW_OFFSET +
4579 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4580
4581 /* Read the bandwidths from alt ram */
4582 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4583 min_bw_addr, min_bw);
4584
4585 if (*min_bw & I40E_ALT_BW_VALID_MASK)
4586 *min_valid = true;
4587 else
4588 *min_valid = false;
4589
4590 if (*max_bw & I40E_ALT_BW_VALID_MASK)
4591 *max_valid = true;
4592 else
4593 *max_valid = false;
4594
4595 return status;
4596}
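
/* Illustrative sketch only: querying the PF's min/max bandwidth from
 * the alternate RAM and acting only on words flagged as valid.
 */
static void i40e_example_query_alt_bw(struct i40e_hw *hw)
{
	bool min_valid = false, max_valid = false;
	u32 max_bw = 0, min_bw = 0;
	i40e_status status;

	status = i40e_read_bw_from_alt_ram(hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);
	if (status)
		return;

	if (min_valid)
		hw_dbg(hw, "alt RAM min_bw word: 0x%08x\n", min_bw);
	if (max_valid)
		hw_dbg(hw, "alt RAM max_bw word: 0x%08x\n", max_bw);
}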
4597
4598/**
4599 * i40e_aq_configure_partition_bw
4600 * @hw: pointer to the hardware structure
4601 * @bw_data: buffer holding the valid PFs and BW limits
4602 * @cmd_details: pointer to command details
4603 *
4604 * Configure the partitions' guaranteed/max BW
4605 **/
4606i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4607 struct i40e_aqc_configure_partition_bw_data *bw_data,
4608 struct i40e_asq_cmd_details *cmd_details)
4609{
4610 i40e_status status;
4611 struct i40e_aq_desc desc;
4612 u16 bwd_size = sizeof(*bw_data);
4613
4614 i40e_fill_default_direct_cmd_desc(&desc,
4615 i40e_aqc_opc_configure_partition_bw);
4616
4617 /* Indirect command */
4618 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4619 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4620
4621 if (bwd_size > I40E_AQ_LARGE_BUF)
4622 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4623
4624 desc.datalen = cpu_to_le16(bwd_size);
4625
4626 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4627 cmd_details);
4628
4629 return status;
4630}
4631
4632/**
4633 * i40e_read_phy_register_clause22
4634 * @hw: pointer to the HW structure
4635 * @reg: register address in the page
4636 * @phy_addr: PHY address on MDIO interface
4637 * @value: PHY register value
4638 *
4639 * Reads specified PHY register value
4640 **/
4641i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4642 u16 reg, u8 phy_addr, u16 *value)
4643{
4644 i40e_status status = I40E_ERR_TIMEOUT;
4645 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4646 u32 command = 0;
4647 u16 retry = 1000;
4648
4649 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4650 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4651 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4652 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4653 (I40E_GLGEN_MSCA_MDICMD_MASK);
4654 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4655 do {
4656 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4657 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4658 status = 0;
4659 break;
4660 }
4661 udelay(10);
4662 retry--;
4663 } while (retry);
4664
4665 if (status) {
4666 i40e_debug(hw, I40E_DEBUG_PHY,
4667 "PHY: Can't write command to external PHY.\n");
4668 } else {
4669 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4670 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4671 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4672 }
4673
4674 return status;
4675}
4676
4677/**
4678 * i40e_write_phy_register_clause22
4679 * @hw: pointer to the HW structure
4680 * @reg: register address in the page
4681 * @phy_addr: PHY address on MDIO interface
4682 * @value: PHY register value
4683 *
4684 * Writes specified PHY register value
4685 **/
4686i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4687 u16 reg, u8 phy_addr, u16 value)
4688{
4689 i40e_status status = I40E_ERR_TIMEOUT;
4690 u8 port_num = (u8)hw->func_caps.mdio_port_num;
4691 u32 command = 0;
4692 u16 retry = 1000;
4693
4694 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4695 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4696
4697 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4698 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4699 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4700 (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4701 (I40E_GLGEN_MSCA_MDICMD_MASK);
4702
4703 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4704 do {
4705 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4706 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4707 status = 0;
4708 break;
4709 }
4710 udelay(10);
4711 retry--;
4712 } while (retry);
4713
4714 return status;
4715}
4716
4717/**
4718 * i40e_read_phy_register_clause45
4719 * @hw: pointer to the HW structure
4720 * @page: registers page number
4721 * @reg: register address in the page
4722 * @phy_addr: PHY address on MDIO interface
4723 * @value: PHY register value
4724 *
4725 * Reads specified PHY register value
4726 **/
4727i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4728 u8 page, u16 reg, u8 phy_addr, u16 *value)
4729{
4730 i40e_status status = I40E_ERR_TIMEOUT;
4731 u32 command = 0;
4732 u16 retry = 1000;
4733 u8 port_num = hw->func_caps.mdio_port_num;
4734
4735 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4736 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4737 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4738 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4739 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4740 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4741 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4742 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4743 do {
4744 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4745 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4746 status = 0;
4747 break;
4748 }
4749 usleep_range(10, 20);
4750 retry--;
4751 } while (retry);
4752
4753 if (status) {
4754 i40e_debug(hw, I40E_DEBUG_PHY,
4755 "PHY: Can't write command to external PHY.\n");
4756 goto phy_read_end;
4757 }
4758
4759 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4760 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4761 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4762 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4763 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4764 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4765 status = I40E_ERR_TIMEOUT;
4766 retry = 1000;
4767 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4768 do {
4769 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4770 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4771 status = 0;
4772 break;
4773 }
4774 usleep_range(10, 20);
4775 retry--;
4776 } while (retry);
4777
4778 if (!status) {
4779 command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4780 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4781 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4782 } else {
4783 i40e_debug(hw, I40E_DEBUG_PHY,
4784 "PHY: Can't read register value from external PHY.\n");
4785 }
4786
4787phy_read_end:
4788 return status;
4789}
4790
4791/**
4792 * i40e_write_phy_register_clause45
4793 * @hw: pointer to the HW structure
4794 * @page: registers page number
4795 * @reg: register address in the page
4796 * @phy_addr: PHY address on MDIO interface
4797 * @value: PHY register value
4798 *
4799 * Writes value to specified PHY register
4800 **/
4801i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4802 u8 page, u16 reg, u8 phy_addr, u16 value)
4803{
4804 i40e_status status = I40E_ERR_TIMEOUT;
4805 u32 command = 0;
4806 u16 retry = 1000;
4807 u8 port_num = hw->func_caps.mdio_port_num;
4808
4809 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4810 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4811 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4812 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4813 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4814 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4815 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4816 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4817 do {
4818 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4819 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4820 status = 0;
4821 break;
4822 }
4823 usleep_range(10, 20);
4824 retry--;
4825 } while (retry);
4826 if (status) {
4827 i40e_debug(hw, I40E_DEBUG_PHY,
4828 "PHY: Can't write command to external PHY.\n");
4829 goto phy_write_end;
4830 }
4831
4832 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4833 wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4834
4835 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4836 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4837 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4838 (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4839 (I40E_GLGEN_MSCA_MDICMD_MASK) |
4840 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4841 status = I40E_ERR_TIMEOUT;
4842 retry = 1000;
4843 wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4844 do {
4845 command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4846 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4847 status = 0;
4848 break;
4849 }
4850 usleep_range(10, 20);
4851 retry--;
4852 } while (retry);
4853
4854phy_write_end:
4855 return status;
4856}
4857
4858/**
4859 * i40e_write_phy_register
4860 * @hw: pointer to the HW structure
4861 * @page: registers page number
4862 * @reg: register address in the page
4863 * @phy_addr: PHY address on MDIO interface
4864 * @value: PHY register value
4865 *
4866 * Writes value to specified PHY register
4867 **/
4868i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4869 u8 page, u16 reg, u8 phy_addr, u16 value)
4870{
4871 i40e_status status;
4872
4873 switch (hw->device_id) {
4874 case I40E_DEV_ID_1G_BASE_T_X722:
4875 status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4876 value);
4877 break;
4878 case I40E_DEV_ID_5G_BASE_T_BC:
4879 case I40E_DEV_ID_10G_BASE_T:
4880 case I40E_DEV_ID_10G_BASE_T4:
4881 case I40E_DEV_ID_10G_BASE_T_BC:
4882 case I40E_DEV_ID_10G_BASE_T_X722:
4883 case I40E_DEV_ID_25G_B:
4884 case I40E_DEV_ID_25G_SFP28:
4885 status = i40e_write_phy_register_clause45(hw, page, reg,
4886 phy_addr, value);
4887 break;
4888 default:
4889 status = I40E_ERR_UNKNOWN_PHY;
4890 break;
4891 }
4892
4893 return status;
4894}
4895
4896/**
4897 * i40e_read_phy_register
4898 * @hw: pointer to the HW structure
4899 * @page: registers page number
4900 * @reg: register address in the page
4901 * @phy_addr: PHY address on MDIO interface
4902 * @value: PHY register value
4903 *
4904 * Reads specified PHY register value
4905 **/
4906i40e_status i40e_read_phy_register(struct i40e_hw *hw,
4907 u8 page, u16 reg, u8 phy_addr, u16 *value)
4908{
4909 i40e_status status;
4910
4911 switch (hw->device_id) {
4912 case I40E_DEV_ID_1G_BASE_T_X722:
4913 status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4914 value);
4915 break;
4916 case I40E_DEV_ID_5G_BASE_T_BC:
4917 case I40E_DEV_ID_10G_BASE_T:
4918 case I40E_DEV_ID_10G_BASE_T4:
4919 case I40E_DEV_ID_10G_BASE_T_BC:
4920 case I40E_DEV_ID_10G_BASE_T_X722:
4921 case I40E_DEV_ID_25G_B:
4922 case I40E_DEV_ID_25G_SFP28:
4923 status = i40e_read_phy_register_clause45(hw, page, reg,
4924 phy_addr, value);
4925 break;
4926 default:
4927 status = I40E_ERR_UNKNOWN_PHY;
4928 break;
4929 }
4930
4931 return status;
4932}
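
/* Illustrative sketch only: reading one external PHY register through
 * the MDIO helpers above.  The LED provisioning register on the common
 * page is used purely as a convenient, already-defined example address.
 */
static i40e_status i40e_example_read_led_reg(struct i40e_hw *hw, u16 *val)
{
	u8 port_num = (u8)(rd32(hw, I40E_PFGEN_PORTNUM) &
			   I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	u8 phy_addr = i40e_get_phy_address(hw, port_num);

	/* clause 22 vs. clause 45 is selected internally by device ID */
	return i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
				      I40E_PHY_LED_PROV_REG_1, phy_addr, val);
}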
4933
4934/**
4935 * i40e_get_phy_address
4936 * @hw: pointer to the HW structure
4937 * @dev_num: PHY port number whose address we want
4938 *
4939 * Gets PHY address for current port
4940 **/
4941u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4942{
4943 u8 port_num = hw->func_caps.mdio_port_num;
4944 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4945
4946 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4947}
4948
4949/**
4950 * i40e_blink_phy_link_led
4951 * @hw: pointer to the HW structure
4952 * @time: how long the LED will blink, in seconds
4953 * @interval: gap between LED on and off, in msecs
4954 *
4955 * Blinks PHY link LED
4956 **/
4957i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
4958 u32 time, u32 interval)
4959{
4960 i40e_status status = 0;
4961 u32 i;
4962 u16 led_ctl;
4963 u16 gpio_led_port;
4964 u16 led_reg;
4965 u16 led_addr = I40E_PHY_LED_PROV_REG_1;
4966 u8 phy_addr = 0;
4967 u8 port_num;
4968
4969 i = rd32(hw, I40E_PFGEN_PORTNUM);
4970 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
4971 phy_addr = i40e_get_phy_address(hw, port_num);
4972
4973 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
4974 led_addr++) {
4975 status = i40e_read_phy_register_clause45(hw,
4976 I40E_PHY_COM_REG_PAGE,
4977 led_addr, phy_addr,
4978 &led_reg);
4979 if (status)
4980 goto phy_blinking_end;
4981 led_ctl = led_reg;
4982 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
4983 led_reg = 0;
4984 status = i40e_write_phy_register_clause45(hw,
4985 I40E_PHY_COM_REG_PAGE,
4986 led_addr, phy_addr,
4987 led_reg);
4988 if (status)
4989 goto phy_blinking_end;
4990 break;
4991 }
4992 }
4993
4994 if (time > 0 && interval > 0) {
4995 for (i = 0; i < time * 1000; i += interval) {
4996 status = i40e_read_phy_register_clause45(hw,
4997 I40E_PHY_COM_REG_PAGE,
4998 led_addr, phy_addr, &led_reg);
4999 if (status)
5000 goto restore_config;
5001 if (led_reg & I40E_PHY_LED_MANUAL_ON)
5002 led_reg = 0;
5003 else
5004 led_reg = I40E_PHY_LED_MANUAL_ON;
5005 status = i40e_write_phy_register_clause45(hw,
5006 I40E_PHY_COM_REG_PAGE,
5007 led_addr, phy_addr, led_reg);
5008 if (status)
5009 goto restore_config;
5010 msleep(interval);
5011 }
5012 }
5013
5014restore_config:
5015 status = i40e_write_phy_register_clause45(hw,
5016 I40E_PHY_COM_REG_PAGE,
5017 led_addr, phy_addr, led_ctl);
5018
5019phy_blinking_end:
5020 return status;
5021}
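
/* Illustrative sketch only: blink the PHY link LED for 5 seconds with
 * a 500 ms on/off interval, e.g. for port identification.
 */
static void i40e_example_identify_port(struct i40e_hw *hw)
{
	if (i40e_blink_phy_link_led(hw, 5, 500))
		hw_dbg(hw, "LED blink request failed\n");
}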
5022
5023/**
5024 * i40e_led_get_reg - read LED register
5025 * @hw: pointer to the HW structure
5026 * @led_addr: LED register address
5027 * @reg_val: read register value
5028 **/
5029static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5030 u32 *reg_val)
5031{
5032 enum i40e_status_code status;
5033 u8 phy_addr = 0;
5034 u8 port_num;
5035 u32 i;
5036
5037 *reg_val = 0;
5038 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5039 status =
5040 i40e_aq_get_phy_register(hw,
5041 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5042 I40E_PHY_COM_REG_PAGE, true,
5043 I40E_PHY_LED_PROV_REG_1,
5044 reg_val, NULL);
5045 } else {
5046 i = rd32(hw, I40E_PFGEN_PORTNUM);
5047 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5048 phy_addr = i40e_get_phy_address(hw, port_num);
5049 status = i40e_read_phy_register_clause45(hw,
5050 I40E_PHY_COM_REG_PAGE,
5051 led_addr, phy_addr,
5052 (u16 *)reg_val);
5053 }
5054 return status;
5055}
5056
5057/**
5058 * i40e_led_set_reg - write LED register
5059 * @hw: pointer to the HW structure
5060 * @led_addr: LED register address
5061 * @reg_val: register value to write
5062 **/
5063static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5064 u32 reg_val)
5065{
5066 enum i40e_status_code status;
5067 u8 phy_addr = 0;
5068 u8 port_num;
5069 u32 i;
5070
5071 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5072 status =
5073 i40e_aq_set_phy_register(hw,
5074 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5075 I40E_PHY_COM_REG_PAGE, true,
5076 I40E_PHY_LED_PROV_REG_1,
5077 reg_val, NULL);
5078 } else {
5079 i = rd32(hw, I40E_PFGEN_PORTNUM);
5080 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5081 phy_addr = i40e_get_phy_address(hw, port_num);
5082 status = i40e_write_phy_register_clause45(hw,
5083 I40E_PHY_COM_REG_PAGE,
5084 led_addr, phy_addr,
5085 (u16)reg_val);
5086 }
5087
5088 return status;
5089}
5090
5091/**
5092 * i40e_led_get_phy - return current on/off mode
5093 * @hw: pointer to the hw struct
5094 * @led_addr: address of led register to use
5095 * @val: original value of register to use
5096 *
5097 **/
5098i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5099 u16 *val)
5100{
5101 i40e_status status = 0;
5102 u16 gpio_led_port;
5103 u8 phy_addr = 0;
5104 u16 reg_val;
5105 u16 temp_addr;
5106 u8 port_num;
5107 u32 i;
5108 u32 reg_val_aq;
5109
5110 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5111 status =
5112 i40e_aq_get_phy_register(hw,
5113 I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5114 I40E_PHY_COM_REG_PAGE, true,
5115 I40E_PHY_LED_PROV_REG_1,
5116 &reg_val_aq, NULL);
5117 if (status == I40E_SUCCESS)
5118 *val = (u16)reg_val_aq;
5119 return status;
5120 }
5121 temp_addr = I40E_PHY_LED_PROV_REG_1;
5122 i = rd32(hw, I40E_PFGEN_PORTNUM);
5123 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5124 phy_addr = i40e_get_phy_address(hw, port_num);
5125
5126 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5127 temp_addr++) {
5128 status = i40e_read_phy_register_clause45(hw,
5129 I40E_PHY_COM_REG_PAGE,
5130 temp_addr, phy_addr,
5131 &reg_val);
5132 if (status)
5133 return status;
5134 *val = reg_val;
5135 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5136 *led_addr = temp_addr;
5137 break;
5138 }
5139 }
5140 return status;
5141}
5142
5143/**
5144 * i40e_led_set_phy
5145 * @hw: pointer to the HW structure
5146 * @on: true to turn the LED on, false to turn it off
5147 * @led_addr: address of the LED register to use
5148 * @mode: original value, plus I40E_PHY_LED_MODE_ORIG if it should be restored
5149 *
5150 * Set the LED on or off when it is controlled by the PHY
5151 *
5152 **/
5153i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5154 u16 led_addr, u32 mode)
5155{
5156 i40e_status status = 0;
5157 u32 led_ctl = 0;
5158 u32 led_reg = 0;
5159
5160 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5161 if (status)
5162 return status;
5163 led_ctl = led_reg;
5164 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5165 led_reg = 0;
5166 status = i40e_led_set_reg(hw, led_addr, led_reg);
5167 if (status)
5168 return status;
5169 }
5170 status = i40e_led_get_reg(hw, led_addr, &led_reg);
5171 if (status)
5172 goto restore_config;
5173 if (on)
5174 led_reg = I40E_PHY_LED_MANUAL_ON;
5175 else
5176 led_reg = 0;
5177
5178 status = i40e_led_set_reg(hw, led_addr, led_reg);
5179 if (status)
5180 goto restore_config;
5181 if (mode & I40E_PHY_LED_MODE_ORIG) {
5182 led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5183 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5184 }
5185 return status;
5186
5187restore_config:
5188 status = i40e_led_set_reg(hw, led_addr, led_ctl);
5189 return status;
5190}
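
/* Illustrative sketch only: force the PHY-controlled LED on and later
 * restore its original behaviour, mirroring how an identify operation
 * could use the helpers above.
 */
static i40e_status i40e_example_led_force_on(struct i40e_hw *hw)
{
	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
	u16 orig_mode = 0;
	i40e_status status;

	status = i40e_led_get_phy(hw, &led_addr, &orig_mode);
	if (status)
		return status;

	/* turn the LED on ... */
	status = i40e_led_set_phy(hw, true, led_addr, 0);
	if (status)
		return status;

	/* ... then hand control back to the original link-mode setting */
	return i40e_led_set_phy(hw, false, led_addr,
				orig_mode | I40E_PHY_LED_MODE_ORIG);
}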
5191
5192/**
5193 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5194 * @hw: pointer to the hw struct
5195 * @reg_addr: register address
5196 * @reg_val: ptr to register value
5197 * @cmd_details: pointer to command details structure or NULL
5198 *
5199 * Use the firmware to read the Rx control register,
5200 * especially useful if the Rx unit is under heavy pressure
5201 **/
5202i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5203 u32 reg_addr, u32 *reg_val,
5204 struct i40e_asq_cmd_details *cmd_details)
5205{
5206 struct i40e_aq_desc desc;
5207 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5208 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5209 i40e_status status;
5210
5211 if (!reg_val)
5212 return I40E_ERR_PARAM;
5213
5214 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5215
5216 cmd_resp->address = cpu_to_le32(reg_addr);
5217
5218 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5219
5220 if (status == 0)
5221 *reg_val = le32_to_cpu(cmd_resp->value);
5222
5223 return status;
5224}
5225
5226/**
5227 * i40e_read_rx_ctl - read from an Rx control register
5228 * @hw: pointer to the hw struct
5229 * @reg_addr: register address
5230 **/
5231u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5232{
5233 i40e_status status = 0;
5234 bool use_register;
5235 int retry = 5;
5236 u32 val = 0;
5237
5238 use_register = (((hw->aq.api_maj_ver == 1) &&
5239 (hw->aq.api_min_ver < 5)) ||
5240 (hw->mac.type == I40E_MAC_X722));
5241 if (!use_register) {
5242do_retry:
5243 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5244 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5245 usleep_range(1000, 2000);
5246 retry--;
5247 goto do_retry;
5248 }
5249 }
5250
5251 /* if the AQ access failed, try the old-fashioned way */
5252 if (status || use_register)
5253 val = rd32(hw, reg_addr);
5254
5255 return val;
5256}
5257
5258/**
5259 * i40e_aq_rx_ctl_write_register
5260 * @hw: pointer to the hw struct
5261 * @reg_addr: register address
5262 * @reg_val: register value
5263 * @cmd_details: pointer to command details structure or NULL
5264 *
5265 * Use the firmware to write to an Rx control register,
5266 * especially useful if the Rx unit is under heavy pressure
5267 **/
5268i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5269 u32 reg_addr, u32 reg_val,
5270 struct i40e_asq_cmd_details *cmd_details)
5271{
5272 struct i40e_aq_desc desc;
5273 struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5274 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5275 i40e_status status;
5276
5277 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5278
5279 cmd->address = cpu_to_le32(reg_addr);
5280 cmd->value = cpu_to_le32(reg_val);
5281
5282 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5283
5284 return status;
5285}
5286
5287/**
5288 * i40e_write_rx_ctl - write to an Rx control register
5289 * @hw: pointer to the hw struct
5290 * @reg_addr: register address
5291 * @reg_val: register value
5292 **/
5293void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5294{
5295 i40e_status status = 0;
5296 bool use_register;
5297 int retry = 5;
5298
5299 use_register = (((hw->aq.api_maj_ver == 1) &&
5300 (hw->aq.api_min_ver < 5)) ||
5301 (hw->mac.type == I40E_MAC_X722));
5302 if (!use_register) {
5303do_retry:
5304 status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5305 reg_val, NULL);
5306 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5307 usleep_range(1000, 2000);
5308 retry--;
5309 goto do_retry;
5310 }
5311 }
5312
5313 /* if the AQ access failed, try the old-fashioned way */
5314 if (status || use_register)
5315 wr32(hw, reg_addr, reg_val);
5316}
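
/* Illustrative sketch only: a read-modify-write of an Rx control
 * register through the wrappers above, which transparently fall back
 * to direct MMIO when the AQ path is unavailable.
 */
static void i40e_example_rmw_rx_ctl(struct i40e_hw *hw, u32 set_bits)
{
	u32 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);

	val |= set_bits;
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
}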
5317
5318/**
5319 * i40e_mdio_if_number_selection - MDIO I/F number selection
5320 * @hw: pointer to the hw struct
5321 * @set_mdio: use MDIO I/F number specified by mdio_num
5322 * @mdio_num: MDIO I/F number
5323 * @cmd: pointer to PHY Register command structure
5324 **/
5325static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5326 u8 mdio_num,
5327 struct i40e_aqc_phy_register_access *cmd)
5328{
5329 if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5330 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5331 cmd->cmd_flags |=
5332 I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5333 ((mdio_num <<
5334 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5335 I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5336 else
5337 i40e_debug(hw, I40E_DEBUG_PHY,
5338 "MDIO I/F number selection not supported by current FW version.\n");
5339 }
5340}
5341
5342/**
5343 * i40e_aq_set_phy_register_ext
5344 * @hw: pointer to the hw struct
5345 * @phy_select: select which phy should be accessed
5346 * @dev_addr: PHY device address
5347 * @page_change: flag to indicate if phy page should be updated
5348 * @set_mdio: use MDIO I/F number specified by mdio_num
5349 * @mdio_num: MDIO I/F number
5350 * @reg_addr: PHY register address
5351 * @reg_val: new register value
5352 * @cmd_details: pointer to command details structure or NULL
5353 *
5354 * Write the external PHY register.
5355 * NOTE: In common cases the MDIO I/F number should not be changed; that's why
5356 * you may use the simple wrapper i40e_aq_set_phy_register.
5357 **/
5358enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5359 u8 phy_select, u8 dev_addr, bool page_change,
5360 bool set_mdio, u8 mdio_num,
5361 u32 reg_addr, u32 reg_val,
5362 struct i40e_asq_cmd_details *cmd_details)
5363{
5364 struct i40e_aq_desc desc;
5365 struct i40e_aqc_phy_register_access *cmd =
5366 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5367 i40e_status status;
5368
5369 i40e_fill_default_direct_cmd_desc(&desc,
5370 i40e_aqc_opc_set_phy_register);
5371
5372 cmd->phy_interface = phy_select;
5373 cmd->dev_address = dev_addr;
5374 cmd->reg_address = cpu_to_le32(reg_addr);
5375 cmd->reg_value = cpu_to_le32(reg_val);
5376
5377 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5378
5379 if (!page_change)
5380 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5381
5382 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5383
5384 return status;
5385}
5386
5387/**
5388 * i40e_aq_get_phy_register_ext
5389 * @hw: pointer to the hw struct
5390 * @phy_select: select which phy should be accessed
5391 * @dev_addr: PHY device address
5392 * @page_change: flag to indicate if phy page should be updated
5393 * @set_mdio: use MDIO I/F number specified by mdio_num
5394 * @mdio_num: MDIO I/F number
5395 * @reg_addr: PHY register address
5396 * @reg_val: read register value
5397 * @cmd_details: pointer to command details structure or NULL
5398 *
5399 * Read the external PHY register.
5400 * NOTE: In common cases the MDIO I/F number should not be changed; that's why
5401 * you may use the simple wrapper i40e_aq_get_phy_register.
5402 **/
5403enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5404 u8 phy_select, u8 dev_addr, bool page_change,
5405 bool set_mdio, u8 mdio_num,
5406 u32 reg_addr, u32 *reg_val,
5407 struct i40e_asq_cmd_details *cmd_details)
5408{
5409 struct i40e_aq_desc desc;
5410 struct i40e_aqc_phy_register_access *cmd =
5411 (struct i40e_aqc_phy_register_access *)&desc.params.raw;
5412 i40e_status status;
5413
5414 i40e_fill_default_direct_cmd_desc(&desc,
5415 i40e_aqc_opc_get_phy_register);
5416
5417 cmd->phy_interface = phy_select;
5418 cmd->dev_address = dev_addr;
5419 cmd->reg_address = cpu_to_le32(reg_addr);
5420
5421 i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5422
5423 if (!page_change)
5424 cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5425
5426 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5427 if (!status)
5428 *reg_val = le32_to_cpu(cmd->reg_value);
5429
5430 return status;
5431}
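
/* Illustrative sketch only: in the common case the MDIO I/F number is
 * left alone, so the simple i40e_aq_get_phy_register() wrapper (used
 * elsewhere in this file) is sufficient for a one-off register read on
 * the external PHY.
 */
static i40e_status i40e_example_read_ext_phy_reg(struct i40e_hw *hw,
						 u32 reg_addr, u32 *reg_val)
{
	return i40e_aq_get_phy_register(hw,
					I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					I40E_PHY_COM_REG_PAGE, true,
					reg_addr, reg_val, NULL);
}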
5432
5433/**
5434 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5435 * @hw: pointer to the hw struct
5436 * @buff: command buffer (size in bytes = buff_size)
5437 * @buff_size: buffer size in bytes
5438 * @track_id: package tracking id
5439 * @error_offset: returns error offset
5440 * @error_info: returns error information
5441 * @cmd_details: pointer to command details structure or NULL
5442 **/
5443enum
5444i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5445 u16 buff_size, u32 track_id,
5446 u32 *error_offset, u32 *error_info,
5447 struct i40e_asq_cmd_details *cmd_details)
5448{
5449 struct i40e_aq_desc desc;
5450 struct i40e_aqc_write_personalization_profile *cmd =
5451 (struct i40e_aqc_write_personalization_profile *)
5452 &desc.params.raw;
5453 struct i40e_aqc_write_ddp_resp *resp;
5454 i40e_status status;
5455
5456 i40e_fill_default_direct_cmd_desc(&desc,
5457 i40e_aqc_opc_write_personalization_profile);
5458
5459 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5460 if (buff_size > I40E_AQ_LARGE_BUF)
5461 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5462
5463 desc.datalen = cpu_to_le16(buff_size);
5464
5465 cmd->profile_track_id = cpu_to_le32(track_id);
5466
5467 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5468 if (!status) {
5469 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5470 if (error_offset)
5471 *error_offset = le32_to_cpu(resp->error_offset);
5472 if (error_info)
5473 *error_info = le32_to_cpu(resp->error_info);
5474 }
5475
5476 return status;
5477}
5478
5479/**
5480 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5481 * @hw: pointer to the hw struct
5482 * @buff: command buffer (size in bytes = buff_size)
5483 * @buff_size: buffer size in bytes
5484 * @flags: AdminQ command flags
5485 * @cmd_details: pointer to command details structure or NULL
5486 **/
5487enum
5488i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5489 u16 buff_size, u8 flags,
5490 struct i40e_asq_cmd_details *cmd_details)
5491{
5492 struct i40e_aq_desc desc;
5493 struct i40e_aqc_get_applied_profiles *cmd =
5494 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5495 i40e_status status;
5496
5497 i40e_fill_default_direct_cmd_desc(&desc,
5498 i40e_aqc_opc_get_personalization_profile_list);
5499
5500 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5501 if (buff_size > I40E_AQ_LARGE_BUF)
5502 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5503 desc.datalen = cpu_to_le16(buff_size);
5504
5505 cmd->flags = flags;
5506
5507 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5508
5509 return status;
5510}
5511
5512/**
5513 * i40e_find_segment_in_package
5514 * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
5515 * @pkg_hdr: pointer to the package header to be searched
5516 *
5517 * This function searches a package file for a particular segment type. On
5518 * success it returns a pointer to the segment header, otherwise it will
5519 * return NULL.
5520 **/
5521struct i40e_generic_seg_header *
5522i40e_find_segment_in_package(u32 segment_type,
5523 struct i40e_package_header *pkg_hdr)
5524{
5525 struct i40e_generic_seg_header *segment;
5526 u32 i;
5527
5528 /* Search all package segments for the requested segment type */
5529 for (i = 0; i < pkg_hdr->segment_count; i++) {
5530 segment =
5531 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5532 pkg_hdr->segment_offset[i]);
5533
5534 if (segment->type == segment_type)
5535 return segment;
5536 }
5537
5538 return NULL;
5539}
5540
5541/* Get section table in profile */
5542#define I40E_SECTION_TABLE(profile, sec_tbl) \
5543 do { \
5544 struct i40e_profile_segment *p = (profile); \
5545 u32 count; \
5546 u32 *nvm; \
5547 count = p->device_table_count; \
5548 nvm = (u32 *)&p->device_table[count]; \
5549 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5550 } while (0)
5551
5552/* Get section header in profile */
5553#define I40E_SECTION_HEADER(profile, offset) \
5554 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5555
5556/**
5557 * i40e_find_section_in_profile
5558 * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
5559 * @profile: pointer to the i40e segment header to be searched
5560 *
5561 * This function searches the i40e segment for a particular section type. On
5562 * success it returns a pointer to the section header, otherwise it will
5563 * return NULL.
5564 **/
5565struct i40e_profile_section_header *
5566i40e_find_section_in_profile(u32 section_type,
5567 struct i40e_profile_segment *profile)
5568{
5569 struct i40e_profile_section_header *sec;
5570 struct i40e_section_table *sec_tbl;
5571 u32 sec_off;
5572 u32 i;
5573
5574 if (profile->header.type != SEGMENT_TYPE_I40E)
5575 return NULL;
5576
5577 I40E_SECTION_TABLE(profile, sec_tbl);
5578
5579 for (i = 0; i < sec_tbl->section_count; i++) {
5580 sec_off = sec_tbl->section_offset[i];
5581 sec = I40E_SECTION_HEADER(profile, sec_off);
5582 if (sec->section.type == section_type)
5583 return sec;
5584 }
5585
5586 return NULL;
5587}
5588
5589/**
5590 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5591 * @hw: pointer to the hw struct
5592 * @aq: command buffer containing all data to execute AQ
5593 **/
5594static enum
5595i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5596 struct i40e_profile_aq_section *aq)
5597{
5598 i40e_status status;
5599 struct i40e_aq_desc desc;
5600 u8 *msg = NULL;
5601 u16 msglen;
5602
5603 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5604 desc.flags |= cpu_to_le16(aq->flags);
5605 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5606
5607 msglen = aq->datalen;
5608 if (msglen) {
5609 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5610 I40E_AQ_FLAG_RD));
5611 if (msglen > I40E_AQ_LARGE_BUF)
5612 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5613 desc.datalen = cpu_to_le16(msglen);
5614 msg = &aq->data[0];
5615 }
5616
5617 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5618
5619 if (status) {
5620 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5621 "unable to exec DDP AQ opcode %u, error %d\n",
5622 aq->opcode, status);
5623 return status;
5624 }
5625
5626 /* copy returned desc to aq_buf */
5627 memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5628
5629 return 0;
5630}
5631
5632/**
5633 * i40e_validate_profile
5634 * @hw: pointer to the hardware structure
5635 * @profile: pointer to the profile segment of the package to be validated
5636 * @track_id: package tracking id
5637 * @rollback: flag indicating whether the profile is for rollback.
5638 *
5639 * Validates supported devices and profile's sections.
5640 */
5641static enum i40e_status_code
5642i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5643 u32 track_id, bool rollback)
5644{
5645 struct i40e_profile_section_header *sec = NULL;
5646 i40e_status status = 0;
5647 struct i40e_section_table *sec_tbl;
5648 u32 vendor_dev_id;
5649 u32 dev_cnt;
5650 u32 sec_off;
5651 u32 i;
5652
5653 if (track_id == I40E_DDP_TRACKID_INVALID) {
5654 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5655 return I40E_NOT_SUPPORTED;
5656 }
5657
5658 dev_cnt = profile->device_table_count;
5659 for (i = 0; i < dev_cnt; i++) {
5660 vendor_dev_id = profile->device_table[i].vendor_dev_id;
5661 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5662 hw->device_id == (vendor_dev_id & 0xFFFF))
5663 break;
5664 }
5665 if (dev_cnt && i == dev_cnt) {
5666 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5667 "Device doesn't support DDP\n");
5668 return I40E_ERR_DEVICE_NOT_SUPPORTED;
5669 }
5670
5671 I40E_SECTION_TABLE(profile, sec_tbl);
5672
5673 /* Validate sections types */
5674 for (i = 0; i < sec_tbl->section_count; i++) {
5675 sec_off = sec_tbl->section_offset[i];
5676 sec = I40E_SECTION_HEADER(profile, sec_off);
5677 if (rollback) {
5678 if (sec->section.type == SECTION_TYPE_MMIO ||
5679 sec->section.type == SECTION_TYPE_AQ ||
5680 sec->section.type == SECTION_TYPE_RB_AQ) {
5681 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5682 "Not a roll-back package\n");
5683 return I40E_NOT_SUPPORTED;
5684 }
5685 } else {
5686 if (sec->section.type == SECTION_TYPE_RB_AQ ||
5687 sec->section.type == SECTION_TYPE_RB_MMIO) {
5688 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5689 "Not an original package\n");
5690 return I40E_NOT_SUPPORTED;
5691 }
5692 }
5693 }
5694
5695 return status;
5696}
5697
5698/**
5699 * i40e_write_profile
5700 * @hw: pointer to the hardware structure
5701 * @profile: pointer to the profile segment of the package to be downloaded
5702 * @track_id: package tracking id
5703 *
5704 * Handles the download of a complete package.
5705 */
5706enum i40e_status_code
5707i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5708 u32 track_id)
5709{
5710 i40e_status status = 0;
5711 struct i40e_section_table *sec_tbl;
5712 struct i40e_profile_section_header *sec = NULL;
5713 struct i40e_profile_aq_section *ddp_aq;
5714 u32 section_size = 0;
5715 u32 offset = 0, info = 0;
5716 u32 sec_off;
5717 u32 i;
5718
5719 status = i40e_validate_profile(hw, profile, track_id, false);
5720 if (status)
5721 return status;
5722
5723 I40E_SECTION_TABLE(profile, sec_tbl);
5724
5725 for (i = 0; i < sec_tbl->section_count; i++) {
5726 sec_off = sec_tbl->section_offset[i];
5727 sec = I40E_SECTION_HEADER(profile, sec_off);
5728 /* Process generic admin command */
5729 if (sec->section.type == SECTION_TYPE_AQ) {
5730 ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5731 status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5732 if (status) {
5733 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5734 "Failed to execute aq: section %d, opcode %u\n",
5735 i, ddp_aq->opcode);
5736 break;
5737 }
5738 sec->section.type = SECTION_TYPE_RB_AQ;
5739 }
5740
5741 /* Skip any non-mmio sections */
5742 if (sec->section.type != SECTION_TYPE_MMIO)
5743 continue;
5744
5745 section_size = sec->section.size +
5746 sizeof(struct i40e_profile_section_header);
5747
5748 /* Write MMIO section */
5749 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5750 track_id, &offset, &info, NULL);
5751 if (status) {
5752 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5753 "Failed to write profile: section %d, offset %d, info %d\n",
5754 i, offset, info);
5755 break;
5756 }
5757 }
5758 return status;
5759}
5760
5761/**
5762 * i40e_rollback_profile
5763 * @hw: pointer to the hardware structure
5764 * @profile: pointer to the profile segment of the package to be removed
5765 * @track_id: package tracking id
5766 *
5767 * Rolls back previously loaded package.
5768 */
5769enum i40e_status_code
5770i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5771 u32 track_id)
5772{
5773 struct i40e_profile_section_header *sec = NULL;
5774 i40e_status status = 0;
5775 struct i40e_section_table *sec_tbl;
5776 u32 offset = 0, info = 0;
5777 u32 section_size = 0;
5778 u32 sec_off;
5779 int i;
5780
5781 status = i40e_validate_profile(hw, profile, track_id, true);
5782 if (status)
5783 return status;
5784
5785 I40E_SECTION_TABLE(profile, sec_tbl);
5786
5787 /* For rollback write sections in reverse */
5788 for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5789 sec_off = sec_tbl->section_offset[i];
5790 sec = I40E_SECTION_HEADER(profile, sec_off);
5791
5792 /* Skip any non-rollback sections */
5793 if (sec->section.type != SECTION_TYPE_RB_MMIO)
5794 continue;
5795
5796 section_size = sec->section.size +
5797 sizeof(struct i40e_profile_section_header);
5798
5799 /* Write roll-back MMIO section */
5800 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5801 track_id, &offset, &info, NULL);
5802 if (status) {
5803 i40e_debug(hw, I40E_DEBUG_PACKAGE,
5804 "Failed to write profile: section %d, offset %d, info %d\n",
5805 i, offset, info);
5806 break;
5807 }
5808 }
5809 return status;
5810}
5811
5812/**
5813 * i40e_add_pinfo_to_list
5814 * @hw: pointer to the hardware structure
5815 * @profile: pointer to the profile segment of the package
5816 * @profile_info_sec: buffer for information section
5817 * @track_id: package tracking id
5818 *
5819 * Register a profile to the list of loaded profiles.
5820 */
5821enum i40e_status_code
5822i40e_add_pinfo_to_list(struct i40e_hw *hw,
5823 struct i40e_profile_segment *profile,
5824 u8 *profile_info_sec, u32 track_id)
5825{
5826 i40e_status status = 0;
5827 struct i40e_profile_section_header *sec = NULL;
5828 struct i40e_profile_info *pinfo;
5829 u32 offset = 0, info = 0;
5830
5831 sec = (struct i40e_profile_section_header *)profile_info_sec;
5832 sec->tbl_size = 1;
5833 sec->data_end = sizeof(struct i40e_profile_section_header) +
5834 sizeof(struct i40e_profile_info);
5835 sec->section.type = SECTION_TYPE_INFO;
5836 sec->section.offset = sizeof(struct i40e_profile_section_header);
5837 sec->section.size = sizeof(struct i40e_profile_info);
5838 pinfo = (struct i40e_profile_info *)(profile_info_sec +
5839 sec->section.offset);
5840 pinfo->track_id = track_id;
5841 pinfo->version = profile->version;
5842 pinfo->op = I40E_DDP_ADD_TRACKID;
5843 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5844
5845 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5846 track_id, &offset, &info, NULL);
5847
5848 return status;
5849}
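
/* Illustrative sketch only: the typical DDP load sequence built from
 * the helpers above - locate the i40e segment in a package, write the
 * profile, then register it in the applied-profile list.  'track_id'
 * is assumed to come from the package's profile info section.
 */
static enum i40e_status_code
i40e_example_load_ddp(struct i40e_hw *hw, struct i40e_package_header *pkg_hdr,
		      u32 track_id)
{
	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
		     sizeof(struct i40e_profile_info)];
	struct i40e_profile_segment *profile;
	enum i40e_status_code status;

	profile = (struct i40e_profile_segment *)
		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (!profile)
		return I40E_ERR_PARAM;

	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	/* record the profile so it can be listed and rolled back later */
	return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
}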
5850
5851/**
5852 * i40e_aq_add_cloud_filters
5853 * @hw: pointer to the hardware structure
5854 * @seid: VSI seid to add the cloud filters to
5855 * @filters: Buffer which contains the filters to be added
5856 * @filter_count: number of filters contained in the buffer
5857 *
5858 * Set the cloud filters for a given VSI. The contents of the
5859 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5860 * of the function.
5861 *
5862 **/
5863enum i40e_status_code
5864i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5865 struct i40e_aqc_cloud_filters_element_data *filters,
5866 u8 filter_count)
5867{
5868 struct i40e_aq_desc desc;
5869 struct i40e_aqc_add_remove_cloud_filters *cmd =
5870 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5871 enum i40e_status_code status;
5872 u16 buff_len;
5873
5874 i40e_fill_default_direct_cmd_desc(&desc,
5875 i40e_aqc_opc_add_cloud_filters);
5876
5877 buff_len = filter_count * sizeof(*filters);
5878 desc.datalen = cpu_to_le16(buff_len);
5879 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5880 cmd->num_filters = filter_count;
5881 cmd->seid = cpu_to_le16(seid);
5882
5883 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5884
5885 return status;
5886}
5887
5888/**
5889 * i40e_aq_add_cloud_filters_bb
5890 * @hw: pointer to the hardware structure
5891 * @seid: VSI seid to add the cloud filters to
5892 * @filters: Buffer which contains the filters in big buffer to be added
5893 * @filter_count: number of filters contained in the buffer
5894 *
5895 * Set the big buffer cloud filters for a given VSI. The contents of the
5896 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5897 * function.
5898 *
5899 **/
5900enum i40e_status_code
5901i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5902 struct i40e_aqc_cloud_filters_element_bb *filters,
5903 u8 filter_count)
5904{
5905 struct i40e_aq_desc desc;
5906 struct i40e_aqc_add_remove_cloud_filters *cmd =
5907 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5908 i40e_status status;
5909 u16 buff_len;
5910 int i;
5911
5912 i40e_fill_default_direct_cmd_desc(&desc,
5913 i40e_aqc_opc_add_cloud_filters);
5914
5915 buff_len = filter_count * sizeof(*filters);
5916 desc.datalen = cpu_to_le16(buff_len);
5917 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5918 cmd->num_filters = filter_count;
5919 cmd->seid = cpu_to_le16(seid);
5920 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5921
5922 for (i = 0; i < filter_count; i++) {
5923 u16 tnl_type;
5924 u32 ti;
5925
5926 tnl_type = (le16_to_cpu(filters[i].element.flags) &
5927 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5928 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5929
5930 /* Due to hardware eccentricities, the VNI for Geneve is placed
5931 * one byte further into the tenant_id field than it is for
5932 * other tunnel types.
5933 */
5934 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5935 ti = le32_to_cpu(filters[i].element.tenant_id);
5936 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5937 }
5938 }
5939
5940 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5941
5942 return status;
5943}
5944
5945/**
5946 * i40e_aq_rem_cloud_filters
5947 * @hw: pointer to the hardware structure
5948 * @seid: VSI seid to remove cloud filters from
5949 * @filters: Buffer which contains the filters to be removed
5950 * @filter_count: number of filters contained in the buffer
5951 *
5952 * Remove the cloud filters for a given VSI. The contents of the
5953 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5954 * of the function.
5955 *
5956 **/
5957enum i40e_status_code
5958i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
5959 struct i40e_aqc_cloud_filters_element_data *filters,
5960 u8 filter_count)
5961{
5962 struct i40e_aq_desc desc;
5963 struct i40e_aqc_add_remove_cloud_filters *cmd =
5964 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5965 enum i40e_status_code status;
5966 u16 buff_len;
5967
5968 i40e_fill_default_direct_cmd_desc(&desc,
5969 i40e_aqc_opc_remove_cloud_filters);
5970
5971 buff_len = filter_count * sizeof(*filters);
5972 desc.datalen = cpu_to_le16(buff_len);
5973 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5974 cmd->num_filters = filter_count;
5975 cmd->seid = cpu_to_le16(seid);
5976
5977 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5978
5979 return status;
5980}
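
/* Illustrative sketch only: adding and later removing the same set of
 * cloud filters with the helpers above.  Filling in the
 * i40e_aqc_cloud_filters_element_data entries (MACs, tenant ID, flags,
 * target queue) is the caller's responsibility and is omitted here.
 */
static enum i40e_status_code
i40e_example_toggle_cloud_filters(struct i40e_hw *hw, u16 vsi_seid,
				  struct i40e_aqc_cloud_filters_element_data *filters,
				  u8 count, bool add)
{
	if (add)
		return i40e_aq_add_cloud_filters(hw, vsi_seid, filters, count);

	return i40e_aq_rem_cloud_filters(hw, vsi_seid, filters, count);
}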
5981
5982/**
5983 * i40e_aq_rem_cloud_filters_bb
5984 * @hw: pointer to the hardware structure
5985 * @seid: VSI seid to remove cloud filters from
5986 * @filters: Buffer which contains the filters in big buffer to be removed
5987 * @filter_count: number of filters contained in the buffer
5988 *
5989 * Remove the big buffer cloud filters for a given VSI. The contents of the
5990 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5991 * function.
5992 *
5993 **/
5994enum i40e_status_code
5995i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5996 struct i40e_aqc_cloud_filters_element_bb *filters,
5997 u8 filter_count)
5998{
5999 struct i40e_aq_desc desc;
6000 struct i40e_aqc_add_remove_cloud_filters *cmd =
6001 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6002 i40e_status status;
6003 u16 buff_len;
6004 int i;
6005
6006 i40e_fill_default_direct_cmd_desc(&desc,
6007 i40e_aqc_opc_remove_cloud_filters);
6008
6009 buff_len = filter_count * sizeof(*filters);
6010 desc.datalen = cpu_to_le16(buff_len);
6011 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6012 cmd->num_filters = filter_count;
6013 cmd->seid = cpu_to_le16(seid);
6014 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6015
6016 for (i = 0; i < filter_count; i++) {
6017 u16 tnl_type;
6018 u32 ti;
6019
6020 tnl_type = (le16_to_cpu(filters[i].element.flags) &
6021 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6022 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6023
6024 /* Due to hardware eccentricities, the VNI for Geneve is placed
6025 * one byte further into the tenant_id field than it is for
6026 * other tunnel types.
6027 */
6028 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6029 ti = le32_to_cpu(filters[i].element.tenant_id);
6030 filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6031 }
6032 }
6033
6034 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6035
6036 return status;
6037}