1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
2/* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
6
7#ifndef _QED_MCP_H
8#define _QED_MCP_H
9
10#include <linux/types.h>
11#include <linux/delay.h>
12#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/qed/qed_fcoe_if.h>
15#include "qed_hsi.h"
16#include "qed_dev_api.h"
17
18#define QED_MFW_REPORT_STR_SIZE 256
19
20struct qed_mcp_link_speed_params {
21 bool autoneg;
22
23 u32 advertised_speeds;
24#define QED_EXT_SPEED_MASK_RES 0x1
25#define QED_EXT_SPEED_MASK_1G 0x2
26#define QED_EXT_SPEED_MASK_10G 0x4
27#define QED_EXT_SPEED_MASK_20G 0x8
28#define QED_EXT_SPEED_MASK_25G 0x10
29#define QED_EXT_SPEED_MASK_40G 0x20
30#define QED_EXT_SPEED_MASK_50G_R 0x40
31#define QED_EXT_SPEED_MASK_50G_R2 0x80
32#define QED_EXT_SPEED_MASK_100G_R2 0x100
33#define QED_EXT_SPEED_MASK_100G_R4 0x200
34#define QED_EXT_SPEED_MASK_100G_P4 0x400
35
36 u32 forced_speed; /* In Mb/s */
37#define QED_EXT_SPEED_1G 0x1
38#define QED_EXT_SPEED_10G 0x2
39#define QED_EXT_SPEED_20G 0x4
40#define QED_EXT_SPEED_25G 0x8
41#define QED_EXT_SPEED_40G 0x10
42#define QED_EXT_SPEED_50G_R 0x20
43#define QED_EXT_SPEED_50G_R2 0x40
44#define QED_EXT_SPEED_100G_R2 0x80
45#define QED_EXT_SPEED_100G_R4 0x100
46#define QED_EXT_SPEED_100G_P4 0x200
47};
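
/* Illustrative sketch (not from the original source): one way a caller might
 * fill this structure for the extended-speed request (the 'ext_speed' member
 * of struct qed_mcp_link_params below). The chosen speeds are arbitrary
 * example values; error handling and the qed_mcp_set_link() call that applies
 * the request are omitted.
 *
 *	struct qed_mcp_link_speed_params ext = { 0 };
 *
 *	ext.autoneg = true;
 *	ext.advertised_speeds = QED_EXT_SPEED_MASK_25G |
 *				QED_EXT_SPEED_MASK_50G_R;
 *
 *	// Alternatively, disable autoneg and force one speed using the
 *	// QED_EXT_SPEED_* encodings above (assumption: these encodings are
 *	// what the extended 'forced_speed' field expects).
 *	// ext.autoneg = false;
 *	// ext.forced_speed = QED_EXT_SPEED_25G;
 */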
48
49struct qed_mcp_link_pause_params {
50 bool autoneg;
51 bool forced_rx;
52 bool forced_tx;
53};
54
55enum qed_mcp_eee_mode {
56 QED_MCP_EEE_DISABLED,
57 QED_MCP_EEE_ENABLED,
58 QED_MCP_EEE_UNSUPPORTED
59};
60
61struct qed_mcp_link_params {
62 struct qed_mcp_link_speed_params speed;
63 struct qed_mcp_link_pause_params pause;
64 u32 loopback_mode;
65 struct qed_link_eee_params eee;
66 u32 fec;
67
68 struct qed_mcp_link_speed_params ext_speed;
69 u32 ext_fec_mode;
70};
71
72struct qed_mcp_link_capabilities {
73 u32 speed_capabilities;
74 bool default_speed_autoneg;
75 u32 fec_default;
76 enum qed_mcp_eee_mode default_eee;
77 u32 eee_lpi_timer;
78 u8 eee_speed_caps;
79
80 u32 default_ext_speed_caps;
81 u32 default_ext_autoneg;
82 u32 default_ext_speed;
83 u32 default_ext_fec;
84};
85
86struct qed_mcp_link_state {
87 bool link_up;
88 u32 min_pf_rate;
89
90 /* Actual link speed in Mb/s */
91 u32 line_speed;
92
93 /* PF max speed in Mb/s, deduced from line_speed
94 * according to PF max bandwidth configuration.
95 */
96 u32 speed;
97
98 bool full_duplex;
99 bool an;
100 bool an_complete;
101 bool parallel_detection;
102 bool pfc_enabled;
103
104 u32 partner_adv_speed;
105#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
106#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
107#define QED_LINK_PARTNER_SPEED_10G BIT(2)
108#define QED_LINK_PARTNER_SPEED_20G BIT(3)
109#define QED_LINK_PARTNER_SPEED_25G BIT(4)
110#define QED_LINK_PARTNER_SPEED_40G BIT(5)
111#define QED_LINK_PARTNER_SPEED_50G BIT(6)
112#define QED_LINK_PARTNER_SPEED_100G BIT(7)
113
114 bool partner_tx_flow_ctrl_en;
115 bool partner_rx_flow_ctrl_en;
116
117 u8 partner_adv_pause;
118#define QED_LINK_PARTNER_SYMMETRIC_PAUSE 0x1
119#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE 0x2
120#define QED_LINK_PARTNER_BOTH_PAUSE 0x3
121
122 bool sfp_tx_fault;
123 bool eee_active;
124 u8 eee_adv_caps;
125 u8 eee_lp_adv_caps;
126
127 u32 fec_active;
128};
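
/* Illustrative sketch (not from the original source): decoding the partner
 * pause advertisement bits above. Since QED_LINK_PARTNER_BOTH_PAUSE is the
 * OR of the symmetric and asymmetric bits, simple bitwise tests suffice.
 * The helper name is hypothetical.
 *
 *	static bool qed_example_partner_advertises_sym_pause(const struct qed_mcp_link_state *p_link)
 *	{
 *		return !!(p_link->partner_adv_pause & QED_LINK_PARTNER_SYMMETRIC_PAUSE);
 *	}
 */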
129
130struct qed_mcp_function_info {
131 u8 pause_on_host;
132
133 enum qed_pci_personality protocol;
134
135 u8 bandwidth_min;
136 u8 bandwidth_max;
137
138 u8 mac[ETH_ALEN];
139
140 u64 wwn_port;
141 u64 wwn_node;
142
143#define QED_MCP_VLAN_UNSET (0xffff)
144 u16 ovlan;
145
146 u16 mtu;
147};
148
149struct qed_mcp_nvm_common {
150 u32 offset;
151 u32 param;
152 u32 resp;
153 u32 cmd;
154};
155
156struct qed_mcp_drv_version {
157 u32 version;
158 u8 name[MCP_DRV_VER_STR_SIZE - 4];
159};
160
161struct qed_mcp_lan_stats {
162 u64 ucast_rx_pkts;
163 u64 ucast_tx_pkts;
164 u32 fcs_err;
165};
166
167struct qed_mcp_fcoe_stats {
168 u64 rx_pkts;
169 u64 tx_pkts;
170 u32 fcs_err;
171 u32 login_failure;
172};
173
174struct qed_mcp_iscsi_stats {
175 u64 rx_pdus;
176 u64 tx_pdus;
177 u64 rx_bytes;
178 u64 tx_bytes;
179};
180
181struct qed_mcp_rdma_stats {
182 u64 rx_pkts;
183 u64 tx_pkts;
184 u64 rx_bytes;
185 u64 tx_byts;
186};
187
188enum qed_mcp_protocol_type {
189 QED_MCP_LAN_STATS,
190 QED_MCP_FCOE_STATS,
191 QED_MCP_ISCSI_STATS,
192 QED_MCP_RDMA_STATS
193};
194
195union qed_mcp_protocol_stats {
196 struct qed_mcp_lan_stats lan_stats;
197 struct qed_mcp_fcoe_stats fcoe_stats;
198 struct qed_mcp_iscsi_stats iscsi_stats;
199 struct qed_mcp_rdma_stats rdma_stats;
200};
201
202enum qed_ov_eswitch {
203 QED_OV_ESWITCH_NONE,
204 QED_OV_ESWITCH_VEB,
205 QED_OV_ESWITCH_VEPA
206};
207
208enum qed_ov_client {
209 QED_OV_CLIENT_DRV,
210 QED_OV_CLIENT_USER,
211 QED_OV_CLIENT_VENDOR_SPEC
212};
213
214enum qed_ov_driver_state {
215 QED_OV_DRIVER_STATE_NOT_LOADED,
216 QED_OV_DRIVER_STATE_DISABLED,
217 QED_OV_DRIVER_STATE_ACTIVE
218};
219
220enum qed_ov_wol {
221 QED_OV_WOL_DEFAULT,
222 QED_OV_WOL_DISABLED,
223 QED_OV_WOL_ENABLED
224};
225
226enum qed_mfw_tlv_type {
227 QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
228 QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
229 QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
230	QED_MFW_TLV_ISCSI = 0x4, /* iSCSI protocol TLVs */
231 QED_MFW_TLV_MAX = 0x16,
232};
233
234struct qed_mfw_tlv_generic {
235#define QED_MFW_TLV_FLAGS_SIZE 2
236 struct {
237 u8 ipv4_csum_offload;
238 u8 lso_supported;
239 bool b_set;
240 } flags;
241
242#define QED_MFW_TLV_MAC_COUNT 3
243 /* First entry for primary MAC, 2 secondary MACs possible */
244 u8 mac[QED_MFW_TLV_MAC_COUNT][6];
245 bool mac_set[QED_MFW_TLV_MAC_COUNT];
246
247 u64 rx_frames;
248 bool rx_frames_set;
249 u64 rx_bytes;
250 bool rx_bytes_set;
251 u64 tx_frames;
252 bool tx_frames_set;
253 u64 tx_bytes;
254 bool tx_bytes_set;
255};
256
257union qed_mfw_tlv_data {
258 struct qed_mfw_tlv_generic generic;
259 struct qed_mfw_tlv_eth eth;
260 struct qed_mfw_tlv_fcoe fcoe;
261 struct qed_mfw_tlv_iscsi iscsi;
262};
263
264#define QED_NVM_CFG_OPTION_ALL BIT(0)
265#define QED_NVM_CFG_OPTION_INIT BIT(1)
266#define QED_NVM_CFG_OPTION_COMMIT BIT(2)
267#define QED_NVM_CFG_OPTION_FREE BIT(3)
268#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
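
/* Illustrative note (not from the original source): the QED_NVM_CFG_OPTION_*
 * values above are single-bit flags and may be OR-ed together, e.g. to select
 * a specific entity and commit the change in one request:
 *
 *	u32 cfg_options = QED_NVM_CFG_OPTION_ENTITY_SEL |
 *			  QED_NVM_CFG_OPTION_COMMIT;
 *
 * How the combined value is translated into mailbox parameters is handled by
 * the qed core and is not assumed here.
 */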
269
270/**
271 * qed_mcp_get_link_params(): Returns the link params of the hw function.
272 *
273 * @p_hwfn: HW device data.
274 *
275 * Returns: Pointer to link params.
276 */
277struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);
278
279/**
280 * qed_mcp_get_link_state(): Return the link state of the hw function.
281 *
282 * @p_hwfn: HW device data.
283 *
284 * Returns: Pointer to link state.
285 */
286struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);
287
288/**
289 * qed_mcp_get_link_capabilities(): Return the link capabilities of the
290 * hw function.
291 *
292 * @p_hwfn: HW device data.
293 *
294 * Returns: Pointer to link capabilities.
295 */
296struct qed_mcp_link_capabilities
297 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
298
299/**
300 * qed_mcp_set_link(): Request the MFW to set the link according
301 * to 'link_input'.
302 *
303 * @p_hwfn: HW device data.
304 * @p_ptt: P_ptt.
305 * @b_up: Raise link if `true'. Reset link if `false'.
306 *
307 * Return: Int.
308 */
309int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
310 struct qed_ptt *p_ptt,
311 bool b_up);
312
313/**
314 * qed_mcp_get_mfw_ver(): Get the management firmware version value.
315 *
316 * @p_hwfn: HW device data.
317 * @p_ptt: P_ptt.
318 * @p_mfw_ver: MFW version value.
319 * @p_running_bundle_id: Image id in nvram; Optional.
320 *
321 * Return: Int - 0 - operation was successful.
322 */
323int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
324 struct qed_ptt *p_ptt,
325 u32 *p_mfw_ver, u32 *p_running_bundle_id);
326
327/**
328 * qed_mcp_get_mbi_ver(): Get the MBI version value.
329 *
330 * @p_hwfn: HW device data.
331 * @p_ptt: P_ptt.
332 * @p_mbi_ver: A pointer to a variable to be filled with the MBI version.
333 *
334 * Return: Int - 0 - operation was successful.
335 */
336int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
337 struct qed_ptt *p_ptt, u32 *p_mbi_ver);
338
339/**
340 * qed_mcp_get_media_type(): Get media type value of the port.
341 *
342 * @p_hwfn: HW device data.
343 * @p_ptt: P_ptt.
344 * @media_type: Media type value
345 *
346 * Return: Int - 0 - Operation was successful.
347 * -EBUSY - Operation failed
348 */
349int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
350 struct qed_ptt *p_ptt, u32 *media_type);
351
352/**
353 * qed_mcp_get_transceiver_data(): Get transceiver data of the port.
354 *
355 * @p_hwfn: HW device data.
356 * @p_ptt: P_ptt.
357 * @p_transceiver_state: Transceiver state.
358 * @p_tranceiver_type: Media type value.
359 *
360 * Return: Int - 0 - Operation was successful.
361 * -EBUSY - Operation failed
362 */
363int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
364 struct qed_ptt *p_ptt,
365 u32 *p_transceiver_state,
366 u32 *p_tranceiver_type);
367
368/**
369 * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask.
370 *
371 * @p_hwfn: HW device data.
372 * @p_ptt: P_ptt.
373 * @p_speed_mask: Bit mask of all supported speeds.
374 *
375 * Return: Int - 0 - Operation was successful.
376 * -EBUSY - Operation failed
377 */
378
379int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
380 struct qed_ptt *p_ptt, u32 *p_speed_mask);
381
382/**
383 * qed_mcp_get_board_config(): Get board configuration.
384 *
385 * @p_hwfn: HW device data.
386 * @p_ptt: P_ptt.
387 * @p_board_config: Board config.
388 *
389 * Return: Int - 0 - Operation was successful.
390 * -EBUSY - Operation failed
391 */
392int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
393 struct qed_ptt *p_ptt, u32 *p_board_config);
394
395/**
396 * qed_mcp_cmd(): Sleepable function for sending commands to the MCP
397 * mailbox. It acquires a mutex lock for the entire
398 * operation, from sending the request until the MCP
399 * response is received. The MCP response is polled for up
400 * to 5 seconds, every 10 ms. Must not be called from atomic
401 * context.
402 *
403 * @p_hwfn: HW device data.
404 * @p_ptt: PTT required for register access.
405 * @cmd: command to be sent to the MCP.
406 * @param: Optional param
407 * @o_mcp_resp: The MCP response code (exclude sequence).
408 * @o_mcp_param: Optional parameter provided by the MCP
409 * response
410 *
411 * Return: Int - 0 - Operation was successful.
412 */
413int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
414 struct qed_ptt *p_ptt,
415 u32 cmd,
416 u32 param,
417 u32 *o_mcp_resp,
418 u32 *o_mcp_param);
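
/* Illustrative sketch (not from the original source): a minimal mailbox
 * exchange through the declaration above. 'cmd' stands for one of the
 * DRV_MSG_CODE_* values defined in qed_hsi.h; error handling is trimmed.
 *
 *	u32 resp = 0, param_out = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &resp, &param_out);
 *	if (rc)
 *		return rc;
 *	// 'resp' now holds the MCP response code and 'param_out' the
 *	// optional parameter returned by the MCP.
 */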
419
420/**
421 * qed_mcp_cmd_nosleep(): Function for sending commands to the MCP
422 * mailbox. It acquires a mutex lock for the entire
423 * operation, from sending the request until the MCP
424 * response is received. The MCP response is polled for up
425 * to 5 seconds, every 10 us. Use this variant when sleeping
426 * is not allowed.
427 *
428 * @p_hwfn: HW device data.
429 * @p_ptt: PTT required for register access.
430 * @cmd: command to be sent to the MCP.
431 * @param: Optional param
432 * @o_mcp_resp: The MCP response code (exclude sequence).
433 * @o_mcp_param: Optional parameter provided by the MCP
434 * response
435 *
436 * Return: Int - 0 - Operation was successful.
437 */
438int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
439 struct qed_ptt *p_ptt,
440 u32 cmd,
441 u32 param,
442 u32 *o_mcp_resp,
443 u32 *o_mcp_param);
444
445/**
446 * qed_mcp_drain(): Drains the NIG, allowing completions to pass in
447 * case of pauses.
448 * (Should be called only from sleepable context)
449 *
450 * @p_hwfn: HW device data.
451 * @p_ptt: PTT required for register access.
452 *
453 * Return: Int.
454 */
455int qed_mcp_drain(struct qed_hwfn *p_hwfn,
456 struct qed_ptt *p_ptt);
457
458/**
459 * qed_mcp_get_flash_size(): Get the flash size value.
460 *
461 * @p_hwfn: HW device data.
462 * @p_ptt: PTT required for register access.
463 * @p_flash_size: Flash size in bytes to be filled.
464 *
465 * Return: Int - 0 - Operation was successful.
466 */
467int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
468 struct qed_ptt *p_ptt,
469 u32 *p_flash_size);
470
471/**
472 * qed_mcp_send_drv_version(): Send driver version to MFW.
473 *
474 * @p_hwfn: HW device data.
475 * @p_ptt: PTT required for register access.
476 * @p_ver: Version value.
477 *
478 * Return: Int - 0 - Operation was successful.
479 */
480int
481qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
482 struct qed_ptt *p_ptt,
483 struct qed_mcp_drv_version *p_ver);
484
485/**
486 * qed_get_process_kill_counter(): Read the MFW process kill counter.
487 *
488 * @p_hwfn: HW device data.
489 * @p_ptt: PTT required for register access.
490 *
491 * Return: u32.
492 */
493u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
494 struct qed_ptt *p_ptt);
495
496/**
497 * qed_start_recovery_process(): Trigger a recovery process.
498 *
499 * @p_hwfn: HW device data.
500 * @p_ptt: PTT required for register access.
501 *
502 * Return: Int.
503 */
504int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
505
506/**
507 * qed_recovery_prolog(): A recovery handler must call this function
508 * as its first step.
509 * It is assumed that the handler is not run from
510 * an interrupt context.
511 *
512 * @cdev: Qed dev pointer.
513 *
514 * Return: int.
515 */
516int qed_recovery_prolog(struct qed_dev *cdev);
517
518/**
519 * qed_mcp_ov_update_current_config(): Notify MFW about the change in base
520 * device properties
521 *
522 * @p_hwfn: HW device data.
523 * @p_ptt: P_ptt.
524 * @client: Qed client type.
525 *
526 * Return: Int - 0 - Operation was successful.
527 */
528int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
529 struct qed_ptt *p_ptt,
530 enum qed_ov_client client);
531
532/**
533 * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state.
534 *
535 * @p_hwfn: HW device data.
536 * @p_ptt: P_ptt.
537 * @drv_state: Driver state.
538 *
539 * Return: Int - 0 - Operation was successful.
540 */
541int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
542 struct qed_ptt *p_ptt,
543 enum qed_ov_driver_state drv_state);
544
545/**
546 * qed_mcp_ov_update_mtu(): Send MTU size to MFW.
547 *
548 * @p_hwfn: HW device data.
549 * @p_ptt: P_ptt.
550 * @mtu: MTU size.
551 *
552 * Return: Int - 0 - Operation was successful.
553 */
554int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
555 struct qed_ptt *p_ptt, u16 mtu);
556
557/**
558 * qed_mcp_ov_update_mac(): Send MAC address to MFW.
559 *
560 * @p_hwfn: HW device data.
561 * @p_ptt: P_ptt.
562 * @mac: MAC address.
563 *
564 * Return: Int - 0 - Operation was successful.
565 */
566int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
567 struct qed_ptt *p_ptt, const u8 *mac);
568
569/**
570 * qed_mcp_ov_update_wol(): Send WOL mode to MFW.
571 *
572 * @p_hwfn: HW device data.
573 * @p_ptt: P_ptt.
574 * @wol: WOL mode.
575 *
576 * Return: Int - 0 - Operation was successful.
577 */
578int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
579 struct qed_ptt *p_ptt,
580 enum qed_ov_wol wol);
581
582/**
583 * qed_mcp_set_led(): Set LED status.
584 *
585 * @p_hwfn: HW device data.
586 * @p_ptt: P_ptt.
587 * @mode: LED mode.
588 *
589 * Return: Int - 0 - Operation was successful.
590 */
591int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
592 struct qed_ptt *p_ptt,
593 enum qed_led_mode mode);
594
595/**
596 * qed_mcp_nvm_read(): Read from NVM.
597 *
598 * @cdev: Qed dev pointer.
599 * @addr: NVM offset.
600 * @p_buf: NVM read buffer.
601 * @len: Buffer len.
602 *
603 * Return: Int - 0 - Operation was successful.
604 */
605int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
606
607/**
608 * qed_mcp_nvm_write(): Write to NVM.
609 *
610 * @cdev: Qed dev pointer.
611 * @addr: NVM offset.
612 * @cmd: NVM command.
613 * @p_buf: NVM write buffer.
614 * @len: Buffer len.
615 *
616 * Return: Int - 0 - Operation was successful.
617 */
618int qed_mcp_nvm_write(struct qed_dev *cdev,
619 u32 cmd, u32 addr, u8 *p_buf, u32 len);
620
621/**
622 * qed_mcp_nvm_resp(): Check latest response.
623 *
624 * @cdev: Qed dev pointer.
625 * @p_buf: NVM write buffer.
626 *
627 * Return: Int - 0 - Operation was successful.
628 */
629int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
630
631struct qed_nvm_image_att {
632 u32 start_addr;
633 u32 length;
634};
635
636/**
637 * qed_mcp_get_nvm_image_att(): Gets the attributes of a whole nvram image.
638 *
639 * @p_hwfn: HW device data.
640 * @image_id: Image to get attributes for.
641 * @p_image_att: Image attributes structure into which to fill data.
642 *
643 * Return: Int - 0 - Operation was successful.
644 */
645int
646qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
647 enum qed_nvm_images image_id,
648 struct qed_nvm_image_att *p_image_att);
649
650/**
651 * qed_mcp_get_nvm_image(): Allows reading a whole nvram image.
652 *
653 * @p_hwfn: HW device data.
654 * @image_id: image requested for reading.
655 * @p_buffer: allocated buffer into which to fill data.
656 * @buffer_len: length of the allocated buffer.
657 *
658 * Return: 0 if p_buffer now contains the nvram image.
659 */
660int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
661 enum qed_nvm_images image_id,
662 u8 *p_buffer, u32 buffer_len);
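
/* Illustrative sketch (not from the original source): sizing a buffer from the
 * image attributes and reading the whole image with the two helpers above.
 * kzalloc()/kfree() are the regular kernel allocators; error handling is
 * abbreviated.
 *
 *	struct qed_nvm_image_att image_att;
 *	u8 *buf;
 *	int rc;
 *
 *	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
 *	if (rc)
 *		return rc;
 *
 *	buf = kzalloc(image_att.length, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buf, image_att.length);
 *	// ... consume 'buf' on success ...
 *	kfree(buf);
 */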
663
664/**
665 * qed_mcp_bist_register_test(): Bist register test.
666 *
667 * @p_hwfn: HW device data.
668 * @p_ptt: PTT required for register access.
669 *
670 * Return: Int - 0 - Operation was successful.
671 */
672int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
673 struct qed_ptt *p_ptt);
674
675/**
676 * qed_mcp_bist_clock_test(): Bist clock test.
677 *
678 * @p_hwfn: HW device data.
679 * @p_ptt: PTT required for register access.
680 *
681 * Return: Int - 0 - Operation was successful.
682 */
683int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
684 struct qed_ptt *p_ptt);
685
686/**
687 * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images.
688 *
689 * @p_hwfn: HW device data.
690 * @p_ptt: PTT required for register access.
691 * @num_images: number of images if operation was
692 * successful. 0 if not.
693 *
694 * Return: Int - 0 - Operation was successful.
695 */
696int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
697 struct qed_ptt *p_ptt,
698 u32 *num_images);
699
700/**
701 * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes
702 * by index.
703 *
704 * @p_hwfn: HW device data.
705 * @p_ptt: PTT required for register access.
706 * @p_image_att: Attributes of image.
707 * @image_index: Index of image to get information for.
708 *
709 * Return: Int - 0 - Operation was successful.
710 */
711int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
712 struct qed_ptt *p_ptt,
713 struct bist_nvm_image_att *p_image_att,
714 u32 image_index);
715
716/**
717 * qed_mfw_process_tlv_req(): Processes the TLV request from the MFW,
718 * i.e., gets the required TLV info
719 * from the qed client and sends it to the MFW.
720 *
721 * @p_hwfn: HW device data.
722 * @p_ptt: P_ptt.
723 *
724 * Return: 0 upon success.
725 */
726int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
727
728/**
729 * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW
730 *
731 * @p_hwfn: HW device data.
732 * @p_ptt: P_ptt.
733 * @p_buf: raw debug data buffer.
734 * @size: Buffer size.
735 *
736 * Return: Int.
737 */
738int
739qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
740 struct qed_ptt *p_ptt, u8 *p_buf, u32 size);
741
742/* Using hwfn number (and not pf_num) is required since in CMT mode,
743 * the same pf_num may be used by two different hwfns.
744 * TODO - this shouldn't really be in a .h file, but until all fields
745 * required during hw-init are placed in their correct place in shmem,
746 * we need it in qed_dev.c [for reading the nvram reflection in shmem].
747 */
748#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
749 ((rel_pfid) | \
750 ((p_hwfn)->abs_pf_id & 1) << 3) : \
751 rel_pfid)
752#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
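
/* Illustrative note (not from the original source): callers derive the
 * MFW-visible PF index from the macro above, e.g.:
 *
 *	u8 mcp_pf_id = MCP_PF_ID(p_hwfn);
 *
 * On BB devices the engine bit (abs_pf_id & 1) is folded into bit 3 of the
 * relative PF id; otherwise the relative PF id is used unchanged.
 */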
753
754struct qed_mcp_info {
755 /* List for mailbox commands which were sent and wait for a response */
756 struct list_head cmd_list;
757
758 /* Spinlock used for protecting the access to the mailbox commands list
759 * and the sending of the commands.
760 */
761 spinlock_t cmd_lock;
762
763 /* Flag to indicate whether sending a MFW mailbox command is blocked */
764 bool b_block_cmd;
765
766 /* Spinlock used for syncing SW link-changes and link-changes
767 * originating from attention context.
768 */
769 spinlock_t link_lock;
770
771 u32 public_base;
772 u32 drv_mb_addr;
773 u32 mfw_mb_addr;
774 u32 port_addr;
775 u16 drv_mb_seq;
776 u16 drv_pulse_seq;
777 struct qed_mcp_link_params link_input;
778 struct qed_mcp_link_state link_output;
779 struct qed_mcp_link_capabilities link_capabilities;
780 struct qed_mcp_function_info func_info;
781 u8 *mfw_mb_cur;
782 u8 *mfw_mb_shadow;
783 u16 mfw_mb_length;
784 u32 mcp_hist;
785
786	/* Capabilities negotiated with the MFW */
787 u32 capabilities;
788
789 /* S/N for debug data mailbox commands */
790 atomic_t dbg_data_seq;
791
792 /* Spinlock used to sync the flag mcp_handling_status with
793 * the mfw events handler
794 */
795 spinlock_t unload_lock;
796 unsigned long mcp_handling_status;
797#define QED_MCP_BYPASS_PROC_BIT 0
798#define QED_MCP_IN_PROCESSING_BIT 1
799};
800
801struct qed_mcp_mb_params {
802 u32 cmd;
803 u32 param;
804 void *p_data_src;
805 void *p_data_dst;
806 u8 data_src_size;
807 u8 data_dst_size;
808 u32 mcp_resp;
809 u32 mcp_param;
810 u32 flags;
811#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0)
812#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1)
813#define QED_MB_FLAGS_IS_SET(params, flag) \
814 ({ typeof(params) __params = (params); \
815 (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
816};
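
/* Illustrative sketch (not from the original source): populating a mailbox
 * descriptor and testing its flags with the macro above.
 *
 *	struct qed_mcp_mb_params mb_params;
 *	bool can_sleep;
 *
 *	memset(&mb_params, 0, sizeof(mb_params));
 *	mb_params.cmd = cmd;
 *	mb_params.param = param;
 *	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 *
 *	can_sleep = QED_MB_FLAGS_IS_SET(&mb_params, CAN_SLEEP);
 */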
817
818struct qed_drv_tlv_hdr {
819 u8 tlv_type;
820 u8 tlv_length; /* In dwords - not including this header */
821 u8 tlv_reserved;
822#define QED_DRV_TLV_FLAGS_CHANGED 0x01
823 u8 tlv_flags;
824};
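
/* Illustrative note (not from the original source): since tlv_length is in
 * dwords and excludes the header, the byte length of a TLV's value is
 *
 *	u32 value_len = p_tlv->tlv_length * sizeof(u32);
 *
 * where p_tlv is a 'struct qed_drv_tlv_hdr *' pointing into the TLV buffer.
 */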
825
826/**
827 * qed_mcp_is_ext_speed_supported() - Check if management firmware supports
828 * extended speeds.
829 * @p_hwfn: HW device data.
830 *
831 * Return: true if supported, false otherwise.
832 */
833static inline bool
834qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
835{
836 return !!(p_hwfn->mcp_info->capabilities &
837 FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL);
838}
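
/* Illustrative sketch (not from the original source): choosing between the
 * legacy and extended speed requests based on the capability check above.
 * 'adv_speeds' is a hypothetical legacy capability mask supplied by the
 * caller.
 *
 *	struct qed_mcp_link_params *params = qed_mcp_get_link_params(p_hwfn);
 *
 *	if (qed_mcp_is_ext_speed_supported(p_hwfn))
 *		params->ext_speed.advertised_speeds = QED_EXT_SPEED_MASK_25G;
 *	else
 *		params->speed.advertised_speeds = adv_speeds;
 *
 *	// Apply the request (raise the link):
 *	// qed_mcp_set_link(p_hwfn, p_ptt, true);
 */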
839
840/**
841 * qed_mcp_cmd_init(): Initialize the interface with the MCP.
842 *
843 * @p_hwfn: HW device data.
844 * @p_ptt: PTT required for register access.
845 *
846 * Return: Int.
847 */
848int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
849 struct qed_ptt *p_ptt);
850
851/**
852 * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP
853 *
854 * @p_hwfn: HW device data.
855 * @p_ptt: P_ptt.
856 *
857 * Return: Void.
858 *
859 * Can only be called after `num_ports_in_engines' is set
860 */
861void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
862 struct qed_ptt *p_ptt);
863/**
864 * qed_mcp_free(): Releases resources allocated during the init process.
865 *
866 * @p_hwfn: HW function.
867 *
868 * Return: Int.
869 */
870
871int qed_mcp_free(struct qed_hwfn *p_hwfn);
872
873/**
874 * qed_mcp_handle_events(): This function is called from the DPC context.
875 * After pointing PTT to the mfw mb, check for events sent by
876 * the MCP to the driver and ack them. If a critical event is
877 * detected, it is handled here; otherwise the work is
878 * queued to a sleepable work-queue.
879 *
880 * @p_hwfn: HW function.
881 * @p_ptt: PTT required for register access.
882 *
883 * Return: Int - 0 - Operation was successful.
884 */
885int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
886 struct qed_ptt *p_ptt);
887
888enum qed_drv_role {
889 QED_DRV_ROLE_OS,
890 QED_DRV_ROLE_KDUMP,
891};
892
893struct qed_load_req_params {
894 /* Input params */
895 enum qed_drv_role drv_role;
896 u8 timeout_val;
897 bool avoid_eng_reset;
898 enum qed_override_force_load override_force_load;
899
900 /* Output params */
901 u32 load_code;
902};
903
904/**
905 * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the
906 * operation succeeds, returns whether this PF is
907 * the first on the engine, the first on the port, or neither.
908 *
909 * @p_hwfn: HW device data.
910 * @p_ptt: P_ptt.
911 * @p_params: Params.
912 *
913 * Return: Int - 0 - Operation was successful.
914 */
915int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
916 struct qed_ptt *p_ptt,
917 struct qed_load_req_params *p_params);
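
/* Illustrative sketch (not from the original source): issuing a LOAD_REQ with
 * default-ish parameters. The load_code output distinguishes the first PF on
 * the engine/port from subsequent ones; its specific values come from
 * qed_hsi.h and are not assumed here.
 *
 *	struct qed_load_req_params load_req_params;
 *	int rc;
 *
 *	memset(&load_req_params, 0, sizeof(load_req_params));
 *	load_req_params.drv_role = QED_DRV_ROLE_OS;
 *
 *	rc = qed_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
 *	if (rc)
 *		return rc;
 *	// inspect load_req_params.load_code here
 */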
918
919/**
920 * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW.
921 *
922 * @p_hwfn: HW device data.
923 * @p_ptt: P_ptt.
924 *
925 * Return: Int - 0 - Operation was successful.
926 */
927int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
928
929/**
930 * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW.
931 *
932 * @p_hwfn: HW device data.
933 * @p_ptt: P_ptt.
934 *
935 * Return: Int - 0 - Operation was successful.
936 */
937int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
938
939/**
940 * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW
941 *
942 * @p_hwfn: HW device data.
943 * @p_ptt: P_ptt.
944 *
945 * Return: Int - 0 - Operation was successful.
946 */
947int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
948
949/**
950 * qed_mcp_read_mb(): Read the MFW mailbox into the current buffer.
951 *
952 * @p_hwfn: HW device data.
953 * @p_ptt: P_ptt.
954 *
955 * Return: Void.
956 */
957void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
958 struct qed_ptt *p_ptt);
959
960/**
961 * qed_mcp_ack_vf_flr(): Ack to the MFW that the driver finished the FLR process for VFs.
962 *
963 * @p_hwfn: HW device data.
964 * @p_ptt: P_ptt.
965 * @vfs_to_ack: bit mask of all engine VFs for which the PF acks.
966 *
967 * Return: Int - 0 - Operation was successful.
968 */
969int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
970 struct qed_ptt *p_ptt, u32 *vfs_to_ack);
971
972/**
973 * qed_mcp_fill_shmem_func_info(): Called during init to read the shmem of
974 * all function-related info.
975 *
976 * @p_hwfn: HW device data.
977 * @p_ptt: P_ptt.
978 *
979 * Return: 0 upon success.
980 */
981int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
982 struct qed_ptt *p_ptt);
983
984/**
985 * qed_mcp_reset(): Reset the MCP using mailbox command.
986 *
987 * @p_hwfn: HW device data.
988 * @p_ptt: P_ptt.
989 *
990 * Return: 0 upon success.
991 */
992int qed_mcp_reset(struct qed_hwfn *p_hwfn,
993 struct qed_ptt *p_ptt);
994
995/**
996 * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get
997 * a buffer.
998 *
999 * @p_hwfn: HW device data.
1000 * @p_ptt: P_ptt.
1001 * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or
1002 * DRV_MSG_CODE_NVM_READ_NVRAM commands.
1003 * @param: [0:23] - Offset [24:31] - Size.
1004 * @o_mcp_resp: MCP response.
1005 * @o_mcp_param: MCP response param.
1006 * @o_txn_size: Buffer size output.
1007 * @o_buf: Pointer to the buffer returned by the MFW.
1008 * @b_can_sleep: Can sleep.
1009 *
1010 * Return: 0 upon success.
1011 */
1012int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
1013 struct qed_ptt *p_ptt,
1014 u32 cmd,
1015 u32 param,
1016 u32 *o_mcp_resp,
1017 u32 *o_mcp_param,
1018 u32 *o_txn_size, u32 *o_buf, bool b_can_sleep);
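
/* Illustrative sketch (not from the original source): packing 'param' as
 * documented above (offset in bits [0:23], size in bits [24:31]) and issuing
 * a single read transaction with the command named in the kernel-doc. 'buf'
 * is a caller-provided buffer of at least 'bytes' bytes.
 *
 *	u32 resp = 0, resp_param = 0, txn_size = 0;
 *	u32 nvm_offset = 0x1000, bytes = 0x40;	// example values only
 *	u32 param = (nvm_offset & 0x00ffffff) | (bytes << 24);
 *	int rc;
 *
 *	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
 *				param, &resp, &resp_param, &txn_size,
 *				(u32 *)buf, true);
 */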
1019
1020/**
1021 * qed_mcp_phy_sfp_read(): Read from sfp.
1022 *
1023 * @p_hwfn: HW device data.
1024 * @p_ptt: PTT required for register access.
1025 * @port: transceiver port.
1026 * @addr: I2C address.
1027 * @offset: offset in sfp.
1028 * @len: buffer length.
1029 * @p_buf: buffer to read into.
1030 *
1031 * Return: Int - 0 - Operation was successful.
1032 */
1033int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1034 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
1035
1036/**
1037 * qed_mcp_is_init(): Indicates whether the MFW objects [under mcp_info]
1038 * are accessible.
1039 *
1040 * @p_hwfn: HW device data.
1041 *
1042 * Return: true if MFW is running and mcp_info is initialized.
1043 */
1044bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
1045
1046/**
1047 * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF.
1048 *
1049 * @p_hwfn: HW device data.
1050 * @p_ptt: P_ptt.
1051 * @vf_id: absolute inside engine.
1052 * @num: number of entries to request.
1053 *
1054 * Return: Int.
1055 */
1056int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
1057 struct qed_ptt *p_ptt, u8 vf_id, u8 num);
1058
1059/**
1060 * qed_mcp_halt(): Halt the MCP.
1061 *
1062 * @p_hwfn: HW device data.
1063 * @p_ptt: P_ptt.
1064 *
1065 * Return: 0 upon success.
1066 */
1067int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
1068
1069/**
1070 * qed_mcp_resume(): Wake up the MCP.
1071 *
1072 * @p_hwfn: HW device data.
1073 * @p_ptt: P_ptt.
1074 *
1075 * Return: 0 upon success.
1076 */
1077int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
1078
1079int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
1080int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
1081int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
1082 struct qed_ptt *p_ptt,
1083 struct qed_mcp_link_state *p_link,
1084 u8 max_bw);
1085int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
1086 struct qed_ptt *p_ptt,
1087 struct qed_mcp_link_state *p_link,
1088 u8 min_bw);
1089
1090int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
1091 struct qed_ptt *p_ptt, u32 mask_parities);
1092
1093/** qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW.
1094 *
1095 * @p_hwfn: HW device data.
1096 * @p_ptt: P_ptt.
1097 * @p_mdump_retain: Pointer to the mdump retained data to be filled.
1098 *
1099 * Return: Int - 0 - Operation was successful.
1100 */
1101int
1102qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
1103 struct qed_ptt *p_ptt,
1104 struct mdump_retain_data_stc *p_mdump_retain);
1105
1106/**
1107 * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource.
1108 *
1109 * @p_hwfn: HW device data.
1110 * @p_ptt: P_ptt.
1111 * @res_id: RES ID.
1112 * @resc_max_val: Resource max value.
1113 * @p_mcp_resp: MCP Resp
1114 *
1115 * Return: Int - 0 - Operation was successful.
1116 */
1117int
1118qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
1119 struct qed_ptt *p_ptt,
1120 enum qed_resources res_id,
1121 u32 resc_max_val, u32 *p_mcp_resp);
1122
1123/**
1124 * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
1125 * resource.
1126 *
1127 * @p_hwfn: HW device data.
1128 * @p_ptt: P_ptt.
1129 * @res_id: Res ID.
1130 * @p_mcp_resp: MCP resp.
1131 * @p_resc_num: Resc num.
1132 * @p_resc_start: Resc start.
1133 *
1134 * Return: Int - 0 - Operation was successful.
1135 */
1136int
1137qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
1138 struct qed_ptt *p_ptt,
1139 enum qed_resources res_id,
1140 u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
1141
1142/**
1143 * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
1144 *
1145 * @p_hwfn: HW device data.
1146 * @p_ptt: P_ptt.
1147 * @eswitch: eswitch mode.
1148 *
1149 * Return: Int - 0 - Operation was successful.
1150 */
1151int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
1152 struct qed_ptt *p_ptt,
1153 enum qed_ov_eswitch eswitch);
1154
1155#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP
1156#define QED_MCP_RESC_LOCK_MAX_VAL 31
1157
1158enum qed_resc_lock {
1159 QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
1160 QED_RESC_LOCK_PTP_PORT0,
1161 QED_RESC_LOCK_PTP_PORT1,
1162 QED_RESC_LOCK_PTP_PORT2,
1163 QED_RESC_LOCK_PTP_PORT3,
1164 QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
1165 QED_RESC_LOCK_RESC_INVALID
1166};
1167
1168/**
1169 * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
1170 *
1171 * @p_hwfn: HW device data.
1172 * @p_ptt: P_ptt.
1173 *
1174 * Return: Int - 0 - Operation was successful.
1175 */
1176int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
1177struct qed_resc_lock_params {
1178 /* Resource number [valid values are 0..31] */
1179 u8 resource;
1180
1181 /* Lock timeout value in seconds [default, none or 1..254] */
1182 u8 timeout;
1183#define QED_MCP_RESC_LOCK_TO_DEFAULT 0
1184#define QED_MCP_RESC_LOCK_TO_NONE 255
1185
1186 /* Number of times to retry locking */
1187 u8 retry_num;
1188#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
1189
1190 /* The interval in usec between retries */
1191 u16 retry_interval;
1192#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
1193
1194 /* Use sleep or delay between retries */
1195 bool sleep_b4_retry;
1196
1197 /* Will be set as true if the resource is free and granted */
1198 bool b_granted;
1199
1200 /* Will be filled with the resource owner.
1201 * [0..15 = PF0-15, 16 = MFW]
1202 */
1203 u8 owner;
1204};
1205
1206/**
1207 * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
1208 *
1209 * @p_hwfn: HW device data.
1210 * @p_ptt: P_ptt.
1211 * @p_params: Params.
1212 *
1213 * Return: Int - 0 - Operation was successful.
1214 */
1215int
1216qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
1217 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
1218
1219struct qed_resc_unlock_params {
1220 /* Resource number [valid values are 0..31] */
1221 u8 resource;
1222
1223 /* Allow to release a resource even if belongs to another PF */
1224 bool b_force;
1225
1226 /* Will be set as true if the resource is released */
1227 bool b_released;
1228};
1229
1230/**
1231 * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
1232 *
1233 * @p_hwfn: HW device data.
1234 * @p_ptt: P_ptt.
1235 * @p_params: Params.
1236 *
1237 * Return: Int - 0 - Operation was successful.
1238 */
1239int
1240qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
1241 struct qed_ptt *p_ptt,
1242 struct qed_resc_unlock_params *p_params);
1243
1244/**
1245 * qed_mcp_resc_lock_default_init(): Default initialization for
1246 * lock/unlock resource structs.
1247 *
1248 * @p_lock: lock params struct to be initialized; Can be NULL.
1249 * @p_unlock: unlock params struct to be initialized; Can be NULL.
1250 * @resource: the requested resource.
1251 * @b_is_permanent: disable retries & aging when set.
1252 *
1253 * Return: Void.
1254 */
1255void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
1256 struct qed_resc_unlock_params *p_unlock,
1257 enum qed_resc_lock
1258 resource, bool b_is_permanent);
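
/* Illustrative sketch (not from the original source): taking and releasing an
 * MFW resource lock around a critical section, using the default
 * initialization helper above. Error handling is abbreviated.
 *
 *	struct qed_resc_lock_params lock_params;
 *	struct qed_resc_unlock_params unlock_params;
 *	int rc;
 *
 *	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *				       QED_RESC_LOCK_PTP_PORT0, false);
 *
 *	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
 *	if (rc || !lock_params.b_granted)
 *		return rc ? rc : -EBUSY;
 *
 *	// ... access the MFW-arbitrated resource ...
 *
 *	qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 */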
1259
1260/**
1261 * qed_mcp_is_smart_an_supported(): Return whether management firmware
1262 * supports smart AN.
1263 *
1264 * @p_hwfn: HW device data.
1265 *
1266 * Return: bool true if feature is supported.
1267 */
1268bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);
1269
1270/**
1271 * qed_mcp_get_capabilities(): Learn of supported MFW features;
1272 * To be done during early init.
1273 *
1274 * @p_hwfn: HW device data.
1275 * @p_ptt: P_ptt.
1276 *
1277 * Return: Int.
1278 */
1279int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
1280
1281/**
1282 * qed_mcp_set_capabilities(): Inform MFW of set of features supported
1283 * by driver. Should be done inside the content
1284 * of the LOAD_REQ.
1285 *
1286 * @p_hwfn: HW device data.
1287 * @p_ptt: P_ptt.
1288 *
1289 * Return: Int.
1290 */
1291int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
1292
1293/**
1294 * qed_mcp_read_ufp_config(): Read ufp config from the shared memory.
1295 *
1296 * @p_hwfn: HW device data.
1297 * @p_ptt: P_ptt.
1298 *
1299 * Return: Void.
1300 */
1301void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
1302
1303/**
1304 * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given
1305 * hardware function.
1306 *
1307 * @p_hwfn: HW device data.
1308 *
1309 * Return: Int.
1310 */
1311int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
1312
1313/**
1314 * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given
1315 * hardware function.
1316 *
1317 * @p_hwfn: HW device data.
1318 *
1319 * Return: Void.
1320 */
1321void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
1322
1323/**
1324 * qed_mcp_get_engine_config(): Get the engine affinity configuration.
1325 *
1326 * @p_hwfn: HW device data.
1327 * @p_ptt: P_ptt.
1328 *
1329 * Return: Int.
1330 */
1331int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
1332
1333/**
1334 * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap.
1335 *
1336 * @p_hwfn: HW device data.
1337 * @p_ptt: P_ptt.
1338 *
1339 * Return: Int.
1340 */
1341int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
1342
1343/**
1344 * qed_mcp_nvm_get_cfg(): Get NVM config attribute value.
1345 *
1346 * @p_hwfn: HW device data.
1347 * @p_ptt: P_ptt.
1348 * @option_id: Option ID.
1349 * @entity_id: Entity ID.
1350 * @flags: Flags.
1351 * @p_buf: Buf.
1352 * @p_len: Len.
1353 *
1354 * Return: Int.
1355 */
1356int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1357 u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
1358 u32 *p_len);
1359
1360/**
1361 * qed_mcp_nvm_set_cfg(): Set NVM config attribute value.
1362 *
1363 * @p_hwfn: HW device data.
1364 * @p_ptt: P_ptt.
1365 * @option_id: Option ID.
1366 * @entity_id: Entity ID.
1367 * @flags: Flags.
1368 * @p_buf: Buf.
1369 * @len: Len.
1370 *
1371 * Return: Int.
1372 */
1373int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1374 u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
1375 u32 len);
1376
1377/**
1378 * qed_mcp_is_esl_supported(): Return whether the management firmware supports ESL.
1379 *
1380 * @p_hwfn: hw function pointer
1381 *
1382 * Return: true if esl is supported, otherwise return false
1383 */
1384bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn);
1385
1386/**
1387 * qed_mcp_get_esl_status(): Get enhanced system lockdown status
1388 *
1389 * @p_hwfn: hw function pointer
1390 * @p_ptt: ptt resource pointer
1391 * @active: ESL active status data pointer
1392 *
1393 * Return: 0 with esl status info on success, otherwise return error
1394 */
1395int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active);
1396#endif
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef _QED_MCP_H
34#define _QED_MCP_H
35
36#include <linux/types.h>
37#include <linux/delay.h>
38#include <linux/slab.h>
39#include <linux/spinlock.h>
40#include <linux/qed/qed_fcoe_if.h>
41#include "qed_hsi.h"
42#include "qed_dev_api.h"
43
44struct qed_mcp_link_speed_params {
45 bool autoneg;
46 u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
47 u32 forced_speed; /* In Mb/s */
48};
49
50struct qed_mcp_link_pause_params {
51 bool autoneg;
52 bool forced_rx;
53 bool forced_tx;
54};
55
56enum qed_mcp_eee_mode {
57 QED_MCP_EEE_DISABLED,
58 QED_MCP_EEE_ENABLED,
59 QED_MCP_EEE_UNSUPPORTED
60};
61
62struct qed_mcp_link_params {
63 struct qed_mcp_link_speed_params speed;
64 struct qed_mcp_link_pause_params pause;
65 u32 loopback_mode;
66 struct qed_link_eee_params eee;
67};
68
69struct qed_mcp_link_capabilities {
70 u32 speed_capabilities;
71 bool default_speed_autoneg;
72 enum qed_mcp_eee_mode default_eee;
73 u32 eee_lpi_timer;
74 u8 eee_speed_caps;
75};
76
77struct qed_mcp_link_state {
78 bool link_up;
79
80 u32 min_pf_rate;
81
82 /* Actual link speed in Mb/s */
83 u32 line_speed;
84
85 /* PF max speed in Mb/s, deduced from line_speed
86 * according to PF max bandwidth configuration.
87 */
88 u32 speed;
89 bool full_duplex;
90
91 bool an;
92 bool an_complete;
93 bool parallel_detection;
94 bool pfc_enabled;
95
96#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
97#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
98#define QED_LINK_PARTNER_SPEED_10G BIT(2)
99#define QED_LINK_PARTNER_SPEED_20G BIT(3)
100#define QED_LINK_PARTNER_SPEED_25G BIT(4)
101#define QED_LINK_PARTNER_SPEED_40G BIT(5)
102#define QED_LINK_PARTNER_SPEED_50G BIT(6)
103#define QED_LINK_PARTNER_SPEED_100G BIT(7)
104 u32 partner_adv_speed;
105
106 bool partner_tx_flow_ctrl_en;
107 bool partner_rx_flow_ctrl_en;
108
109#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
110#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
111#define QED_LINK_PARTNER_BOTH_PAUSE (3)
112 u8 partner_adv_pause;
113
114 bool sfp_tx_fault;
115 bool eee_active;
116 u8 eee_adv_caps;
117 u8 eee_lp_adv_caps;
118};
119
120struct qed_mcp_function_info {
121 u8 pause_on_host;
122
123 enum qed_pci_personality protocol;
124
125 u8 bandwidth_min;
126 u8 bandwidth_max;
127
128 u8 mac[ETH_ALEN];
129
130 u64 wwn_port;
131 u64 wwn_node;
132
133#define QED_MCP_VLAN_UNSET (0xffff)
134 u16 ovlan;
135
136 u16 mtu;
137};
138
139struct qed_mcp_nvm_common {
140 u32 offset;
141 u32 param;
142 u32 resp;
143 u32 cmd;
144};
145
146struct qed_mcp_drv_version {
147 u32 version;
148 u8 name[MCP_DRV_VER_STR_SIZE - 4];
149};
150
151struct qed_mcp_lan_stats {
152 u64 ucast_rx_pkts;
153 u64 ucast_tx_pkts;
154 u32 fcs_err;
155};
156
157struct qed_mcp_fcoe_stats {
158 u64 rx_pkts;
159 u64 tx_pkts;
160 u32 fcs_err;
161 u32 login_failure;
162};
163
164struct qed_mcp_iscsi_stats {
165 u64 rx_pdus;
166 u64 tx_pdus;
167 u64 rx_bytes;
168 u64 tx_bytes;
169};
170
171struct qed_mcp_rdma_stats {
172 u64 rx_pkts;
173 u64 tx_pkts;
174 u64 rx_bytes;
175 u64 tx_byts;
176};
177
178enum qed_mcp_protocol_type {
179 QED_MCP_LAN_STATS,
180 QED_MCP_FCOE_STATS,
181 QED_MCP_ISCSI_STATS,
182 QED_MCP_RDMA_STATS
183};
184
185union qed_mcp_protocol_stats {
186 struct qed_mcp_lan_stats lan_stats;
187 struct qed_mcp_fcoe_stats fcoe_stats;
188 struct qed_mcp_iscsi_stats iscsi_stats;
189 struct qed_mcp_rdma_stats rdma_stats;
190};
191
192enum qed_ov_eswitch {
193 QED_OV_ESWITCH_NONE,
194 QED_OV_ESWITCH_VEB,
195 QED_OV_ESWITCH_VEPA
196};
197
198enum qed_ov_client {
199 QED_OV_CLIENT_DRV,
200 QED_OV_CLIENT_USER,
201 QED_OV_CLIENT_VENDOR_SPEC
202};
203
204enum qed_ov_driver_state {
205 QED_OV_DRIVER_STATE_NOT_LOADED,
206 QED_OV_DRIVER_STATE_DISABLED,
207 QED_OV_DRIVER_STATE_ACTIVE
208};
209
210enum qed_ov_wol {
211 QED_OV_WOL_DEFAULT,
212 QED_OV_WOL_DISABLED,
213 QED_OV_WOL_ENABLED
214};
215
216enum qed_mfw_tlv_type {
217 QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
218 QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
219 QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
220 QED_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */
221 QED_MFW_TLV_MAX = 0x16,
222};
223
224struct qed_mfw_tlv_generic {
225#define QED_MFW_TLV_FLAGS_SIZE 2
226 struct {
227 u8 ipv4_csum_offload;
228 u8 lso_supported;
229 bool b_set;
230 } flags;
231
232#define QED_MFW_TLV_MAC_COUNT 3
233 /* First entry for primary MAC, 2 secondary MACs possible */
234 u8 mac[QED_MFW_TLV_MAC_COUNT][6];
235 bool mac_set[QED_MFW_TLV_MAC_COUNT];
236
237 u64 rx_frames;
238 bool rx_frames_set;
239 u64 rx_bytes;
240 bool rx_bytes_set;
241 u64 tx_frames;
242 bool tx_frames_set;
243 u64 tx_bytes;
244 bool tx_bytes_set;
245};
246
247union qed_mfw_tlv_data {
248 struct qed_mfw_tlv_generic generic;
249 struct qed_mfw_tlv_eth eth;
250 struct qed_mfw_tlv_fcoe fcoe;
251 struct qed_mfw_tlv_iscsi iscsi;
252};
253
254#define QED_NVM_CFG_OPTION_ALL BIT(0)
255#define QED_NVM_CFG_OPTION_INIT BIT(1)
256#define QED_NVM_CFG_OPTION_COMMIT BIT(2)
257#define QED_NVM_CFG_OPTION_FREE BIT(3)
258#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
259
260/**
261 * @brief - returns the link params of the hw function
262 *
263 * @param p_hwfn
264 *
265 * @returns pointer to link params
266 */
267struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
268
269/**
270 * @brief - return the link state of the hw function
271 *
272 * @param p_hwfn
273 *
274 * @returns pointer to link state
275 */
276struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
277
278/**
279 * @brief - return the link capabilities of the hw function
280 *
281 * @param p_hwfn
282 *
283 * @returns pointer to link capabilities
284 */
285struct qed_mcp_link_capabilities
286 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
287
288/**
289 * @brief Request the MFW to set the the link according to 'link_input'.
290 *
291 * @param p_hwfn
292 * @param p_ptt
293 * @param b_up - raise link if `true'. Reset link if `false'.
294 *
295 * @return int
296 */
297int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
298 struct qed_ptt *p_ptt,
299 bool b_up);
300
301/**
302 * @brief Get the management firmware version value
303 *
304 * @param p_hwfn
305 * @param p_ptt
306 * @param p_mfw_ver - mfw version value
307 * @param p_running_bundle_id - image id in nvram; Optional.
308 *
309 * @return int - 0 - operation was successful.
310 */
311int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
312 struct qed_ptt *p_ptt,
313 u32 *p_mfw_ver, u32 *p_running_bundle_id);
314
315/**
316 * @brief Get the MBI version value
317 *
318 * @param p_hwfn
319 * @param p_ptt
320 * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
321 *
322 * @return int - 0 - operation was successful.
323 */
324int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
325 struct qed_ptt *p_ptt, u32 *p_mbi_ver);
326
327/**
328 * @brief Get media type value of the port.
329 *
330 * @param cdev - qed dev pointer
331 * @param p_ptt
332 * @param mfw_ver - media type value
333 *
334 * @return int -
335 * 0 - Operation was successul.
336 * -EBUSY - Operation failed
337 */
338int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
339 struct qed_ptt *p_ptt, u32 *media_type);
340
341/**
342 * @brief Get transceiver data of the port.
343 *
344 * @param cdev - qed dev pointer
345 * @param p_ptt
346 * @param p_transceiver_state - transceiver state.
347 * @param p_transceiver_type - media type value
348 *
349 * @return int -
350 * 0 - Operation was successful.
351 * -EBUSY - Operation failed
352 */
353int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
354 struct qed_ptt *p_ptt,
355 u32 *p_transceiver_state,
356 u32 *p_tranceiver_type);
357
358/**
359 * @brief Get transceiver supported speed mask.
360 *
361 * @param cdev - qed dev pointer
362 * @param p_ptt
363 * @param p_speed_mask - Bit mask of all supported speeds.
364 *
365 * @return int -
366 * 0 - Operation was successful.
367 * -EBUSY - Operation failed
368 */
369
370int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
371 struct qed_ptt *p_ptt, u32 *p_speed_mask);
372
373/**
374 * @brief Get board configuration.
375 *
376 * @param cdev - qed dev pointer
377 * @param p_ptt
378 * @param p_board_config - Board config.
379 *
380 * @return int -
381 * 0 - Operation was successful.
382 * -EBUSY - Operation failed
383 */
384int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
385 struct qed_ptt *p_ptt, u32 *p_board_config);
386
387/**
388 * @brief General function for sending commands to the MCP
389 * mailbox. It acquire mutex lock for the entire
390 * operation, from sending the request until the MCP
391 * response. Waiting for MCP response will be checked up
392 * to 5 seconds every 5ms.
393 *
394 * @param p_hwfn - hw function
395 * @param p_ptt - PTT required for register access
396 * @param cmd - command to be sent to the MCP.
397 * @param param - Optional param
398 * @param o_mcp_resp - The MCP response code (exclude sequence).
399 * @param o_mcp_param- Optional parameter provided by the MCP
400 * response
401 * @return int - 0 - operation
402 * was successul.
403 */
404int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
405 struct qed_ptt *p_ptt,
406 u32 cmd,
407 u32 param,
408 u32 *o_mcp_resp,
409 u32 *o_mcp_param);
410
411/**
412 * @brief - drains the nig, allowing completion to pass in case of pauses.
413 * (Should be called only from sleepable context)
414 *
415 * @param p_hwfn
416 * @param p_ptt
417 */
418int qed_mcp_drain(struct qed_hwfn *p_hwfn,
419 struct qed_ptt *p_ptt);
420
421/**
422 * @brief Get the flash size value
423 *
424 * @param p_hwfn
425 * @param p_ptt
426 * @param p_flash_size - flash size in bytes to be filled.
427 *
428 * @return int - 0 - operation was successul.
429 */
430int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
431 struct qed_ptt *p_ptt,
432 u32 *p_flash_size);
433
434/**
435 * @brief Send driver version to MFW
436 *
437 * @param p_hwfn
438 * @param p_ptt
439 * @param version - Version value
440 * @param name - Protocol driver name
441 *
442 * @return int - 0 - operation was successul.
443 */
444int
445qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
446 struct qed_ptt *p_ptt,
447 struct qed_mcp_drv_version *p_ver);
448
449/**
450 * @brief Read the MFW process kill counter
451 *
452 * @param p_hwfn
453 * @param p_ptt
454 *
455 * @return u32
456 */
457u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
458 struct qed_ptt *p_ptt);
459
460/**
461 * @brief Trigger a recovery process
462 *
463 * @param p_hwfn
464 * @param p_ptt
465 *
466 * @return int
467 */
468int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
469
470/**
471 * @brief A recovery handler must call this function as its first step.
472 * It is assumed that the handler is not run from an interrupt context.
473 *
474 * @param cdev
475 * @param p_ptt
476 *
477 * @return int
478 */
479int qed_recovery_prolog(struct qed_dev *cdev);
480
481/**
482 * @brief Notify MFW about the change in base device properties
483 *
484 * @param p_hwfn
485 * @param p_ptt
486 * @param client - qed client type
487 *
488 * @return int - 0 - operation was successful.
489 */
490int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
491 struct qed_ptt *p_ptt,
492 enum qed_ov_client client);
493
494/**
495 * @brief Notify MFW about the driver state
496 *
497 * @param p_hwfn
498 * @param p_ptt
499 * @param drv_state - Driver state
500 *
501 * @return int - 0 - operation was successful.
502 */
503int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
504 struct qed_ptt *p_ptt,
505 enum qed_ov_driver_state drv_state);
506
507/**
508 * @brief Send MTU size to MFW
509 *
510 * @param p_hwfn
511 * @param p_ptt
512 * @param mtu - MTU size
513 *
514 * @return int - 0 - operation was successful.
515 */
516int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
517 struct qed_ptt *p_ptt, u16 mtu);
518
519/**
520 * @brief Send MAC address to MFW
521 *
522 * @param p_hwfn
523 * @param p_ptt
524 * @param mac - MAC address
525 *
526 * @return int - 0 - operation was successful.
527 */
528int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
529 struct qed_ptt *p_ptt, u8 *mac);
530
531/**
532 * @brief Send WOL mode to MFW
533 *
534 * @param p_hwfn
535 * @param p_ptt
536 * @param wol - WOL mode
537 *
538 * @return int - 0 - operation was successful.
539 */
540int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
541 struct qed_ptt *p_ptt,
542 enum qed_ov_wol wol);
543
544/**
545 * @brief Set LED status
546 *
547 * @param p_hwfn
548 * @param p_ptt
549 * @param mode - LED mode
550 *
551 * @return int - 0 - operation was successful.
552 */
553int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
554 struct qed_ptt *p_ptt,
555 enum qed_led_mode mode);
556
557/**
558 * @brief Read from nvm
559 *
560 * @param cdev
561 * @param addr - nvm offset
562 * @param p_buf - nvm read buffer
563 * @param len - buffer len
564 *
565 * @return int - 0 - operation was successful.
566 */
567int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
568
569/**
570 * @brief Write to nvm
571 *
572 * @param cdev
573 * @param addr - nvm offset
574 * @param cmd - nvm command
575 * @param p_buf - nvm write buffer
576 * @param len - buffer len
577 *
578 * @return int - 0 - operation was successful.
579 */
580int qed_mcp_nvm_write(struct qed_dev *cdev,
581 u32 cmd, u32 addr, u8 *p_buf, u32 len);
582
583/**
584 * @brief Check latest response
585 *
586 * @param cdev
587 * @param p_buf - nvm write buffer
588 *
589 * @return int - 0 - operation was successful.
590 */
591int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
592
593struct qed_nvm_image_att {
594 u32 start_addr;
595 u32 length;
596};
597
598/**
599 * @brief Allows reading a whole nvram image
600 *
601 * @param p_hwfn
602 * @param image_id - image to get attributes for
603 * @param p_image_att - image attributes structure into which to fill data
604 *
605 * @return int - 0 - operation was successful.
606 */
607int
608qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
609 enum qed_nvm_images image_id,
610 struct qed_nvm_image_att *p_image_att);
611
612/**
613 * @brief Allows reading a whole nvram image
614 *
615 * @param p_hwfn
616 * @param image_id - image requested for reading
617 * @param p_buffer - allocated buffer into which to fill data
618 * @param buffer_len - length of the allocated buffer.
619 *
620 * @return 0 iff p_buffer now contains the nvram image.
621 */
622int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
623 enum qed_nvm_images image_id,
624 u8 *p_buffer, u32 buffer_len);
625
626/**
627 * @brief Bist register test
628 *
629 * @param p_hwfn - hw function
630 * @param p_ptt - PTT required for register access
631 *
632 * @return int - 0 - operation was successful.
633 */
634int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
635 struct qed_ptt *p_ptt);
636
637/**
638 * @brief Bist clock test
639 *
640 * @param p_hwfn - hw function
641 * @param p_ptt - PTT required for register access
642 *
643 * @return int - 0 - operation was successful.
644 */
645int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
646 struct qed_ptt *p_ptt);
647
648/**
649 * @brief Bist nvm test - get number of images
650 *
651 * @param p_hwfn - hw function
652 * @param p_ptt - PTT required for register access
653 * @param num_images - number of images if operation was
654 * successful. 0 if not.
655 *
656 * @return int - 0 - operation was successful.
657 */
658int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
659 struct qed_ptt *p_ptt,
660 u32 *num_images);
661
662/**
663 * @brief Bist nvm test - get image attributes by index
664 *
665 * @param p_hwfn - hw function
666 * @param p_ptt - PTT required for register access
667 * @param p_image_att - Attributes of image
668 * @param image_index - Index of image to get information for
669 *
670 * @return int - 0 - operation was successful.
671 */
672int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
673 struct qed_ptt *p_ptt,
674 struct bist_nvm_image_att *p_image_att,
675 u32 image_index);
676
677/**
678 * @brief - Processes the TLV request from MFW i.e., get the required TLV info
679 * from the qed client and send it to the MFW.
680 *
681 * @param p_hwfn
682 * @param p_ptt
683 *
684 * @param return 0 upon success.
685 */
686int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
687
688/* Using hwfn number (and not pf_num) is required since in CMT mode,
689 * same pf_num may be used by two different hwfn
690 * TODO - this shouldn't really be in .h file, but until all fields
691 * required during hw-init will be placed in their correct place in shmem
692 * we need it in qed_dev.c [for readin the nvram reflection in shmem].
693 */
694#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
695 ((rel_pfid) | \
696 ((p_hwfn)->abs_pf_id & 1) << 3) : \
697 rel_pfid)
698#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
699
struct qed_mcp_info {
	/* List for mailbox commands which were sent and are waiting for a
	 * response.
	 */
	struct list_head cmd_list;

	/* Spinlock used for protecting the access to the mailbox commands list
	 * and the sending of the commands.
	 */
	spinlock_t cmd_lock;

	/* Flag to indicate whether sending an MFW mailbox command is blocked */
	bool b_block_cmd;

	/* Spinlock used for syncing SW link-changes and link-changes
	 * originating from attention context.
	 */
	spinlock_t link_lock;

	u32 public_base;
	u32 drv_mb_addr;
	u32 mfw_mb_addr;
	u32 port_addr;
	u16 drv_mb_seq;
	u16 drv_pulse_seq;
	struct qed_mcp_link_params link_input;
	struct qed_mcp_link_state link_output;
	struct qed_mcp_link_capabilities link_capabilities;
	struct qed_mcp_function_info func_info;
	u8 *mfw_mb_cur;
	u8 *mfw_mb_shadow;
	u16 mfw_mb_length;
	u32 mcp_hist;

	/* Capabilities negotiated with the MFW */
	u32 capabilities;
};

struct qed_mcp_mb_params {
	u32 cmd;
	u32 param;
	void *p_data_src;
	void *p_data_dst;
	u8 data_src_size;
	u8 data_dst_size;
	u32 mcp_resp;
	u32 mcp_param;
	u32 flags;
#define QED_MB_FLAG_CAN_SLEEP	(0x1 << 0)
#define QED_MB_FLAG_AVOID_BLOCK	(0x1 << 1)
#define QED_MB_FLAGS_IS_SET(params, flag) \
	({ typeof(params) __params = (params); \
	   (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
};

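/*
 * Illustrative sketch of the flags helper above (the command value below is
 * a placeholder; the real mailbox senders live in qed_mcp.c):
 *
 *	struct qed_mcp_mb_params mb_params;
 *
 *	memset(&mb_params, 0, sizeof(mb_params));
 *	mb_params.cmd = cmd;		... some DRV_MSG_CODE_* value
 *	mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
 *
 *	if (QED_MB_FLAGS_IS_SET(&mb_params, CAN_SLEEP))
 *		... the sending path may sleep between polls of the MFW
 */
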
struct qed_drv_tlv_hdr {
	u8 tlv_type;
	u8 tlv_length;	/* In dwords - not including this header */
	u8 tlv_reserved;
#define QED_DRV_TLV_FLAGS_CHANGED 0x01
	u8 tlv_flags;
};

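/*
 * Since tlv_length is in dwords and excludes the header, the total buffer
 * footprint of a TLV can be computed as in this sketch (hdr is assumed to
 * point at a valid header):
 *
 *	size_t total_bytes = sizeof(struct qed_drv_tlv_hdr) +
 *			     (size_t)hdr->tlv_length * sizeof(u32);
 */
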
/**
 * @brief Initialize the interface with the MCP
 *
 * @param p_hwfn - HW func
 * @param p_ptt - PTT required for register access
 *
 * @return int
 */
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt);

/**
 * @brief Initialize the port interface with the MCP
 *
 * @param p_hwfn
 * @param p_ptt
 * Can only be called after `num_ports_in_engines' is set
 */
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt);

/**
 * @brief Releases resources allocated during the init process.
 *
 * @param p_hwfn - HW func
 *
 * @return int
 */
int qed_mcp_free(struct qed_hwfn *p_hwfn);

/**
 * @brief This function is called from the DPC context. After
 * pointing PTT to the mfw mb, check for events sent by the MCP
 * to the driver and ack them. In case a critical event is
 * detected, it will be handled here, otherwise the work will be
 * queued to a sleepable work-queue.
 *
 * @param p_hwfn - HW function
 * @param p_ptt - PTT required for register access
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt);

enum qed_drv_role {
	QED_DRV_ROLE_OS,
	QED_DRV_ROLE_KDUMP,
};

struct qed_load_req_params {
	/* Input params */
	enum qed_drv_role drv_role;
	u8 timeout_val;
	bool avoid_eng_reset;
	enum qed_override_force_load override_force_load;

	/* Output params */
	u32 load_code;
};

/**
 * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
 * returns whether this PF is the first on the engine/port or function.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_params
 *
 * @return int - 0 - Operation was successful.
 */
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params);

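/*
 * Usage sketch (illustrative only; the meaning attached to timeout_val = 0
 * below is an assumption, not something stated in this header):
 *
 *	struct qed_load_req_params load_req_params;
 *	int rc;
 *
 *	memset(&load_req_params, 0, sizeof(load_req_params));
 *	load_req_params.drv_role = QED_DRV_ROLE_OS;
 *	load_req_params.timeout_val = 0;
 *	load_req_params.avoid_eng_reset = false;
 *
 *	rc = qed_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
 *	if (rc)
 *		return rc;
 *
 * (0 is assumed here to select the MFW's default timeout.) After a
 * successful call, load_req_params.load_code indicates whether this PF is
 * the first to load on the engine/port; qed_mcp_load_done() should follow
 * once initialization completes.
 */
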
/**
 * @brief Sends a LOAD_DONE message to the MFW
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int - 0 - Operation was successful.
 */
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Sends an UNLOAD_REQ message to the MFW
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int - 0 - Operation was successful.
 */
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Sends an UNLOAD_DONE message to the MFW
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int - 0 - Operation was successful.
 */
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Read the MFW mailbox into the current buffer.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt);

/**
 * @brief Ack to mfw that driver finished FLR process for VFs
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
 *
 * @return int - 0 upon success.
 */
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack);

/**
 * @brief - Called during init to read function-related info from shmem.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt);

/**
 * @brief - Reset the MCP using mailbox command.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt);

/**
 * @brief - Sends an NVM read command request to the MFW to get
 *        a buffer.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
 *        DRV_MSG_CODE_NVM_READ_NVRAM
 * @param param - [0:23] - Offset, [24:31] - Size
 * @param o_mcp_resp - MCP response
 * @param o_mcp_param - MCP response param
 * @param o_txn_size - Buffer size output
 * @param o_buf - Pointer to the buffer returned by the MFW.
 *
 * @return 0 upon success.
 */
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);

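/*
 * Illustrative sketch of the offset/size packing described above (the
 * offset value is a placeholder):
 *
 *	u32 buf[8];
 *	u32 resp, param, txn_size;
 *	u32 offset = 0x1000;
 *	u32 size = sizeof(buf);
 *	int rc;
 *
 *	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
 *				(offset & 0xffffff) | (size << 24),
 *				&resp, &param, &txn_size, buf);
 */
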
/**
 * @brief Read from sfp
 *
 * @param p_hwfn - hw function
 * @param p_ptt - PTT required for register access
 * @param port - transceiver port
 * @param addr - I2C address
 * @param offset - offset in sfp
 * @param len - buffer length
 * @param p_buf - buffer to read into
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);

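/*
 * Usage sketch (illustrative only): read the identifier byte of the module
 * on transceiver port 0. The 0xA0 I2C address is the conventional SFP
 * EEPROM address and is an assumption here, not something this API mandates.
 *
 *	u8 id;
 *	int rc;
 *
 *	rc = qed_mcp_phy_sfp_read(p_hwfn, p_ptt, 0, 0xA0, 0, 1, &id);
 */
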
/**
 * @brief indicates whether the MFW objects [under mcp_info] are accessible
 *
 * @param p_hwfn
 *
 * @return true iff MFW is running and mcp_info is initialized
 */
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);

/**
 * @brief request MFW to configure MSI-X for a VF
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf_id - absolute inside engine
 * @param num - number of entries to request
 *
 * @return int
 */
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num);

/**
 * @brief - Halt the MCP.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief - Wake up the MCP.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw);
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw);

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities);

/**
 * @brief - Sets the MFW's max value for the given resource
 *
 * @param p_hwfn
 * @param p_ptt
 * @param res_id
 * @param resc_max_val
 * @param p_mcp_resp
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp);

/**
 * @brief - Gets the MFW allocation info for the given resource
 *
 * @param p_hwfn
 * @param p_ptt
 * @param res_id
 * @param p_mcp_resp
 * @param p_resc_num
 * @param p_resc_start
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);

/**
 * @brief Send eswitch mode to MFW
 *
 * @param p_hwfn
 * @param p_ptt
 * @param eswitch - eswitch mode
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch);

#define QED_MCP_RESC_LOCK_MIN_VAL	RESOURCE_DUMP
#define QED_MCP_RESC_LOCK_MAX_VAL	31

enum qed_resc_lock {
	QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
	QED_RESC_LOCK_PTP_PORT0,
	QED_RESC_LOCK_PTP_PORT1,
	QED_RESC_LOCK_PTP_PORT2,
	QED_RESC_LOCK_PTP_PORT3,
	QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
	QED_RESC_LOCK_RESC_INVALID
};

/**
 * @brief - Initiates PF FLR
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

struct qed_resc_lock_params {
	/* Resource number [valid values are 0..31] */
	u8 resource;

	/* Lock timeout value in seconds [default, none or 1..254] */
	u8 timeout;
#define QED_MCP_RESC_LOCK_TO_DEFAULT	0
#define QED_MCP_RESC_LOCK_TO_NONE	255

	/* Number of times to retry locking */
	u8 retry_num;
#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT	10

	/* The interval in usec between retries */
	u16 retry_interval;
#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT	10000

	/* Use sleep or delay between retries */
	bool sleep_b4_retry;

	/* Will be set as true if the resource is free and granted */
	bool b_granted;

	/* Will be filled with the resource owner.
	 * [0..15 = PF0-15, 16 = MFW]
	 */
	u8 owner;
};

/**
 * @brief Acquires MFW generic resource lock
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_params
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);

struct qed_resc_unlock_params {
	/* Resource number [valid values are 0..31] */
	u8 resource;

	/* Allow releasing the resource even if it belongs to another PF */
	bool b_force;

	/* Will be set as true if the resource is released */
	bool b_released;
};

/**
 * @brief Releases MFW generic resource lock
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_params
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params);

/**
 * @brief - default initialization for lock/unlock resource structs
 *
 * @param p_lock - lock params struct to be initialized; Can be NULL
 * @param p_unlock - unlock params struct to be initialized; Can be NULL
 * @param resource - the requested resource
 * @param b_is_permanent - disable retries & aging when set
 */
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent);

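/*
 * Usage sketch (illustrative only; QED_RESC_LOCK_PTP_PORT0 is just an
 * example resource, and error handling is left to the caller's policy):
 *
 *	struct qed_resc_lock_params lock_params;
 *	struct qed_resc_unlock_params unlock_params;
 *	int rc;
 *
 *	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *				       QED_RESC_LOCK_PTP_PORT0, false);
 *
 *	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
 *	if (rc || !lock_params.b_granted)
 *		return rc ? rc : -EBUSY;
 *
 *	... access the resource shared with other PFs and the MFW ...
 *
 *	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 */
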
/**
 * @brief - Return whether the management firmware supports smart AN
 *
 * @param p_hwfn
 *
 * @return bool - true if the feature is supported.
 */
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);

/**
 * @brief Learn the features supported by the MFW; to be done during early init
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Inform the MFW of the set of features supported by the driver.
 * Should be done as part of handling the LOAD_REQ flow.
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Read ufp config from the shared memory.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Populate the nvm info shadow in the given hardware function
 *
 * @param p_hwfn
 */
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);

/**
 * @brief Get the engine affinity configuration.
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Get the PPFID bitmap.
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Get NVM config attribute value.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param option_id
 * @param entity_id
 * @param flags
 * @param p_buf
 * @param p_len
 */
int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 *p_len);

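/*
 * Usage sketch (illustrative only; the option_id/entity_id/flags values are
 * placeholders - the real ones are defined by the NVM configuration layout,
 * not by this header):
 *
 *	u8 cfg_buf[4];
 *	u32 cfg_len = sizeof(cfg_buf);
 *	u16 option_id = 0;
 *	u8 entity_id = 0;
 *	u16 flags = 0;
 *	int rc;
 *
 *	rc = qed_mcp_nvm_get_cfg(p_hwfn, p_ptt, option_id, entity_id, flags,
 *				 cfg_buf, &cfg_len);
 */
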
/**
 * @brief Set NVM config attribute value.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param option_id
 * @param entity_id
 * @param flags
 * @param p_buf
 * @param len
 */
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 len);
#endif