/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)

#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev)		((cdev)->b_is_vf)
#define IS_PF(cdev)		(!((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn)	(!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev)		(0)
#define IS_PF(cdev)		(1)
#define IS_PF_SRIOV(p_hwfn)	(0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)	(!!((p_hwfn)->pf_iov_info))
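
/* Illustrative sketch (not taken from the driver): the macros above are meant
 * to guard PF-only SR-IOV code paths, e.g. a hypothetical helper that checks
 * for a PF with the SR-IOV capability exposed and its per-PF IOV state
 * allocated:
 *
 *	static bool example_pf_has_sriov(struct qed_hwfn *p_hwfn)
 *	{
 *		return IS_PF(p_hwfn->cdev) && IS_PF_SRIOV(p_hwfn) &&
 *		       IS_PF_SRIOV_ALLOC(p_hwfn);
 *	}
 */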

#define QED_MAX_VF_CHAINS_PER_PF 16

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};

struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* IFLA_VF_LINK_STATE_<X> */
	int link_state;

	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
	int tx_rate;

	/* Trusted VFs can configure promiscuous mode.
	 * Also store shadow promisc configuration if needed.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
	u8 rx_accept_mode;
	u8 tx_accept_mode;
	bool accept_any_vlan;
};

struct qed_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; a different number of Rx and Tx queues
	 * is currently not supported.
	 */

	u16 num_queues;

	/* Allow the client to choose which qzones to use for Rx/Tx,
	 * and which queue_base to use for Tx queues on a per-queue basis.
	 * Note that values should be relative to the PF's resources.
	 */
	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};
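
/* Illustrative sketch (hypothetical values, not driver code): a PF-side caller
 * could fill the init params with identity-mapped queue zones before starting
 * a VF:
 *
 *	struct qed_iov_vf_init_params params = { 0 };
 *	int i;
 *
 *	params.rel_vf_id = 0;
 *	params.num_queues = 4;
 *	for (i = 0; i < params.num_queues; i++) {
 *		params.req_rx_queue[i] = i;
 *		params.req_tx_queue[i] = i;
 *	}
 */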

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of vfs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};

/* This mailbox is maintained per VF in its PF and contains all information
 * required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};

#define QED_IOV_LEGACY_QID_RX	(0)
#define QED_IOV_LEGACY_QID_TX	(1)
#define QED_IOV_QID_INVALID	(0xFE)

struct qed_vf_queue_cid {
	bool b_is_tx;
	struct qed_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct qed_vf_queue {
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_ENABLED,		/* VF, Enabled */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};

struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct qed_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;
	bool req_spoofchk_val;

	/* Stores the configuration requested by VF */
	struct qed_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK	((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};
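
/* Illustrative sketch (assumption, not driver code): configured_features is
 * keyed by the bulletin valid-map bit numbers, so with a hypothetical "vf"
 * pointer the forced-address state could be tested as:
 *
 *	bool mac_forced = !!(vf->configured_features & BIT(MAC_ADDR_FORCED));
 *	bool vlan_forced = !!(vf->configured_features & BIT(VLAN_ADDR_FORCED));
 *	bool any_forced = !!(vf->configured_features &
 *			     QED_IOV_CONFIGURED_FEATURES_MASK);
 */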

/* This structure is part of qed_hwfn and used only for PFs that have the
 * SR-IOV capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Message addresses are allocated contiguously and split among the VFs */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
	QED_IOV_WQ_TRUST_FLAG,
	QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};

extern const struct qed_iov_hv_ops qed_iov_ops_pass;

#ifdef CONFIG_QED_SRIOV
/**
 * qed_iov_is_valid_vfid(): Check if the given VF ID @vfid is valid
 *                          w.r.t. the @b_enabled_only value;
 *                          if b_enabled_only = true, only an enabled
 *                          VF ID is valid,
 *                          else any VF ID less than max_vfs is valid.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: Relative VF ID.
 * @b_enabled_only: Consider only enabled VFs.
 * @b_non_malicious: True iff we want to validate that the VF isn't malicious.
 *
 * Return: bool - true for a valid VF ID.
 */
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious);

/**
 * qed_iov_get_next_active_vf(): Given a VF index, return index of
 *                               next [including that] active VF.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: VF ID.
 *
 * Return: MAX_NUM_VFS in case no further active VFs, otherwise index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port);

/**
 * qed_iov_hw_info(): Read SR-IOV related information and allocate resources;
 *                    reads from configuration space, shmem, etc.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * qed_add_tlv(): Place a given TLV on the TLV buffer at the next offset.
 *
 * @p_hwfn: HW device data.
 * @offset: Offset.
 * @type: Type.
 * @length: Length.
 *
 * Return: Pointer to the newly placed TLV.
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
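
/* Illustrative sketch (assumption): a reply is built by chaining TLVs through
 * the running "offset" pointer and terminating the list. The TLV type names
 * and the pfvf_def_resp_tlv / channel_list_end_tlv structures are assumed to
 * come from qed_vf.h; "mbx" stands for a struct qed_iov_vf_mbx pointer.
 *
 *	u8 *offset = (u8 *)mbx->reply_virt;
 *	struct pfvf_def_resp_tlv *resp;
 *
 *	resp = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE, sizeof(*resp));
 *	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */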

/**
 * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer
 *
 * @p_hwfn: HW device data.
 * @tlvs_list: Tlvs_list.
 *
 * Return: Void.
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
 *
 * @p_hwfn: HW device data.
 * @p_data: Pointer to data.
 *
 * Return: Void.
 */
void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
			      struct fw_err_data *p_data);

/**
 * qed_sriov_eqe_event(): Callback for SRIOV events.
 *
 * @p_hwfn: HW device data.
 * @opcode: Opcode.
 * @echo: Echo.
 * @data: data
 * @fw_return_code: FW return code.
 *
 * Return: Int.
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
			union event_ring_data *data, u8 fw_return_code);

/**
 * qed_iov_alloc(): allocate sriov related resources
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_setup(): setup sriov related resources
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_free(): free sriov related resources
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_free_hw_info(): free sriov related memory that was
 *                         allocated during hw_prepare
 *
 * @cdev: Qed dev pointer.
 *
 * Return: Void.
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed.
 *
 * @p_hwfn: HW device data.
 * @disabled_vfs: bitmask of all VFs on path that were FLRed
 *
 * Return: true iff one of the PF's vfs got FLRed. false otherwise.
 */
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer.
 *
 * @p_hwfn: HW device data.
 * @p_tlvs_list: Pointer to tlvs list
 * @req_type: Type of TLV
 *
 * Return: pointer to tlv type if found, otherwise returns NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
	return false;
}

static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
			       u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				       u32 *disabled_vfs)
{
	return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}

static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
					    struct fw_err_data *p_data)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
				      __le16 echo, union event_ring_data *data,
				      u8 fw_return_code)
{
	return 0;
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)				\
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0);	\
	     _i < MAX_NUM_VFS;					\
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
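
/* Illustrative sketch (assumption, not driver code): iterating over the
 * currently active VFs of a PF and looking up each entry in the vfs_array
 * declared in struct qed_pf_iov above. The DP_VERBOSE/QED_MSG_IOV logging
 * names are assumed from the qed driver.
 *
 *	int i;
 *
 *	qed_for_each_vf(p_hwfn, i) {
 *		struct qed_vf_info *vf = &p_hwfn->pf_iov_info->vfs_array[i];
 *
 *		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF %d is active\n",
 *			   vf->abs_vf_id);
 *	}
 */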
#endif