/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_CXT_H
#define _QED_CXT_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include "qed_hsi.h"
#include "qed.h"

struct qed_cxt_info {
	void			*p_cxt;
	u32			iid;
	enum protocol_type	type;
};

#define MAX_TID_BLOCKS                  512
struct qed_tid_mem {
	u32 tid_size;
	u32 num_tids_per_block;
	u32 waste;
	u8 *blocks[MAX_TID_BLOCKS];	/* 4K */
};

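/* Usage sketch (illustrative only, not part of the driver API): a consumer of
 * qed_cxt_get_tid_mem_info() would typically resolve a tid to its context by
 * indexing into the 4K blocks, along the lines of
 *
 *	u8 *ctx = p_info->blocks[tid / p_info->num_tids_per_block] +
 *		  (tid % p_info->num_tids_per_block) * p_info->tid_size;
 *
 * with 'waste' describing the unused bytes left at the end of each block.
 */
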
/**
 * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
 *
 * @p_hwfn: HW device data.
 * @p_info: In/out.
 *
 * Return: Int.
 */
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
			 struct qed_cxt_info *p_info);

/**
 * qed_cxt_get_tid_mem_info(): Returns the tid mem info.
 *
 * @p_hwfn: HW device data.
 * @p_info: In/out.
 *
 * Return: Int.
 */
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
			     struct qed_tid_mem *p_info);

#define QED_CXT_TCP_ULP_TID_SEG	PROTOCOLID_TCP_ULP
#define QED_CXT_ROCE_TID_SEG	PROTOCOLID_ROCE
#define QED_CXT_FCOE_TID_SEG	PROTOCOLID_FCOE
enum qed_cxt_elem_type {
	QED_ELEM_CXT,
	QED_ELEM_SRQ,
	QED_ELEM_TASK,
	QED_ELEM_XRC_SRQ,
};

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type, u32 *vf_cid);

/**
 * qed_cxt_set_pf_params(): Set the PF params for cxt init.
 *
 * @p_hwfn: HW device data.
 * @rdma_tasks: Requested maximum.
 *
 * Return: Int.
 */
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);

/**
 * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
 *
 * @p_hwfn: HW device data.
 * @last_line: Last_line.
 *
 * Return: Int.
 */
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);

/**
 * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased.
 *
 * @p_hwfn: HW device data.
 * @used_lines: Used lines.
 *
 * Return: Number of lines that can be decreased.
 */
u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);

/**
 * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_mngr_free(): Free the context manager struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_mngr_setup(): Reset the acquired CIDs.
 *
 * @p_hwfn: HW device data.
 */
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);

/**
 * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Void.
 */
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * qed_qm_init_pf(): Initialize the QM PF phase, per path.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 * @is_pf_loading: Is pf pending.
 *
 * Return: Void.
 */
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, bool is_pf_loading);

/**
 * qed_qm_reconf(): Reconfigure the QM PF on the fly.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int.
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

#define QED_CXT_PF_CID (0xff)

/**
 * qed_cxt_release_cid(): Release a cid.
 *
 * @p_hwfn: HW device data.
 * @cid: Cid.
 *
 * Return: Void.
 */
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);

/**
 * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue.
 *
 * @p_hwfn: HW device data.
 * @cid: Cid.
 * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
 *
 * Return: Void.
 */
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);

/**
 * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type.
 *
 * @p_hwfn: HW device data.
 * @type: Type.
 * @p_cid: Pointer cid.
 *
 * Return: Int.
 */
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			enum protocol_type type, u32 *p_cid);

/**
 * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type
 *                         for a vf-queue.
 *
 * @p_hwfn: HW device data.
 * @type: Type.
 * @p_cid: Pointer cid.
 * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
 *
 * Return: Int.
 */
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			 enum protocol_type type, u32 *p_cid, u8 vfid);

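/* Usage sketch (illustrative only, not taken from the driver): a typical
 * caller acquires a cid for its protocol, looks up the context and releases
 * the cid when done. PROTOCOLID_ETH is used here purely as an example
 * protocol_type value, and init_connection_context() is a hypothetical
 * helper.
 *
 *	struct qed_cxt_info cxt_info;
 *	u32 cid;
 *
 *	if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid))
 *		return -EINVAL;
 *
 *	cxt_info.iid = cid;
 *	if (!qed_cxt_get_cid_info(p_hwfn, &cxt_info))
 *		init_connection_context(cxt_info.p_cxt);
 *
 *	qed_cxt_release_cid(p_hwfn, cid);
 */
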
int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
			      enum qed_cxt_elem_type elem_type, u32 iid);
u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);

#define QED_CTX_WORKING_MEM 0
#define QED_CTX_FL_MEM 1
int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
			 u32 tid, u8 ctx_type, void **task_ctx);

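/* Usage sketch (illustrative only): resolve the working copy of a task
 * context for a given tid; QED_CTX_FL_MEM selects the force-load copy for
 * segments allocated with one (see has_fl_mem below). prepare_task() is a
 * hypothetical helper.
 *
 *	void *task_ctx;
 *
 *	if (!qed_cxt_get_task_ctx(p_hwfn, tid, QED_CTX_WORKING_MEM, &task_ctx))
 *		prepare_task(task_ctx);
 */
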
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES          PROTOCOLID_COMMON
#define NUM_TASK_TYPES          2
#define NUM_TASK_PF_SEGMENTS    4
#define NUM_TASK_VF_SEGMENTS    1

/* PF per protocol configuration object */
#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)

struct qed_tid_seg {
	u32 count;
	u8 type;
	bool has_fl_mem;
};

struct qed_conn_type_cfg {
	u32 cid_count;
	u32 cids_per_vf;
	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};

/* ILT Client configuration:
 * Per connection type (protocol) resources (cids, tis, vf cids etc.).
 * One block is needed for the connection context (CDUC), and each task
 * context segment needs two blocks: one for the regular task context and
 * one for the force-load memory.
 */
#define ILT_CLI_PF_BLOCKS       (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK                (0)
#define SRQ_BLK                 (0)
#define CDUT_SEG_BLK(n)         (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)   (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)

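/* Worked example (for illustration): with NUM_TASK_PF_SEGMENTS = 4 the PF
 * block indices above resolve to
 *
 *	CDUC_BLK                  = 0
 *	CDUT_SEG_BLK(0..3)        = 1..4
 *	CDUT_FL_SEG_BLK(0..3, PF) = 5..8
 *
 * i.e. exactly ILT_CLI_PF_BLOCKS = 1 + 4 * 2 = 9 entries. The VF layout
 * follows the same scheme with NUM_TASK_VF_SEGMENTS = 1.
 */
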
struct ilt_cfg_pair {
	u32 reg;
	u32 val;
};

struct qed_ilt_cli_blk {
	u32 total_size;		/* 0 means not active */
	u32 real_size_in_page;
	u32 start_line;
	u32 dynamic_line_offset;
	u32 dynamic_line_cnt;
};

struct qed_ilt_client_cfg {
	bool active;

	/* ILT boundaries */
	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;

	/* ILT client blocks for VFs */
	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
	u32 vf_total_lines;
};

struct qed_cid_acquired_map {
	u32		start_cid;
	u32		max_count;
	unsigned long	*cid_map;
};

struct qed_src_t2 {
	struct phys_mem_desc *dma_mem;
	u32 num_pages;
	u64 first_free;
	u64 last_free;
};

struct qed_cxt_mngr {
	/* Per protocol configuration */
	struct qed_conn_type_cfg	conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct qed_ilt_client_cfg	clients[MAX_ILT_CLIENTS];

	/* Task type sizes */
	u32 task_type_size[NUM_TASK_TYPES];

	/* total number of VFs for this hwfn -
	 * ALL VFs are symmetric in terms of HW resources
	 */
	u32 vf_count;
	u32 first_vf_in_pf;

	/* Acquired CIDs */
	struct qed_cid_acquired_map	acquired[MAX_CONN_TYPES];

	struct qed_cid_acquired_map
	acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];

	/* ILT shadow table */
	struct phys_mem_desc *ilt_shadow;
	u32 ilt_shadow_size;
	u32 pf_start_line;

	/* Mutex for a dynamic ILT allocation */
	struct mutex mutex;

	/* SRC T2 */
	struct qed_src_t2 src_t2;

	/* total number of SRQ's for this hwfn */
	u32 srq_count;
	u32 xrc_srq_count;

	/* Maximal number of L2 steering filters */
	u32 arfs_count;

	u16 iscsi_task_pages;
	u16 fcoe_task_pages;
	u16 roce_task_pages;
	u16 eth_task_pages;
	u16 task_ctx_size;
	u16 conn_ctx_size;
};

u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);

u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
			      enum ilt_clients ilt_client);

u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);

#endif