/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

enum cminterface {
	MCM_SEC,
	MCM_PRI,
	UCM_SEC,
	UCM_PRI,
	TCM_SEC,
	TCM_PRI,
	YCM_SEC,
	YCM_PRI,
	XCM_SEC,
	XCM_PRI,
	NUM_OF_CM_INTERFACES
};

/* general constants */
#define QM_PQ_MEM_4KB(pq_size)	((pq_size) ? DIV_ROUND_UP(((pq_size) + 1) * \
							  QM_PQ_ELEMENT_SIZE, \
							  0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	((pq_size) ? DIV_ROUND_UP((pq_size), \
								  0x100) - 1 : 0)
#define QM_INVALID_PQ_ID		0xffff
/* feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF		4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND		62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0
#define QM_WFQ_VP_PQ_PF_SHIFT		5
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL		43750000

/* RL constants */
#define QM_RL_UPPER_BOUND		62500000
#define QM_RL_PERIOD			5	/* in us */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL		43750000
#define QM_RL_INC_VAL(rate)		max_t(u32, \
					      (u32)((((rate) ? (rate) : \
						      1000000) * \
						     QM_RL_PERIOD * \
						     101) / (8 * 100)), 1)
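
/* A reading of QM_RL_INC_VAL(), assuming the rate argument is in Mb/s:
 * rate * QM_RL_PERIOD / 8 is the byte budget per rate-limiter period, and
 * the 101/100 factor adds roughly 1% headroom. For example, a 10000 Mb/s
 * rate yields max_t(u32, (10000 * 5 * 101) / 800, 1) = 6312 credit units
 * per 5 us period; a zero rate falls back to 1000000 so the increment
 * never degenerates to zero.
 */
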
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES		150
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + (voq) * \
	 (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
	  PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + (voq) * \
	 (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
	  PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS		38
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR		10
#define BTB_PURE_LB_RATIO		7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		0x2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
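
/* Layout implied by the offsets/shifts above: the stop command is two u32
 * words (QM_STOP_CMD_STRUCT_SIZE). Word 0 carries the 32-bit PQ pause
 * mask; word 1 carries the PQ group ID in bits 16-19 and the PQ type in
 * bit 24. qed_send_qm_stop_cmd() below fills these fields via
 * QM_CMD_SET_FIELD() and sends the two words as the command data LSB/MSB.
 */
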
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)		cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
	((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port)	(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port) \
	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phy_tcs_pr_port) : \
	 LB_VOQ(port))
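
/* Worked example of the VOQ mapping, assuming max_phys_tcs_per_port = 4:
 * port 1, tc 2 is a physical TC (tc < LB_TC), so it maps to
 * PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = VOQ 6, while the pure LB TC of the same
 * port maps past all physical VOQs to LB_VOQ(1) = MAX_PHYS_VOQS + 1.
 */
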
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		/* enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (1 << MAX_NUM_VOQS) - 1);
		/* write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		/* set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_RL_UPPER_BOUND);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);
	/* set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 voq, u16 cmdq_lines)
{
	u32 qm_line_crd;

	/* In A0 - limit the size of the pbf queue so that only 511 commands
	 * with the minimum size of 4 (FCoE minimum size) can be stored in it.
	 */
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);

	if (is_bb_a0)
		cmdq_lines = min_t(u32, cmdq_lines, 1022);
	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, voq, port_id, num_tcs_in_port;

	/* clear PBF lines for all VOQs */
	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (port_params[port_id].active) {
			u16 phys_lines, phys_lines_per_tc;

			/* find #lines to divide between active phys TCs */
			phys_lines = port_params[port_id].num_pbf_cmd_lines -
				     PBF_CMDQ_PURE_LB_LINES;
			/* find #lines per active physical TC */
			num_tcs_in_port = 0;
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) == 1)
					num_tcs_in_port++;
			}

			phys_lines_per_tc = phys_lines / num_tcs_in_port;
			/* init registers per active TC */
			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
				if (((port_params[port_id].active_phys_tcs >>
				      tc) & 0x1) != 1)
					continue;

				voq = PHYS_VOQ(port_id, tc,
					       max_phys_tcs_per_port);
				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
							   phys_lines_per_tc);
			}

			/* init registers for pure LB TC */
			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
						   PBF_CMDQ_PURE_LB_LINES);
		}
	}
}

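/* Prepare runtime init values to allocate BTB blocks for the specified
 * port's VOQs. A reading of the fixed-point arithmetic below: after
 * headroom is subtracted, the pure LB TC is guaranteed about
 * usable_blocks / (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO)
 * blocks, never fewer than BTB_JUMBO_PKT_BLOCKS, and the remaining blocks
 * are split evenly between the active physical TCs.
 */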
static void qed_btb_blocks_rt_init(
	struct qed_hwfn *p_hwfn,
	u8 max_ports_per_engine,
	u8 max_phys_tcs_per_port,
	struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u32 temp;

		if (!port_params[port_id].active)
			continue;

		/* subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* find blocks per physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		}

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) != 1)
				continue;

			voq = PHYS_VOQ(port_id, tc,
				       max_phys_tcs_per_port);
			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
				     phys_blocks);
		}

		/* init pure LB TC */
		temp = LB_VOQ(port_id);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
			     pure_lb_blocks);
	}
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_ptt *p_ptt,
	struct qed_qm_pf_rt_init_params *p_params,
	u32 base_mem_addr_4kb)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
			    QM_PF_QUEUE_GROUP_SIZE;
	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
	u16 i, pq_id, pq_group;

	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);
		bool is_vf_pq = (i >= p_params->num_pf_pqs);
		struct qm_rf_pq_map tx_pq_map;

		/* update first Tx PQ of VPORT/TC */
		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
				    p_params->start_vport;
		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			/* create new VP PQ */
			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;
			/* map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     first_tx_pq_id,
				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
				     (p_params->pf_id <<
				      QM_WFQ_VP_PQ_PF_SHIFT));
		}
		/* fill PQ map entry */
		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
			  p_params->pq_params[i].rl_valid ? 1 : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
			  p_params->pq_params[i].rl_valid ?
			  p_params->pq_params[i].vport_id : 0);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
			  p_params->pq_params[i].wrr_group);
		/* write PQ map entry to CAM */
		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
			     *((u32 *)&tx_pq_map));
		/* set base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		/* check if VF PQ */
		if (is_vf_pq) {
			/* if PQ is associated with a VF, add indication
			 * to PQ VF mask
			 */
			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
				(1 << (pq_id % tx_pq_vf_mask_width));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++) {
		if (tx_pq_vf_mask[i]) {
			u32 addr;

			addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
			STORE_RT_REG(p_hwfn, addr,
				     tx_pq_vf_mask[i]);
		}
	}
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 port_id,
				     u8 pf_id,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u16 i, pq_id;

	/* a single other PQ group is used in each PF,
	 * where PQ group i is used in PF i.
	 */
	u16 pq_group = pf_id;
	u32 pq_size = num_pf_cids + num_tids;
	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	u32 mem_addr_4kb = base_mem_addr_4kb;

	/* map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));
	/* set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));
	/* set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	u32 crd_reg_offset;
	u32 inc_val;
	u16 i;

	if (p_params->pf_id < MAX_NUM_PFS_BB)
		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
	else
		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
				 (p_params->pf_id % MAX_NUM_PFS_BB);

	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
			     p_params->max_phys_tcs_per_port);

		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
				 QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);
	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u32 inc_val;
	u8 tc, i;

	/* go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}

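/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */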
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}

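/* Poll on the QM SDM command-ready status until it is set or the poll
 * budget (QM_STOP_CMD_MAX_POLL_COUNT polls of QM_STOP_CMD_POLL_PERIOD_US
 * each) is exhausted. Return false on timeout.
 */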
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0;
	     i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

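/* Send a QM command over the SDM interface: wait until the SDM is ready,
 * write the command address and the two data words, pulse the GO bit, and
 * wait for the SDM to become ready again.
 */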
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/
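/* Compute the size of the memory the specified PF needs for its PQs, in
 * units of 4KB pages: one QM_PQ_MEM_4KB() region per PF Tx PQ, per VF Tx
 * PQ and per "other" PQ. As a worked example, assuming QM_PQ_ELEMENT_SIZE
 * is 4 bytes (its actual value lives in qed_hsi.h), 64 PF CIDs need
 * DIV_ROUND_UP((64 + 1) * 4, 0x1000) = 1 page per PF PQ.
 */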
u32 qed_qm_pf_mem_size(u8 pf_id,
		       u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}

int qed_qm_common_rt_init(
	struct qed_hwfn *p_hwfn,
	struct qed_qm_common_rt_init_params *p_params)
{
	/* init AFullOprtnstcCrdMask */
	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
		   (p_params->pf_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
		   (p_params->vport_wfq_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
		   (p_params->pf_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
		   (p_params->vport_rl_en <<
		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
		   (QM_OPPOR_FW_STOP_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
		   (QM_OPPOR_PQ_EMPTY_DEF <<
		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
	qed_cmdq_lines_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	qed_btb_blocks_rt_init(p_hwfn,
			       p_params->max_ports_per_engine,
			       p_params->max_phys_tcs_per_port,
			       p_params->port_params);
	return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      struct qed_qm_pf_rt_init_params *p_params)
{
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
					       p_params->num_tids) *
				 QM_OTHER_PQS_PER_PF;
	u8 tc, i;

	/* clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < p_params->num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* map Other PQs (if any) */
	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
				 p_params->num_pf_cids, p_params->num_tids, 0);

	/* map Tx PQs */
	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

	if (p_params->pf_wfq)
		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
			return -1;

	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
		return -1;

	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
		return -1;

	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
				 p_params->num_vports, vport_params))
		return -1;

	return 0;
}

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
	return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
	u8 tc;

	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		u16 vport_pq_id = first_tx_pq_id[tc];

		if (vport_pq_id != QM_INVALID_PQ_ID)
			qed_wr(p_hwfn, p_ptt,
			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
			       inc_val);
	}

	return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  bool is_release_cmd,
			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

	/* set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* if last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) ||
		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
					 GROUP_ID,
					 pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
					     cmd_arr[0], cmd_arr[1]))
				return false;
			pq_mask = 0;
		}
	}

	return true;
}
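
/* Usage sketch (an illustration, not a caller from this file): pausing and
 * then releasing 64 Tx PQs starting at PQ 100 takes two calls:
 *
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 100, 64);
 *	...
 *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 100, 64);
 *
 * Each call walks the range in 32-PQ groups and issues one command per
 * group; with is_release_cmd set the pause mask stays zero, which
 * un-pauses the PQs.
 */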

static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
	if (enable)
		set_bit(bit, var);
	else
		clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT	-188897008

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, bool vxlan_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
	       vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
	       ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u16 dest_port)
{
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   bool eth_geneve_enable, bool ip_geneve_enable)
{
	unsigned long reg_val = 0;
	u8 shift;

	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);

	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);

	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* comp ver */
	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);

	/* EDPM with geneve tunnel not supported in BB_B0 */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		return;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
	       eth_geneve_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
	       ip_geneve_enable ? 1 : 0);
}