v3.1
  1/*
  2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3 * All rights reserved
  4 * www.brocade.com
  5 *
  6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7 *
  8 * This program is free software; you can redistribute it and/or modify it
  9 * under the terms of the GNU General Public License (GPL) Version 2 as
 10 * published by the Free Software Foundation
 11 *
 12 * This program is distributed in the hope that it will be useful, but
 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 15 * General Public License for more details.
 16 */
 17
 18#include "bfad_drv.h"
 19#include "bfa_ioc.h"
 20#include "bfi_reg.h"
 21#include "bfa_defs.h"
 22
 23BFA_TRC_FILE(CNA, IOC_CT);
 24
 25#define bfa_ioc_ct_sync_pos(__ioc)      \
 26		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
 27#define BFA_IOC_SYNC_REQD_SH    16
 28#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
 29#define bfa_ioc_ct_clear_sync_ackd(__val)       (__val & 0xffff0000)
 30#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
 31#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
 32			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
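/*
 * Layout of the ioc_fail_sync register implied by the macros above:
 * bits [15:0] hold the per-PCI-fn "sync acked" flags and bits [31:16]
 * the corresponding "sync required" flags, one bit per PCI function.
 */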
 33
 34/*
 35 * forward declarations
 36 */
 37static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
 38static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
 39static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 40static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
 41static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
 42static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
 43static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
 44static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
 45static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
 46
 47static struct bfa_ioc_hwif_s hwif_ct;
 48static struct bfa_ioc_hwif_s hwif_ct2;
 49
 50/*
 51 * Return true if firmware of current driver matches the running firmware.
 52 */
 53static bfa_boolean_t
 54bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 55{
 56	enum bfi_ioc_state ioc_fwstate;
 57	u32 usecnt;
 58	struct bfi_ioc_image_hdr_s fwhdr;
 59
 60	/*
 61	 * If BIOS boot (flash based) -- do not increment usage count
 62	 */
 63	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
 64						BFA_IOC_FWIMG_MINSZ)
 65		return BFA_TRUE;
 66
 67	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 68	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 69
 70	/*
 71	 * If usage count is 0, always return TRUE.
 72	 */
 73	if (usecnt == 0) {
 74		writel(1, ioc->ioc_regs.ioc_usage_reg);
 75		readl(ioc->ioc_regs.ioc_usage_sem_reg);
 76		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 77		writel(0, ioc->ioc_regs.ioc_fail_sync);
 78		bfa_trc(ioc, usecnt);
 79		return BFA_TRUE;
 80	}
 81
 82	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 83	bfa_trc(ioc, ioc_fwstate);
 84
 85	/*
 86	 * Use count cannot be non-zero while the chip is in the uninitialized state.
 87	 */
 88	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
 89
 90	/*
 91	 * Check if another driver with a different firmware is active
 92	 */
 93	bfa_ioc_fwver_get(ioc, &fwhdr);
 94	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
 95		readl(ioc->ioc_regs.ioc_usage_sem_reg);
 96		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 97		bfa_trc(ioc, usecnt);
 98		return BFA_FALSE;
 99	}
100
101	/*
102	 * Same firmware version. Increment the reference count.
103	 */
104	usecnt++;
105	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
106	readl(ioc->ioc_regs.ioc_usage_sem_reg);
107	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
108	bfa_trc(ioc, usecnt);
109	return BFA_TRUE;
110}
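/*
 * Note on the release sequence above: a held HOST_SEM register is read
 * back before writing 1 to clear it; as explained in
 * bfa_ioc_ct_ownership_reset() below, writing 1 to an unlocked
 * semaphore would lock it instead of clearing it.
 */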
111
112static void
113bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
114{
115	u32 usecnt;
116
117	/*
118	 * If BIOS boot (flash based) -- do not decrement usage count
119	 */
120	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
121						BFA_IOC_FWIMG_MINSZ)
122		return;
123
124	/*
125	 * decrement usage count
126	 */
127	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
128	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
129	WARN_ON(usecnt <= 0);
130
131	usecnt--;
132	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
133	bfa_trc(ioc, usecnt);
134
135	readl(ioc->ioc_regs.ioc_usage_sem_reg);
136	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
137}
138
139/*
140 * Notify other functions on HB failure.
141 */
142static void
143bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
144{
145	if (bfa_ioc_is_cna(ioc)) {
146		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
147		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
148		/* Wait for halt to take effect */
149		readl(ioc->ioc_regs.ll_halt);
150		readl(ioc->ioc_regs.alt_ll_halt);
151	} else {
152		writel(~0U, ioc->ioc_regs.err_set);
153		readl(ioc->ioc_regs.err_set);
154	}
155}
156
157/*
158 * Host to LPU mailbox message addresses
159 */
160static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
161	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
162	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
163	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
164	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
165};
166
167/*
168 * Host <-> LPU mailbox command/status registers - port 0
169 */
170static struct { u32 hfn, lpu; } ct_p0reg[] = {
171	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
172	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
173	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
174	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
175};
176
177/*
178 * Host <-> LPU mailbox command/status registers - port 1
179 */
180static struct { u32 hfn, lpu; } ct_p1reg[] = {
181	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
182	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
183	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
184	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
185};
186
187static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
188	ct2_reg[] = {
189	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
190	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
191	  CT2_HOSTFN_LPU0_READ_STAT},
192	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
193	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
194	  CT2_HOSTFN_LPU1_READ_STAT},
195};
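/*
 * Note that ct_fnreg[]/ct_p0reg[]/ct_p1reg[] above are indexed by PCI
 * function, while ct2_reg[] is indexed by port id -- compare
 * bfa_ioc_ct_reg_init() and bfa_ioc_ct2_reg_init() below.
 */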
196
197static void
198bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
199{
200	void __iomem *rb;
201	int		pcifn = bfa_ioc_pcifn(ioc);
202
203	rb = bfa_ioc_bar0(ioc);
204
205	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
206	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
207	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
208
209	if (ioc->port_id == 0) {
210		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
211		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
212		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
213		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
214		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
215		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
216		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
217	} else {
218		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
219		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
220		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
221		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
222		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
223		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
224		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
225	}
226
227	/*
228	 * PSS control registers
229	 */
230	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
231	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
232	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
233	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
234
235	/*
236	 * IOC semaphore registers and serialization
237	 */
238	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
239	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
240	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
241	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
242	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
243
244	/*
245	 * sram memory access
246	 */
247	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
248	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
249
250	/*
251	 * err set reg : for notification of hb failure in fcmode
252	 */
253	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
254}
255
256static void
257bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
258{
259	void __iomem *rb;
260	int	port = bfa_ioc_portid(ioc);
261
262	rb = bfa_ioc_bar0(ioc);
263
264	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
265	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
266	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
267	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
268	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
269	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
270
271	if (port == 0) {
272		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
273		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
274		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
275		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
276		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
277	} else {
278		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
279		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
280		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
281		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
282		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
283	}
284
285	/*
286	 * PSS control registers
287	 */
288	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
289	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
290	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
291	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
292
293	/*
294	 * IOC semaphore registers and serialization
295	 */
296	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
297	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
298	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
299	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
300	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
301
302	/*
303	 * sram memory access
304	 */
305	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
306	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
307
308	/*
309	 * err set reg : for notification of hb failure in fcmode
310	 */
311	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
312}
313
314/*
315 * Initialize IOC to port mapping.
316 */
317
318#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
319static void
320bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
321{
322	void __iomem *rb = ioc->pcidev.pci_bar_kva;
323	u32	r32;
324
325	/*
326	 * For catapult, base port id on personality register and IOC type
327	 */
328	r32 = readl(rb + FNC_PERS_REG);
329	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
330	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
331
332	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
333	bfa_trc(ioc, ioc->port_id);
334}
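/*
 * Worked example: for PCI fn 2, FNC_PERS_FN_SHIFT(2) = 16, so the
 * shift above moves that function's personality byte (bits 23..16 of
 * FNC_PERS_REG) down to bits 7..0 before the port id is masked out
 * with __F0_PORT_MAP_MK/__F0_PORT_MAP_SH.
 */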
335
336static void
337bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
338{
339	void __iomem	*rb = ioc->pcidev.pci_bar_kva;
340	u32	r32;
341
342	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
343	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
344
345	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
346	bfa_trc(ioc, ioc->port_id);
347}
348
349/*
350 * Set interrupt mode for a function: INTX or MSIX
351 */
352static void
353bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
354{
355	void __iomem *rb = ioc->pcidev.pci_bar_kva;
356	u32	r32, mode;
357
358	r32 = readl(rb + FNC_PERS_REG);
359	bfa_trc(ioc, r32);
360
361	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
362		__F0_INTX_STATUS;
363
364	/*
365	 * If already in desired mode, do not change anything
366	 */
367	if ((!msix && mode) || (msix && !mode))
368		return;
369
370	if (msix)
371		mode = __F0_INTX_STATUS_MSIX;
372	else
373		mode = __F0_INTX_STATUS_INTA;
374
375	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
376	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
377	bfa_trc(ioc, r32);
378
379	writel(r32, rb + FNC_PERS_REG);
380}
381
382bfa_boolean_t
383bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
384{
385	u32	r32;
386
387	r32 = readl(ioc->ioc_regs.lpu_read_stat);
388	if (r32) {
389		writel(1, ioc->ioc_regs.lpu_read_stat);
390		return BFA_TRUE;
391	}
392
393	return BFA_FALSE;
394}
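/*
 * lpu_read_stat is handled as write-1-to-clear here: a non-zero read
 * means an LPU read event is pending, and writing 1 acknowledges it.
 */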
395
396/*
397 * Cleanup hw semaphore and usecnt registers
398 */
399static void
400bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
401{
402
403	if (bfa_ioc_is_cna(ioc)) {
404		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
405		writel(0, ioc->ioc_regs.ioc_usage_reg);
406		readl(ioc->ioc_regs.ioc_usage_sem_reg);
407		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
408	}
409
410	/*
411	 * Read the hw sem reg to make sure that it is locked
412	 * before we clear it. If it is not locked, writing 1
413	 * will lock it instead of clearing it.
414	 */
415	readl(ioc->ioc_regs.ioc_sem_reg);
416	writel(1, ioc->ioc_regs.ioc_sem_reg);
417}
418
419static bfa_boolean_t
420bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
421{
422	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
423	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
424
425	/*
426	 * Driver load time.  If the sync required bit for this PCI fn
427	 * is set, it is due to an unclean exit by the driver for this
428	 * PCI fn in the previous incarnation. Whoever comes here first
429	 * should clean it up, no matter which PCI fn.
430	 */
431
432	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
433		writel(0, ioc->ioc_regs.ioc_fail_sync);
434		writel(1, ioc->ioc_regs.ioc_usage_reg);
435		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
436		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
437		return BFA_TRUE;
438	}
439
440	return bfa_ioc_ct_sync_complete(ioc);
441}
442
443/*
444 * Synchronized IOC failure processing routines
445 */
446static void
447bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
448{
449	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
450	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
451
452	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
453}
454
455static void
456bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
457{
458	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
459	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
460					bfa_ioc_ct_sync_pos(ioc);
461
462	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
463}
464
465static void
466bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
467{
468	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
469
470	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
471		ioc->ioc_regs.ioc_fail_sync);
472}
473
474static bfa_boolean_t
475bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
476{
477	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
478	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
479	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
480	uint32_t tmp_ackd;
481
482	if (sync_ackd == 0)
483		return BFA_TRUE;
484
485	/*
486	 * The check below is to see whether any other PCI fn
487	 * has reinitialized the ASIC (reset sync_ackd bits)
488	 * and failed again while this IOC was waiting for hw
489	 * semaphore (in bfa_iocpf_sm_semwait()).
490	 */
491	tmp_ackd = sync_ackd;
492	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
493		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
494		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
495
496	if (sync_reqd == sync_ackd) {
497		writel(bfa_ioc_ct_clear_sync_ackd(r32),
498			ioc->ioc_regs.ioc_fail_sync);
499		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
500		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
501		return BFA_TRUE;
502	}
503
504	/*
505	 * If another PCI fn reinitialized and failed again while
506	 * this IOC was waiting for hw sem, the sync_ackd bit for
507	 * this IOC needs to be set again to allow reinitialization.
508	 */
509	if (tmp_ackd != sync_ackd)
510		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
511
512	return BFA_FALSE;
513}
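/*
 * Example of the sync protocol above with two PCI fns: both join, so
 * sync_reqd = 0x3.  When fn0 acks, sync_ackd = 0x1 != sync_reqd and
 * the completion check returns BFA_FALSE; when fn1 acks, sync_ackd =
 * 0x3 == sync_reqd, so the last acker clears the ackd bits, marks both
 * fwstate registers BFI_IOC_FAIL and returns BFA_TRUE.
 */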
514
515/**
516 * Called from bfa_ioc_attach() to map asic specific calls.
517 */
518static void
519bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
520{
521	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
522	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
523	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
524	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
525	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
526	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
527	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
528	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
529	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
530}
531
532/**
533 * Called from bfa_ioc_attach() to map asic specific calls.
534 */
535void
536bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
537{
538	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
539
540	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
541	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
542	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
543	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
544	ioc->ioc_hwif = &hwif_ct;
545}
546
547/**
548 * Called from bfa_ioc_attach() to map asic specific calls.
549 */
550void
551bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
552{
553	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
554
555	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
556	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
557	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
558	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
559	hwif_ct2.ioc_isr_mode_set = NULL;
560	ioc->ioc_hwif = &hwif_ct2;
561}
562
563/*
564 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
565 */
566#define HOSTFN_MSIX_DEFAULT		64
567#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
568#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
569#define __MSIX_VT_NUMVT__MK		0x003ff800
570#define __MSIX_VT_NUMVT__SH		11
571#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
572#define __MSIX_VT_OFST_			0x000007ff
573void
574bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
575{
576	void __iomem *rb = ioc->pcidev.pci_bar_kva;
577	u32	r32;
578
579	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
580	if (r32 & __MSIX_VT_NUMVT__MK) {
581		writel(r32 & __MSIX_VT_OFST_,
582			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
583		return;
584	}
585
586	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
587		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
588		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
589	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
590		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
591}
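/*
 * In other words: when no ASIC block has programmed the vector table,
 * each PCI function is given HOSTFN_MSIX_DEFAULT (64) vectors starting
 * at 64 * pcifn, and the mailbox-error vector index is set to the base
 * of that window.
 */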
592
593bfa_status_t
594bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
595{
596	u32	pll_sclk, pll_fclk, r32;
597	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
598
599	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
600		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
601		__APP_PLL_SCLK_JITLMT0_1(3U) |
602		__APP_PLL_SCLK_CNTLMT0_1(1U);
603	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
604		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
605		__APP_PLL_LCLK_JITLMT0_1(3U) |
606		__APP_PLL_LCLK_CNTLMT0_1(1U);
607
608	if (fcmode) {
609		writel(0, (rb + OP_MODE));
610		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
611			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
612	} else {
613		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
614		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
615	}
616	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
617	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
618	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
619	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
620	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
621	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
622	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
623	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
624	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
625			rb + APP_PLL_SCLK_CTL_REG);
626	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
627			rb + APP_PLL_LCLK_CTL_REG);
628	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
629		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
630	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
631		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
632	readl(rb + HOSTFN0_INT_MSK);
633	udelay(2000);
634	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
635	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
636	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
637	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
638
639	if (!fcmode) {
640		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
641		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
642	}
643	r32 = readl((rb + PSS_CTL_REG));
644	r32 &= ~__PSS_LMEM_RESET;
645	writel(r32, (rb + PSS_CTL_REG));
646	udelay(1000);
647	if (!fcmode) {
648		writel(0, (rb + PMM_1T_RESET_REG_P0));
649		writel(0, (rb + PMM_1T_RESET_REG_P1));
650	}
651
652	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
653	udelay(1000);
654	r32 = readl((rb + MBIST_STAT_REG));
655	writel(0, (rb + MBIST_CTL_REG));
656	return BFA_STATUS_OK;
657}
658
659static void
660bfa_ioc_ct2_sclk_init(void __iomem *rb)
661{
662	u32 r32;
663
664	/*
665	 * put s_clk PLL and PLL FSM in reset
666	 */
667	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
668	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
669	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
670		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
671	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
672
673	/*
674	 * Ignore mode and program for the max clock (which is FC16)
675	 * Firmware/NFC will do the PLL init appropriately
676	 */
677	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
678	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
679	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
680
681	/*
682	 * while doing PLL init, don't clock gate the ethernet subsystem
683	 */
684	r32 = readl((rb + CT2_CHIP_MISC_PRG));
685	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
686
687	r32 = readl((rb + CT2_PCIE_MISC_REG));
688	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
689
690	/*
691	 * set sclk value
692	 */
693	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
694	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
695		__APP_PLL_SCLK_CLK_DIV2);
696	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
697
698	/*
699	 * poll for s_clk lock or delay 1ms
700	 */
701	udelay(1000);
702}
703
704static void
705bfa_ioc_ct2_lclk_init(void __iomem *rb)
706{
707	u32 r32;
708
709	/*
710	 * put l_clk PLL and PLL FSM in reset
711	 */
712	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
713	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
714	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
715		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
716	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
717
718	/*
719	 * set LPU speed (set for FC16 which will work for other modes)
720	 */
721	r32 = readl((rb + CT2_CHIP_MISC_PRG));
722	writel(r32, (rb + CT2_CHIP_MISC_PRG));
723
724	/*
725	 * set LPU half speed (set for FC16 which will work for other modes)
726	 */
727	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
728	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
729
730	/*
731	 * set lclk for mode (set for FC16)
732	 */
733	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
734	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
735	r32 |= 0x20c1731b;
736	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
737
738	/*
739	 * poll for l_clk lock or delay 1ms
740	 */
741	udelay(1000);
742}
743
744static void
745bfa_ioc_ct2_mem_init(void __iomem *rb)
746{
747	u32	r32;
748
749	r32 = readl((rb + PSS_CTL_REG));
750	r32 &= ~__PSS_LMEM_RESET;
751	writel(r32, (rb + PSS_CTL_REG));
752	udelay(1000);
753
754	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
755	udelay(1000);
756	writel(0, (rb + CT2_MBIST_CTL_REG));
757}
758
759void
760bfa_ioc_ct2_mac_reset(void __iomem *rb)
761{
762	u32	r32;
763
764	bfa_ioc_ct2_sclk_init(rb);
765	bfa_ioc_ct2_lclk_init(rb);
766
767	/*
768	 * release soft reset on s_clk
769	 */
770	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
771	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
772		(rb + CT2_APP_PLL_SCLK_CTL_REG));
773
774	/*
775	 * release soft reset on l_clk
776	 */
777	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
778	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
779		(rb + CT2_APP_PLL_LCLK_CTL_REG));
780
781	/* put port0, port1 MAC & AHB in reset */
782	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
783		rb + CT2_CSI_MAC_CONTROL_REG(0));
784	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
785		rb + CT2_CSI_MAC_CONTROL_REG(1));
786}
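/*
 * Note: in this version the s_clk/l_clk re-init and soft-reset release
 * above are folded into the MAC reset path; v4.6 below factors them
 * out into bfa_ioc_ct2_clk_reset() and leaves only the CSI MAC/AHB
 * reset writes here.
 */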
787
788#define CT2_NFC_MAX_DELAY	1000
789bfa_status_t
790bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
791{
792	u32	wgn, r32;
793	int i;
794
795	/*
796	 * Initialize PLL if not already done by NFC
797	 */
798	wgn = readl(rb + CT2_WGN_STATUS);
799	if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
800		writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
801		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
802			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
803			if (r32 & __NFC_CONTROLLER_HALTED)
804				break;
805			udelay(1000);
806		}
807	}
808
809	/*
810	 * Mask the interrupts and clear any
811	 * pending interrupts.
812	 */
813	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
814	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
815
816	r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
817	if (r32 == 1) {
818		writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
819		readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
820	}
821	r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
822	if (r32 == 1) {
823		writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
824		readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
825	}
826
827	bfa_ioc_ct2_mac_reset(rb);
828	bfa_ioc_ct2_sclk_init(rb);
829	bfa_ioc_ct2_lclk_init(rb);
830
831	/*
832	 * release soft reset on s_clk
833	 */
834	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
835	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
836		(rb + CT2_APP_PLL_SCLK_CTL_REG));
837
838	/*
839	 * release soft reset on l_clk
840	 */
841	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
842	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
843		(rb + CT2_APP_PLL_LCLK_CTL_REG));
844
845	/*
846	 * Announce flash device presence, if flash was corrupted.
847	 */
848	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
849		r32 = readl((rb + PSS_GPIO_OUT_REG));
850		writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
851		r32 = readl((rb + PSS_GPIO_OE_REG));
852		writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
853	}
854
855	bfa_ioc_ct2_mem_init(rb);
856
857	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
858	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
859	return BFA_STATUS_OK;
860}
v4.6
  1/*
  2 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  3 * Copyright (c) 2014- QLogic Corporation.
  4 * All rights reserved
  5 * www.qlogic.com
  6 *
  7 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
  8 *
  9 * This program is free software; you can redistribute it and/or modify it
 10 * under the terms of the GNU General Public License (GPL) Version 2 as
 11 * published by the Free Software Foundation
 12 *
 13 * This program is distributed in the hope that it will be useful, but
 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 16 * General Public License for more details.
 17 */
 18
 19#include "bfad_drv.h"
 20#include "bfa_ioc.h"
 21#include "bfi_reg.h"
 22#include "bfa_defs.h"
 23
 24BFA_TRC_FILE(CNA, IOC_CT);
 25
 26#define bfa_ioc_ct_sync_pos(__ioc)      \
 27		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
 28#define BFA_IOC_SYNC_REQD_SH    16
 29#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
 30#define bfa_ioc_ct_clear_sync_ackd(__val)       (__val & 0xffff0000)
 31#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
 32#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
 33			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
 34
 35/*
 36 * forward declarations
 37 */
 38static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
 39static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
 40static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 41static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
 42static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
 43static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
 44static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
 45static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
 46static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
 47static void bfa_ioc_ct_set_cur_ioc_fwstate(
 48			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
 49static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
 50static void bfa_ioc_ct_set_alt_ioc_fwstate(
 51			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
 52static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
 53
 54static struct bfa_ioc_hwif_s hwif_ct;
 55static struct bfa_ioc_hwif_s hwif_ct2;
 56
 57/*
 58 * Return true if firmware of current driver matches the running firmware.
 59 */
 60static bfa_boolean_t
 61bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 62{
 63	enum bfi_ioc_state ioc_fwstate;
 64	u32 usecnt;
 65	struct bfi_ioc_image_hdr_s fwhdr;
 66
 67	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 68	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 69
 70	/*
 71	 * If usage count is 0, always return TRUE.
 72	 */
 73	if (usecnt == 0) {
 74		writel(1, ioc->ioc_regs.ioc_usage_reg);
 75		readl(ioc->ioc_regs.ioc_usage_sem_reg);
 76		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 77		writel(0, ioc->ioc_regs.ioc_fail_sync);
 78		bfa_trc(ioc, usecnt);
 79		return BFA_TRUE;
 80	}
 81
 82	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 83	bfa_trc(ioc, ioc_fwstate);
 84
 85	/*
 86	 * Use count cannot be non-zero while the chip is in the uninitialized state.
 87	 */
 88	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
 89
 90	/*
 91	 * Check if another driver with a different firmware is active
 92	 */
 93	bfa_ioc_fwver_get(ioc, &fwhdr);
 94	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
 95		readl(ioc->ioc_regs.ioc_usage_sem_reg);
 96		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 97		bfa_trc(ioc, usecnt);
 98		return BFA_FALSE;
 99	}
100
101	/*
102	 * Same firmware version. Increment the reference count.
103	 */
104	usecnt++;
105	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
106	readl(ioc->ioc_regs.ioc_usage_sem_reg);
107	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
108	bfa_trc(ioc, usecnt);
109	return BFA_TRUE;
110}
111
112static void
113bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
114{
115	u32 usecnt;
116
117	/*
118	 * decrement usage count
119	 */
120	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
121	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
122	WARN_ON(usecnt <= 0);
123
124	usecnt--;
125	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
126	bfa_trc(ioc, usecnt);
127
128	readl(ioc->ioc_regs.ioc_usage_sem_reg);
129	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
130}
131
132/*
133 * Notify other functions on HB failure.
134 */
135static void
136bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
137{
138	if (bfa_ioc_is_cna(ioc)) {
139		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
140		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
141		/* Wait for halt to take effect */
142		readl(ioc->ioc_regs.ll_halt);
143		readl(ioc->ioc_regs.alt_ll_halt);
144	} else {
145		writel(~0U, ioc->ioc_regs.err_set);
146		readl(ioc->ioc_regs.err_set);
147	}
148}
149
150/*
151 * Host to LPU mailbox message addresses
152 */
153static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
154	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
155	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
156	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
157	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
158};
159
160/*
161 * Host <-> LPU mailbox command/status registers - port 0
162 */
163static struct { u32 hfn, lpu; } ct_p0reg[] = {
164	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
165	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
166	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
167	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
168};
169
170/*
171 * Host <-> LPU mailbox command/status registers - port 1
172 */
173static struct { u32 hfn, lpu; } ct_p1reg[] = {
174	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
175	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
176	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
177	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
178};
179
180static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
181	ct2_reg[] = {
182	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
183	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
184	  CT2_HOSTFN_LPU0_READ_STAT},
185	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
186	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
187	  CT2_HOSTFN_LPU1_READ_STAT},
188};
189
190static void
191bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
192{
193	void __iomem *rb;
194	int		pcifn = bfa_ioc_pcifn(ioc);
195
196	rb = bfa_ioc_bar0(ioc);
197
198	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
199	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
200	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
201
202	if (ioc->port_id == 0) {
203		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
204		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
205		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
206		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
207		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
208		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
209		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
210	} else {
211		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
212		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
213		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
214		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
215		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
216		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
217		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
218	}
219
220	/*
221	 * PSS control registers
222	 */
223	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
224	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
225	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
226	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
227
228	/*
229	 * IOC semaphore registers and serialization
230	 */
231	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
232	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
233	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
234	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
235	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
236
237	/*
238	 * sram memory access
239	 */
240	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
241	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
242
243	/*
244	 * err set reg : for notification of hb failure in fcmode
245	 */
246	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
247}
248
249static void
250bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
251{
252	void __iomem *rb;
253	int	port = bfa_ioc_portid(ioc);
254
255	rb = bfa_ioc_bar0(ioc);
256
257	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
258	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
259	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
260	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
261	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
262	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
263
264	if (port == 0) {
265		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
266		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
267		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
268		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
269		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
270	} else {
271		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
272		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
273		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
274		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
275		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
276	}
277
278	/*
279	 * PSS control registers
280	 */
281	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
282	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
283	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
284	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
285
286	/*
287	 * IOC semaphore registers and serialization
288	 */
289	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
290	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
291	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
292	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
293	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
294
295	/*
296	 * sram memory access
297	 */
298	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
299	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
300
301	/*
302	 * err set reg : for notification of hb failure in fcmode
303	 */
304	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
305}
306
307/*
308 * Initialize IOC to port mapping.
309 */
310
311#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
312static void
313bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
314{
315	void __iomem *rb = ioc->pcidev.pci_bar_kva;
316	u32	r32;
317
318	/*
319	 * For catapult, base port id on personality register and IOC type
320	 */
321	r32 = readl(rb + FNC_PERS_REG);
322	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
323	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
324
325	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
326	bfa_trc(ioc, ioc->port_id);
327}
328
329static void
330bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
331{
332	void __iomem	*rb = ioc->pcidev.pci_bar_kva;
333	u32	r32;
334
335	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
336	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
337
338	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
339	bfa_trc(ioc, ioc->port_id);
340}
341
342/*
343 * Set interrupt mode for a function: INTX or MSIX
344 */
345static void
346bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
347{
348	void __iomem *rb = ioc->pcidev.pci_bar_kva;
349	u32	r32, mode;
350
351	r32 = readl(rb + FNC_PERS_REG);
352	bfa_trc(ioc, r32);
353
354	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
355		__F0_INTX_STATUS;
356
357	/*
358	 * If already in desired mode, do not change anything
359	 */
360	if ((!msix && mode) || (msix && !mode))
361		return;
362
363	if (msix)
364		mode = __F0_INTX_STATUS_MSIX;
365	else
366		mode = __F0_INTX_STATUS_INTA;
367
368	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
369	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
370	bfa_trc(ioc, r32);
371
372	writel(r32, rb + FNC_PERS_REG);
373}
374
375bfa_boolean_t
376bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
377{
378	u32	r32;
379
380	r32 = readl(ioc->ioc_regs.lpu_read_stat);
381	if (r32) {
382		writel(1, ioc->ioc_regs.lpu_read_stat);
383		return BFA_TRUE;
384	}
385
386	return BFA_FALSE;
387}
388
389/*
390 * Cleanup hw semaphore and usecnt registers
391 */
392static void
393bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
394{
395
396	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
397	writel(0, ioc->ioc_regs.ioc_usage_reg);
398	readl(ioc->ioc_regs.ioc_usage_sem_reg);
399	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
400
401	writel(0, ioc->ioc_regs.ioc_fail_sync);
402	/*
403	 * Read the hw sem reg to make sure that it is locked
404	 * before we clear it. If it is not locked, writing 1
405	 * will lock it instead of clearing it.
406	 */
407	readl(ioc->ioc_regs.ioc_sem_reg);
408	writel(1, ioc->ioc_regs.ioc_sem_reg);
409}
410
411static bfa_boolean_t
412bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
413{
414	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
415	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
416
417	/*
418	 * Driver load time.  If the sync required bit for this PCI fn
419	 * is set, it is due to an unclean exit by the driver for this
420	 * PCI fn in the previous incarnation. Whoever comes here first
421	 * should clean it up, no matter which PCI fn.
422	 */
423
424	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
425		writel(0, ioc->ioc_regs.ioc_fail_sync);
426		writel(1, ioc->ioc_regs.ioc_usage_reg);
427		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
428		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
429		return BFA_TRUE;
430	}
431
432	return bfa_ioc_ct_sync_complete(ioc);
433}
434
435/*
436 * Synchronized IOC failure processing routines
437 */
438static void
439bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
440{
441	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
442	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
443
444	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
445}
446
447static void
448bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
449{
450	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
451	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
452					bfa_ioc_ct_sync_pos(ioc);
453
454	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
455}
456
457static void
458bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
459{
460	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
461
462	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
463		ioc->ioc_regs.ioc_fail_sync);
464}
465
466static bfa_boolean_t
467bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
468{
469	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
470	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
471	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
472	uint32_t tmp_ackd;
473
474	if (sync_ackd == 0)
475		return BFA_TRUE;
476
477	/*
478	 * The check below is to see whether any other PCI fn
479	 * has reinitialized the ASIC (reset sync_ackd bits)
480	 * and failed again while this IOC was waiting for hw
481	 * semaphore (in bfa_iocpf_sm_semwait()).
482	 */
483	tmp_ackd = sync_ackd;
484	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
485		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
486		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
487
488	if (sync_reqd == sync_ackd) {
489		writel(bfa_ioc_ct_clear_sync_ackd(r32),
490			ioc->ioc_regs.ioc_fail_sync);
491		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
492		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
493		return BFA_TRUE;
494	}
495
496	/*
497	 * If another PCI fn reinitialized and failed again while
498	 * this IOC was waiting for hw sem, the sync_ackd bit for
499	 * this IOC needs to be set again to allow reinitialization.
500	 */
501	if (tmp_ackd != sync_ackd)
502		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
503
504	return BFA_FALSE;
505}
506
507/**
508 * Called from bfa_ioc_attach() to map asic specific calls.
509 */
510static void
511bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
512{
513	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
514	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
515	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
516	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
517	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
518	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
519	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
520	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
521	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
522	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
523	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
524	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
525	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
526}
527
528/**
529 * Called from bfa_ioc_attach() to map asic specific calls.
530 */
531void
532bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
533{
534	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
535
536	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
537	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
538	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
539	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
540	ioc->ioc_hwif = &hwif_ct;
541}
542
543/**
544 * Called from bfa_ioc_attach() to map asic specific calls.
545 */
546void
547bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
548{
549	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
550
551	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
552	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
553	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
554	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
555	hwif_ct2.ioc_isr_mode_set = NULL;
556	ioc->ioc_hwif = &hwif_ct2;
557}
558
559/*
560 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
561 */
562#define HOSTFN_MSIX_DEFAULT		64
563#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
564#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
565#define __MSIX_VT_NUMVT__MK		0x003ff800
566#define __MSIX_VT_NUMVT__SH		11
567#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
568#define __MSIX_VT_OFST_			0x000007ff
569void
570bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
571{
572	void __iomem *rb = ioc->pcidev.pci_bar_kva;
573	u32	r32;
574
575	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
576	if (r32 & __MSIX_VT_NUMVT__MK) {
577		writel(r32 & __MSIX_VT_OFST_,
578			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
579		return;
580	}
581
582	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
583		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
584		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
585	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
586		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
587}
588
589bfa_status_t
590bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
591{
592	u32	pll_sclk, pll_fclk, r32;
593	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
594
595	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
596		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
597		__APP_PLL_SCLK_JITLMT0_1(3U) |
598		__APP_PLL_SCLK_CNTLMT0_1(1U);
599	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
600		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
601		__APP_PLL_LCLK_JITLMT0_1(3U) |
602		__APP_PLL_LCLK_CNTLMT0_1(1U);
603
604	if (fcmode) {
605		writel(0, (rb + OP_MODE));
606		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
607			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
608	} else {
609		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
610		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
611	}
612	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
613	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
614	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
615	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
616	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
617	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
618	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
619	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
620	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
621			rb + APP_PLL_SCLK_CTL_REG);
622	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
623			rb + APP_PLL_LCLK_CTL_REG);
624	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
625		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
626	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
627		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
628	readl(rb + HOSTFN0_INT_MSK);
629	udelay(2000);
630	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
631	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
632	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
633	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
634
635	if (!fcmode) {
636		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
637		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
638	}
639	r32 = readl((rb + PSS_CTL_REG));
640	r32 &= ~__PSS_LMEM_RESET;
641	writel(r32, (rb + PSS_CTL_REG));
642	udelay(1000);
643	if (!fcmode) {
644		writel(0, (rb + PMM_1T_RESET_REG_P0));
645		writel(0, (rb + PMM_1T_RESET_REG_P1));
646	}
647
648	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
649	udelay(1000);
650	r32 = readl((rb + MBIST_STAT_REG));
651	writel(0, (rb + MBIST_CTL_REG));
652	return BFA_STATUS_OK;
653}
654
655static void
656bfa_ioc_ct2_sclk_init(void __iomem *rb)
657{
658	u32 r32;
659
660	/*
661	 * put s_clk PLL and PLL FSM in reset
662	 */
663	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
664	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
665	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
666		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
667	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
668
669	/*
670	 * Ignore mode and program for the max clock (which is FC16)
671	 * Firmware/NFC will do the PLL init appropriately
672	 */
673	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
674	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
675	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
676
677	/*
678	 * while doing PLL init, don't clock gate the ethernet subsystem
679	 */
680	r32 = readl((rb + CT2_CHIP_MISC_PRG));
681	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
682
683	r32 = readl((rb + CT2_PCIE_MISC_REG));
684	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
685
686	/*
687	 * set sclk value
688	 */
689	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
690	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
691		__APP_PLL_SCLK_CLK_DIV2);
692	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
693
694	/*
695	 * poll for s_clk lock or delay 1ms
696	 */
697	udelay(1000);
698}
699
700static void
701bfa_ioc_ct2_lclk_init(void __iomem *rb)
702{
703	u32 r32;
704
705	/*
706	 * put l_clk PLL and PLL FSM in reset
707	 */
708	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
709	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
710	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
711		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
712	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
713
714	/*
715	 * set LPU speed (set for FC16 which will work for other modes)
716	 */
717	r32 = readl((rb + CT2_CHIP_MISC_PRG));
718	writel(r32, (rb + CT2_CHIP_MISC_PRG));
719
720	/*
721	 * set LPU half speed (set for FC16 which will work for other modes)
722	 */
723	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
724	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
725
726	/*
727	 * set lclk for mode (set for FC16)
728	 */
729	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
730	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
731	r32 |= 0x20c1731b;
732	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
733
734	/*
735	 * poll for l_clk lock or delay 1ms
736	 */
737	udelay(1000);
738}
739
740static void
741bfa_ioc_ct2_mem_init(void __iomem *rb)
742{
743	u32	r32;
744
745	r32 = readl((rb + PSS_CTL_REG));
746	r32 &= ~__PSS_LMEM_RESET;
747	writel(r32, (rb + PSS_CTL_REG));
748	udelay(1000);
749
750	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
751	udelay(1000);
752	writel(0, (rb + CT2_MBIST_CTL_REG));
753}
754
755void
756bfa_ioc_ct2_mac_reset(void __iomem *rb)
757{
758	/* put port0, port1 MAC & AHB in reset */
759	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
760		rb + CT2_CSI_MAC_CONTROL_REG(0));
761	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
762		rb + CT2_CSI_MAC_CONTROL_REG(1));
763}
764
765static void
766bfa_ioc_ct2_enable_flash(void __iomem *rb)
767{
768	u32 r32;
769
770	r32 = readl((rb + PSS_GPIO_OUT_REG));
771	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
772	r32 = readl((rb + PSS_GPIO_OE_REG));
773	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
774}
775
776#define CT2_NFC_MAX_DELAY	1000
777#define CT2_NFC_PAUSE_MAX_DELAY 4000
778#define CT2_NFC_VER_VALID	0x147
779#define CT2_NFC_STATE_RUNNING   0x20000001
780#define BFA_IOC_PLL_POLL	1000000
781
782static bfa_boolean_t
783bfa_ioc_ct2_nfc_halted(void __iomem *rb)
784{
785	u32	r32;
786
787	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
788	if (r32 & __NFC_CONTROLLER_HALTED)
789		return BFA_TRUE;
790
791	return BFA_FALSE;
792}
793
794static void
795bfa_ioc_ct2_nfc_halt(void __iomem *rb)
796{
797	int	i;
798
799	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
800	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
801		if (bfa_ioc_ct2_nfc_halted(rb))
802			break;
803		udelay(1000);
804	}
805	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
806}
807
808static void
809bfa_ioc_ct2_nfc_resume(void __iomem *rb)
810{
811	u32	r32;
812	int i;
813
814	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
815	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
816		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
817		if (!(r32 & __NFC_CONTROLLER_HALTED))
818			return;
819		udelay(1000);
820	}
821	WARN_ON(1);
822}
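/*
 * Halt and resume both drive the same __HALT_NFC_CONTROLLER bit, via
 * the CSR SET and CLR registers respectively, and then poll the SET
 * register until the controller-halted status changes.
 */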
823
824static void
825bfa_ioc_ct2_clk_reset(void __iomem *rb)
826{
827	u32 r32;
828
829	bfa_ioc_ct2_sclk_init(rb);
830	bfa_ioc_ct2_lclk_init(rb);
831
832	/*
833	 * release soft reset on s_clk & l_clk
834	 */
835	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
836	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
837			(rb + CT2_APP_PLL_SCLK_CTL_REG));
838
839	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
840	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
841			(rb + CT2_APP_PLL_LCLK_CTL_REG));
842
843}
844
845static void
846bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
847{
848	u32 r32, i;
849
850	r32 = readl((rb + PSS_CTL_REG));
851	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
852	writel(r32, (rb + PSS_CTL_REG));
853
854	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);
855
856	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
857		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
858
859		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
860			break;
861	}
862	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
863
864	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
865		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
866
867		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
868			break;
869	}
870	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
871
872	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
873	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
874}
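/*
 * The two polls above bracket the PLL reset: the first waits for the
 * flash status register to report that the init/reset is in progress,
 * the second for that bit to clear again; the final read verifies that
 * NFC has consumed the __RESET_AND_START_SCLK_LCLK_PLLS request.
 */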
875
876static void
877bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
878{
879	u32 r32;
880	int i;
881
882	if (bfa_ioc_ct2_nfc_halted(rb))
883		bfa_ioc_ct2_nfc_resume(rb);
884	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
885		r32 = readl(rb + CT2_NFC_STS_REG);
886		if (r32 == CT2_NFC_STATE_RUNNING)
887			return;
888		udelay(1000);
889	}
890
891	r32 = readl(rb + CT2_NFC_STS_REG);
892	WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
893}
894
895bfa_status_t
896bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
897{
898	u32 wgn, r32, nfc_ver;
899
900	wgn = readl(rb + CT2_WGN_STATUS);
901
902	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
903		/*
904		 * If flash is corrupted, enable flash explicitly
905		 */
906		bfa_ioc_ct2_clk_reset(rb);
907		bfa_ioc_ct2_enable_flash(rb);
908
909		bfa_ioc_ct2_mac_reset(rb);
910
911		bfa_ioc_ct2_clk_reset(rb);
912		bfa_ioc_ct2_enable_flash(rb);
913
914	} else {
915		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
916
917		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
918		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
919
920			bfa_ioc_ct2_wait_till_nfc_running(rb);
921
922			bfa_ioc_ct2_nfc_clk_reset(rb);
923		} else {
924			bfa_ioc_ct2_nfc_halt(rb);
925
926			bfa_ioc_ct2_clk_reset(rb);
927			bfa_ioc_ct2_mac_reset(rb);
928			bfa_ioc_ct2_clk_reset(rb);
929
930		}
931	}
932	/*
933	 * The very first PCIe DMA Read done by the LPU fails with a fatal error
934	 * when the Address Translation Cache (ATC) has been enabled by system BIOS.
935	 *
936	 * Workaround:
937	 * Disable the Invalidated Tag Match Enable capability by setting bit 26
938	 * of CHIP_MISC_PRG to 0; by default it is set to 1.
939	 */
940	r32 = readl(rb + CT2_CHIP_MISC_PRG);
941	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));
942
943	/*
944	 * Mask the interrupts and clear any
945	 * pending interrupts left by BIOS/EFI
946	 */
947
948	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
949	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
950
951	/* For first time initialization, no need to clear interrupts */
952	r32 = readl(rb + HOST_SEM5_REG);
953	if (r32 & 0x1) {
954		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
955		if (r32 == 1) {
956			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
957			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
958		}
959		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
960		if (r32 == 1) {
961			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
962			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
963		}
964	}
965
966	bfa_ioc_ct2_mem_init(rb);
967
968	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
969	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
970
971	return BFA_STATUS_OK;
972}
973
974static void
975bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
976		enum bfi_ioc_state fwstate)
977{
978	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
979}
980
981static enum bfi_ioc_state
982bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
983{
984	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
985}
986
987static void
988bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
989		enum bfi_ioc_state fwstate)
990{
991	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
992}
993
994static enum bfi_ioc_state
995bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
996{
997	return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate);
998}
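/*
 * The four fwstate accessors above back the ioc_set_fwstate /
 * ioc_get_fwstate (and alt_) hooks wired up in bfa_ioc_set_ctx_hwif();
 * common IOC code is expected to go through the hwif rather than
 * touching the registers directly.  Illustrative use only (the actual
 * call sites live in the common bfa_ioc code, not shown here):
 *
 *	ioc->ioc_hwif->ioc_set_fwstate(ioc, BFI_IOC_UNINIT);
 *	if (ioc->ioc_hwif->ioc_get_fwstate(ioc) == BFI_IOC_FAIL)
 *		ioc->ioc_hwif->ioc_set_alt_fwstate(ioc, BFI_IOC_FAIL);
 */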