   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
   4 * Copyright (c) 2014- QLogic Corporation.
   5 * All rights reserved
   6 * www.qlogic.com
   7 *
   8 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
   9 */
  10
  11#ifndef __BFA_IOC_H__
  12#define __BFA_IOC_H__
  13
  14#include "bfad_drv.h"
  15#include "bfa_cs.h"
  16#include "bfi.h"
  17
  18#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
  19#define BFA_DBG_FWTRC_LEN					\
  20	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
  21	(sizeof(struct bfa_trc_mod_s) -				\
  22	BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
  23/*
  24 * BFA timer declarations
  25 */
  26typedef void (*bfa_timer_cbfn_t)(void *);
  27
  28/*
  29 * BFA timer data structure
  30 */
  31struct bfa_timer_s {
  32	struct list_head	qe;
  33	bfa_timer_cbfn_t timercb;
  34	void		*arg;
  35	int		timeout;	/* in millisecs */
  36};
  37
  38/*
  39 * Timer module structure
  40 */
  41struct bfa_timer_mod_s {
  42	struct list_head timer_q;
  43};
  44
  45#define BFA_TIMER_FREQ 200 /* specified in millisecs */
  46
  47void bfa_timer_beat(struct bfa_timer_mod_s *mod);
  48void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
  49			bfa_timer_cbfn_t timercb, void *arg,
  50			unsigned int timeout);
  51void bfa_timer_stop(struct bfa_timer_s *timer);
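/*
 * Usage sketch (illustrative only): arming and cancelling a timer with the
 * API above.  The names timer_mod, my_obj_s, my_tov_handler and the timeout
 * value are hypothetical and not part of the driver; the timeout argument is
 * in milliseconds (see struct bfa_timer_s above).
 *
 *	static void my_tov_handler(void *arg)
 *	{
 *		struct my_obj_s *obj = arg;
 *		handle the timeout for obj here
 *	}
 *
 *	bfa_timer_begin(timer_mod, &obj->tov_timer, my_tov_handler,
 *			obj, 2 * BFA_TIMER_FREQ);
 *	...
 *	bfa_timer_stop(&obj->tov_timer);
 *
 * bfa_timer_beat() is expected to be called periodically (roughly every
 * BFA_TIMER_FREQ msec) so that queued timers are aged and fired.
 */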
  52
  53/*
  54 * Generic Scatter Gather Element used by driver
  55 */
  56struct bfa_sge_s {
  57	u32	sg_len;
  58	void		*sg_addr;
  59};
  60
  61#define bfa_sge_word_swap(__sge) do {					     \
  62	((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]);      \
  63	((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]);      \
  64	((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]);      \
  65} while (0)
  66
  67#define bfa_swap_words(_x)  (	\
  68	((u64)(_x) << 32) | ((u64)(_x) >> 32))
  69
  70#ifdef __BIG_ENDIAN
  71#define bfa_sge_to_be(_x)
  72#define bfa_sge_to_le(_x)	bfa_sge_word_swap(_x)
  73#define bfa_sgaddr_le(_x)	bfa_swap_words(_x)
  74#else
  75#define	bfa_sge_to_be(_x)	bfa_sge_word_swap(_x)
  76#define bfa_sge_to_le(_x)
  77#define bfa_sgaddr_le(_x)	(_x)
  78#endif
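/*
 * Example (illustrative): bfa_swap_words() exchanges the two 32-bit halves
 * of a 64-bit value, e.g.
 *
 *	bfa_swap_words(0x0000000100000002ULL) == 0x0000000200000001ULL
 *
 * On big-endian hosts bfa_sgaddr_le() applies this word swap to a 64-bit
 * scatter-gather address; on little-endian hosts it is the identity.
 */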
  79
  80/*
  81 * BFA memory resources
  82 */
  83struct bfa_mem_dma_s {
  84	struct list_head qe;		/* Queue of DMA elements */
  85	u32		mem_len;	/* Total Length in Bytes */
  86	u8		*kva;		/* kernel virtual address */
  87	u64		dma;		/* dma address if DMA memory */
  88	u8		*kva_curp;	/* kva allocation cursor */
  89	u64		dma_curp;	/* dma allocation cursor */
  90};
  91#define bfa_mem_dma_t struct bfa_mem_dma_s
  92
  93struct bfa_mem_kva_s {
  94	struct list_head qe;		/* Queue of KVA elements */
  95	u32		mem_len;	/* Total Length in Bytes */
  96	u8		*kva;		/* kernel virtual address */
  97	u8		*kva_curp;	/* kva allocation cursor */
  98};
  99#define bfa_mem_kva_t struct bfa_mem_kva_s
 100
 101struct bfa_meminfo_s {
 102	struct bfa_mem_dma_s dma_info;
 103	struct bfa_mem_kva_s kva_info;
 104};
 105
 106/* BFA memory segment setup helpers */
 107static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo,
 108				     struct bfa_mem_dma_s *dm_ptr,
 109				     size_t seg_sz)
 110{
 111	dm_ptr->mem_len = seg_sz;
 112	if (seg_sz)
 113		list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe);
 114}
 115
 116static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo,
 117				     struct bfa_mem_kva_s *kva_ptr,
 118				     size_t seg_sz)
 119{
 120	kva_ptr->mem_len = seg_sz;
 121	if (seg_sz)
 122		list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe);
 123}
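/*
 * Usage sketch (illustrative, hypothetical sub-module): during the
 * meminfo/attach phase a sub-module describes the DMA and KVA segments it
 * needs by queueing its descriptors on bfa_meminfo_s, e.g.
 *
 *	struct bfa_mem_dma_s *dma_seg = &mod->mod_dma;	hypothetical
 *	struct bfa_mem_kva_s *kva_seg = &mod->kva_seg;	hypothetical
 *
 *	bfa_mem_dma_setup(meminfo, dma_seg, dma_len);
 *	bfa_mem_kva_setup(meminfo, kva_seg, kva_len);
 *
 * A zero-length segment has its mem_len recorded but is not queued, so a
 * caller may pass 0 for features that are configured out.
 */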
 124
 125/* BFA dma memory segments iterator */
 126#define bfa_mem_dma_sptr(_mod, _i)	(&(_mod)->dma_seg[(_i)])
 127#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i)			\
 128	for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr);	\
 129	     _i++, _sptr = bfa_mem_dma_sptr(_mod, _i))
 130
 131#define bfa_mem_kva_curp(_mod)	((_mod)->kva_seg.kva_curp)
 132#define bfa_mem_dma_virt(_sptr)	((_sptr)->kva_curp)
 133#define bfa_mem_dma_phys(_sptr)	((_sptr)->dma_curp)
 134#define bfa_mem_dma_len(_sptr)	((_sptr)->mem_len)
 135
 136/* Get the corresponding dma buf kva for a req - from the tag */
 137#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz)			      \
 138	(((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
 139	 BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
 140
 141/* Get the corresponding dma buf pa for a req - from the tag */
 142#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz)			\
 143	((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp +	\
 144	 BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
 145
 146/*
 147 * PCI device information required by IOC
 148 */
 149struct bfa_pcidev_s {
 150	int		pci_slot;
 151	u8		pci_func;
 152	u16		device_id;
 153	u16		ssid;
 154	void __iomem	*pci_bar_kva;
 155};
 156
 157/*
 158 * Structure used to remember the DMA-able memory block's KVA and Physical
 159 * Address
 160 */
 161struct bfa_dma_s {
 162	void		*kva;	/* ! Kernel virtual address	*/
 163	u64	pa;	/* ! Physical address		*/
 164};
 165
 166#define BFA_DMA_ALIGN_SZ	256
 167#define BFA_ROUNDUP(_l, _s)	(((_l) + ((_s) - 1)) & ~((_s) - 1))
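/*
 * Example: BFA_ROUNDUP() rounds a length up to the next multiple of a
 * power-of-two boundary, e.g. with the 256-byte DMA alignment above:
 *
 *	BFA_ROUNDUP(1000, BFA_DMA_ALIGN_SZ) == 1024
 *	BFA_ROUNDUP(1024, BFA_DMA_ALIGN_SZ) == 1024
 */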
 168
 169/*
 170 * smem size for Crossbow and Catapult
 171 */
 172#define BFI_SMEM_CB_SIZE	0x200000U	/* ! 2MB for crossbow	*/
 173#define BFI_SMEM_CT_SIZE	0x280000U	/* ! 2.5MB for catapult	*/
 174
 175#define bfa_dma_be_addr_set(dma_addr, pa)	\
 176		__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
 177static inline void
 178__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 179{
 180	dma_addr->a32.addr_lo = cpu_to_be32(pa);
 181	dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
 182}
 183
 184#define bfa_alen_set(__alen, __len, __pa)	\
 185	__bfa_alen_set(__alen, __len, (u64)__pa)
 186
 187static inline void
 188__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa)
 189{
 190	alen->al_len = cpu_to_be32(len);
 191	bfa_dma_be_addr_set(alen->al_addr, pa);
 192}
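/*
 * Usage sketch (illustrative): programming an address/length pair for
 * firmware.  The locals buf_len and dma_buf below are hypothetical.
 *
 *	struct bfa_dma_s dma_buf;		filled in by the caller
 *	struct bfi_alen_s alen;
 *
 *	bfa_alen_set(&alen, buf_len, dma_buf.pa);
 *
 * This expands to a cpu_to_be32() conversion of the length plus a split of
 * the 64-bit DMA address into big-endian hi/lo words via
 * bfa_dma_be_addr_set().
 */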
 193
 194struct bfa_ioc_regs_s {
 195	void __iomem *hfn_mbox_cmd;
 196	void __iomem *hfn_mbox;
 197	void __iomem *lpu_mbox_cmd;
 198	void __iomem *lpu_mbox;
 199	void __iomem *lpu_read_stat;
 200	void __iomem *pss_ctl_reg;
 201	void __iomem *pss_err_status_reg;
 202	void __iomem *app_pll_fast_ctl_reg;
 203	void __iomem *app_pll_slow_ctl_reg;
 204	void __iomem *ioc_sem_reg;
 205	void __iomem *ioc_usage_sem_reg;
 206	void __iomem *ioc_init_sem_reg;
 207	void __iomem *ioc_usage_reg;
 208	void __iomem *host_page_num_fn;
 209	void __iomem *heartbeat;
 210	void __iomem *ioc_fwstate;
 211	void __iomem *alt_ioc_fwstate;
 212	void __iomem *ll_halt;
 213	void __iomem *alt_ll_halt;
 214	void __iomem *err_set;
 215	void __iomem *ioc_fail_sync;
 216	void __iomem *shirq_isr_next;
 217	void __iomem *shirq_msk_next;
 218	void __iomem *smem_page_start;
 219	u32	smem_pg0;
 220};
 221
 222#define bfa_mem_read(_raddr, _off)	swab32(readl(((_raddr) + (_off))))
 223#define bfa_mem_write(_raddr, _off, _val)	\
 224			writel(swab32((_val)), ((_raddr) + (_off)))
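/*
 * Example (illustrative): reading one 32-bit word of IOC shared memory
 * through the mapped smem window; loff is a hypothetical byte offset within
 * the currently selected smem page.
 *
 *	u32 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
 */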
 225/*
 226 * IOC Mailbox structures
 227 */
 228struct bfa_mbox_cmd_s {
 229	struct list_head	qe;
 230	u32	msg[BFI_IOC_MSGSZ];
 231};
 232
 233/*
 234 * IOC mailbox module
 235 */
 236typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
 237struct bfa_ioc_mbox_mod_s {
 238	struct list_head		cmd_q;	/*  pending mbox queue	*/
 239	int			nmclass;	/*  number of handlers */
 240	struct {
 241		bfa_ioc_mbox_mcfunc_t	cbfn;	/*  message handlers	*/
 242		void			*cbarg;
 243	} mbhdlr[BFI_MC_MAX];
 244};
 245
 246/*
 247 * IOC callback function interfaces
 248 */
 249typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
 250typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
 251typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
 252typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
 253struct bfa_ioc_cbfn_s {
 254	bfa_ioc_enable_cbfn_t	enable_cbfn;
 255	bfa_ioc_disable_cbfn_t	disable_cbfn;
 256	bfa_ioc_hbfail_cbfn_t	hbfail_cbfn;
 257	bfa_ioc_reset_cbfn_t	reset_cbfn;
 258};
 259
 260/*
 261 * IOC event notification mechanism.
 262 */
 263enum bfa_ioc_event_e {
 264	BFA_IOC_E_ENABLED	= 1,
 265	BFA_IOC_E_DISABLED	= 2,
 266	BFA_IOC_E_FAILED	= 3,
 267};
 268
 269typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e);
 270
 271struct bfa_ioc_notify_s {
 272	struct list_head		qe;
 273	bfa_ioc_notify_cbfn_t	cbfn;
 274	void			*cbarg;
 275};
 276
 277/*
 278 * Initialize an IOC event notification structure
 279 */
 280#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do {	\
 281	(__notify)->cbfn = (__cbfn);      \
 282	(__notify)->cbarg = (__cbarg);      \
 283} while (0)
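/*
 * Usage sketch (illustrative): a sub-module interested in IOC state changes
 * initializes a notify element and queues it on the IOC's notify_q; the
 * handler name and mod pointer below are hypothetical.
 *
 *	static void my_ioc_event(void *cbarg, enum bfa_ioc_event_e event)
 *	{
 *		react to BFA_IOC_E_ENABLED / _DISABLED / _FAILED here
 *	}
 *
 *	bfa_ioc_notify_init(&mod->ioc_notify, my_ioc_event, mod);
 *	list_add_tail(&mod->ioc_notify.qe, &ioc->notify_q);
 */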
 284
 285struct bfa_iocpf_s {
 286	bfa_fsm_t		fsm;
 287	struct bfa_ioc_s	*ioc;
 288	bfa_boolean_t		fw_mismatch_notified;
 289	bfa_boolean_t		auto_recover;
 290	u32			poll_time;
 291};
 292
 293struct bfa_ioc_s {
 294	bfa_fsm_t		fsm;
 295	struct bfa_s		*bfa;
 296	struct bfa_pcidev_s	pcidev;
 297	struct bfa_timer_mod_s	*timer_mod;
 298	struct bfa_timer_s	ioc_timer;
 299	struct bfa_timer_s	sem_timer;
 300	struct bfa_timer_s	hb_timer;
 301	u32		hb_count;
 302	struct list_head	notify_q;
 303	void			*dbg_fwsave;
 304	int			dbg_fwsave_len;
 305	bfa_boolean_t		dbg_fwsave_once;
 306	enum bfi_pcifn_class	clscode;
 307	struct bfa_ioc_regs_s	ioc_regs;
 308	struct bfa_trc_mod_s	*trcmod;
 309	struct bfa_ioc_drv_stats_s	stats;
 310	bfa_boolean_t		fcmode;
 311	bfa_boolean_t		pllinit;
 312	bfa_boolean_t		stats_busy;	/*  outstanding stats */
 313	u8			port_id;
 314	struct bfa_dma_s	attr_dma;
 315	struct bfi_ioc_attr_s	*attr;
 316	struct bfa_ioc_cbfn_s	*cbfn;
 317	struct bfa_ioc_mbox_mod_s mbox_mod;
 318	struct bfa_ioc_hwif_s	*ioc_hwif;
 319	struct bfa_iocpf_s	iocpf;
 320	enum bfi_asic_gen	asic_gen;
 321	enum bfi_asic_mode	asic_mode;
 322	enum bfi_port_mode	port0_mode;
 323	enum bfi_port_mode	port1_mode;
 324	enum bfa_mode_s		port_mode;
 325	u8			ad_cap_bm;	/* adapter cap bit mask */
 326	u8			port_mode_cfg;	/* config port mode */
 327	int			ioc_aen_seq;
 328};
 329
 330struct bfa_ioc_hwif_s {
 331	bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
 332	bfa_boolean_t	(*ioc_firmware_lock)	(struct bfa_ioc_s *ioc);
 333	void		(*ioc_firmware_unlock)	(struct bfa_ioc_s *ioc);
 334	void		(*ioc_reg_init)	(struct bfa_ioc_s *ioc);
 335	void		(*ioc_map_port)	(struct bfa_ioc_s *ioc);
 336	void		(*ioc_isr_mode_set)	(struct bfa_ioc_s *ioc,
 337					bfa_boolean_t msix);
 338	void		(*ioc_notify_fail)	(struct bfa_ioc_s *ioc);
 339	void		(*ioc_ownership_reset)	(struct bfa_ioc_s *ioc);
 340	bfa_boolean_t   (*ioc_sync_start)       (struct bfa_ioc_s *ioc);
 341	void		(*ioc_sync_join)	(struct bfa_ioc_s *ioc);
 342	void		(*ioc_sync_leave)	(struct bfa_ioc_s *ioc);
 343	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
 344	bfa_boolean_t	(*ioc_sync_complete)	(struct bfa_ioc_s *ioc);
 345	bfa_boolean_t	(*ioc_lpu_read_stat)	(struct bfa_ioc_s *ioc);
 346	void		(*ioc_set_fwstate)	(struct bfa_ioc_s *ioc,
 347					enum bfi_ioc_state fwstate);
 348	enum bfi_ioc_state	(*ioc_get_fwstate)	(struct bfa_ioc_s *ioc);
 349	void		(*ioc_set_alt_fwstate)	(struct bfa_ioc_s *ioc,
 350					enum bfi_ioc_state fwstate);
 351	enum bfi_ioc_state	(*ioc_get_alt_fwstate)	(struct bfa_ioc_s *ioc);
 352};
 353
 354/*
 355 * Queue element to wait for room in request queue. FIFO order is
 356 * maintained when fulfilling requests.
 357 */
 358struct bfa_reqq_wait_s {
 359	struct list_head	qe;
 360	void	(*qresume) (void *cbarg);
 361	void	*cbarg;
 362};
 363
 364typedef void	(*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
 365
 366/*
 367 * Generic BFA callback element.
 368 */
 369struct bfa_cb_qe_s {
 370	struct list_head	qe;
 371	bfa_cb_cbfn_t	cbfn;
 372	bfa_boolean_t	once;
 373	bfa_boolean_t	pre_rmv;	/* set for stack based qe(s) */
 374	bfa_status_t	fw_status;	/* to access fw status in comp proc */
 375	void		*cbarg;
 376};
 377
 378/*
 379 * IOCFC state machine definitions/declarations
 380 */
 381enum iocfc_event {
 382	IOCFC_E_INIT		= 1,	/* IOCFC init request		*/
 383	IOCFC_E_START		= 2,	/* IOCFC mod start request	*/
 384	IOCFC_E_STOP		= 3,	/* IOCFC stop request		*/
 385	IOCFC_E_ENABLE		= 4,	/* IOCFC enable request		*/
 386	IOCFC_E_DISABLE		= 5,	/* IOCFC disable request	*/
 387	IOCFC_E_IOC_ENABLED	= 6,	/* IOC enabled message		*/
 388	IOCFC_E_IOC_DISABLED	= 7,	/* IOC disabled message		*/
 389	IOCFC_E_IOC_FAILED	= 8,	/* failure notice by IOC sm	*/
 390	IOCFC_E_DCONF_DONE	= 9,	/* dconf read/write done	*/
 391	IOCFC_E_CFG_DONE	= 10,	/* IOCFC config complete	*/
 392};
 393
 394/*
 395 * ASIC block configuration related
 396 */
 397
 398typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status);
 399
 400struct bfa_ablk_s {
 401	struct bfa_ioc_s	*ioc;
 402	struct bfa_ablk_cfg_s	*cfg;
 403	u16			*pcifn;
 404	struct bfa_dma_s	dma_addr;
 405	bfa_boolean_t		busy;
 406	struct bfa_mbox_cmd_s	mb;
 407	bfa_ablk_cbfn_t		cbfn;
 408	void			*cbarg;
 409	struct bfa_ioc_notify_s	ioc_notify;
 410	struct bfa_mem_dma_s	ablk_dma;
 411};
 412#define BFA_MEM_ABLK_DMA(__bfa)		(&((__bfa)->modules.ablk.ablk_dma))
 413
 414/*
 415 *	SFP module specific
 416 */
 417typedef void	(*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status);
 418
 419struct bfa_sfp_s {
 420	void	*dev;
 421	struct bfa_ioc_s	*ioc;
 422	struct bfa_trc_mod_s	*trcmod;
 423	struct sfp_mem_s	*sfpmem;
 424	bfa_cb_sfp_t		cbfn;
 425	void			*cbarg;
 426	enum bfi_sfp_mem_e	memtype; /* mem access type   */
 427	u32			status;
 428	struct bfa_mbox_cmd_s	mbcmd;
 429	u8			*dbuf_kva; /* dma buf virtual address */
 430	u64			dbuf_pa;   /* dma buf physical address */
 431	struct bfa_ioc_notify_s	ioc_notify;
 432	enum bfa_defs_sfp_media_e *media;
 433	enum bfa_port_speed	portspeed;
 434	bfa_cb_sfp_t		state_query_cbfn;
 435	void			*state_query_cbarg;
 436	u8			lock;
 437	u8			data_valid; /* data in dbuf is valid */
 438	u8			state;	    /* sfp state  */
 439	u8			state_query_lock;
 440	struct bfa_mem_dma_s	sfp_dma;
 441	u8			is_elb;	    /* eloopback  */
 442};
 443
 444#define BFA_SFP_MOD(__bfa)	(&(__bfa)->modules.sfp)
 445#define BFA_MEM_SFP_DMA(__bfa)	(&(BFA_SFP_MOD(__bfa)->sfp_dma))
 446
 447u32	bfa_sfp_meminfo(void);
 448
 449void	bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc,
 450			void *dev, struct bfa_trc_mod_s *trcmod);
 451
 452void	bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa);
 453void	bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg);
 454
 455bfa_status_t	bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
 456			     bfa_cb_sfp_t cbfn, void *cbarg);
 457
 458bfa_status_t	bfa_sfp_media(struct bfa_sfp_s *sfp,
 459			enum bfa_defs_sfp_media_e *media,
 460			bfa_cb_sfp_t cbfn, void *cbarg);
 461
 462bfa_status_t	bfa_sfp_speed(struct bfa_sfp_s *sfp,
 463			enum bfa_port_speed portspeed,
 464			bfa_cb_sfp_t cbfn, void *cbarg);
 465
 466/*
 467 *	Flash module specific
 468 */
 469typedef void	(*bfa_cb_flash_t) (void *cbarg, bfa_status_t status);
 470
 471struct bfa_flash_s {
 472	struct bfa_ioc_s *ioc;		/* back pointer to ioc */
 473	struct bfa_trc_mod_s *trcmod;
 474	u32		type;           /* partition type */
 475	u8		instance;       /* partition instance */
 476	u8		rsv[3];
 477	u32		op_busy;        /*  operation busy flag */
 478	u32		residue;        /*  residual length */
 479	u32		offset;         /*  offset */
 480	bfa_status_t	status;         /*  status */
 481	u8		*dbuf_kva;      /*  dma buf virtual address */
 482	u64		dbuf_pa;        /*  dma buf physical address */
 483	struct bfa_reqq_wait_s	reqq_wait; /*  to wait for room in reqq */
 484	bfa_cb_flash_t	cbfn;           /*  user callback function */
 485	void		*cbarg;         /*  user callback arg */
 486	u8		*ubuf;          /*  user supplied buffer */
 487	struct bfa_cb_qe_s	hcb_qe; /*  comp: BFA callback qelem */
 488	u32		addr_off;       /*  partition address offset */
 489	struct bfa_mbox_cmd_s	mb;       /*  mailbox */
 490	struct bfa_ioc_notify_s	ioc_notify; /*  ioc event notify */
 491	struct bfa_mem_dma_s	flash_dma;
 492};
 493
 494#define BFA_FLASH(__bfa)		(&(__bfa)->modules.flash)
 495#define BFA_MEM_FLASH_DMA(__bfa)	(&(BFA_FLASH(__bfa)->flash_dma))
 496
 497bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash,
 498			struct bfa_flash_attr_s *attr,
 499			bfa_cb_flash_t cbfn, void *cbarg);
 500bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash,
 501			enum bfa_flash_part_type type, u8 instance,
 502			bfa_cb_flash_t cbfn, void *cbarg);
 503bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash,
 504			enum bfa_flash_part_type type, u8 instance,
 505			void *buf, u32 len, u32 offset,
 506			bfa_cb_flash_t cbfn, void *cbarg);
 507bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash,
 508			enum bfa_flash_part_type type, u8 instance, void *buf,
 509			u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg);
 510u32	bfa_flash_meminfo(bfa_boolean_t mincfg);
 511void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
 512		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
 513void bfa_flash_memclaim(struct bfa_flash_s *flash,
 514		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
 515bfa_status_t    bfa_flash_raw_read(void __iomem *pci_bar_kva,
 516				u32 offset, char *buf, u32 len);
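/*
 * Usage sketch (illustrative): flash partition accesses complete
 * asynchronously through the bfa_cb_flash_t callback; the callback and
 * locals below are hypothetical.
 *
 *	static void my_flash_read_done(void *cbarg, bfa_status_t status)
 *	{
 *		status is BFA_STATUS_OK on success
 *	}
 *
 *	enum bfa_flash_part_type type = ...;	partition to read
 *	status = bfa_flash_read_part(BFA_FLASH(bfa), type, instance,
 *				     buf, len, 0, my_flash_read_done, cbarg);
 */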
 517
 518/*
 519 *	DIAG module specific
 520 */
 521
 522typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status);
 523typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon,
 524			bfa_boolean_t link_e2e_beacon);
 525
 526/*
 527 *      Firmware ping test results
 528 */
 529struct bfa_diag_results_fwping {
 530	u32     data;   /* store the corrupted data */
 531	u32     status;
 532	u32     dmastatus;
 533	u8      rsvd[4];
 534};
 535
 536struct bfa_diag_qtest_result_s {
 537	u32	status;
 538	u16	count;	/* successful queue test count */
 539	u8	queue;
 540	u8	rsvd;	/* 64-bit align */
 541};
 542
 543/*
 544 * Firmware ping test state
 545 */
 546struct bfa_diag_fwping_s {
 547	struct bfa_diag_results_fwping *result;
 548	bfa_cb_diag_t  cbfn;
 549	void            *cbarg;
 550	u32             data;
 551	u8              lock;
 552	u8              rsv[3];
 553	u32             status;
 554	u32             count;
 555	struct bfa_mbox_cmd_s   mbcmd;
 556	u8              *dbuf_kva;      /* dma buf virtual address */
 557	u64             dbuf_pa;        /* dma buf physical address */
 558};
 559
 560/*
 561 *      Temperature sensor query results
 562 */
 563struct bfa_diag_results_tempsensor_s {
 564	u32     status;
 565	u16     temp;           /* 10-bit A/D value */
 566	u16     brd_temp;       /* 9-bit board temp */
 567	u8      ts_junc;        /* show junction tempsensor   */
 568	u8      ts_brd;         /* show board tempsensor      */
 569	u8      rsvd[6];        /* keep 8-byte alignment      */
 570};
 571
 572struct bfa_diag_tsensor_s {
 573	bfa_cb_diag_t   cbfn;
 574	void            *cbarg;
 575	struct bfa_diag_results_tempsensor_s *temp;
 576	u8              lock;
 577	u8              rsv[3];
 578	u32             status;
 579	struct bfa_mbox_cmd_s   mbcmd;
 580};
 581
 582struct bfa_diag_sfpshow_s {
 583	struct sfp_mem_s        *sfpmem;
 584	bfa_cb_diag_t           cbfn;
 585	void                    *cbarg;
 586	u8      lock;
 587	u8      static_data;
 588	u8      rsv[2];
 589	u32     status;
 590	struct bfa_mbox_cmd_s    mbcmd;
 591	u8      *dbuf_kva;      /* dma buf virtual address */
 592	u64     dbuf_pa;        /* dma buf physical address */
 593};
 594
 595struct bfa_diag_led_s {
 596	struct bfa_mbox_cmd_s   mbcmd;
 597	bfa_boolean_t   lock;   /* 1: ledtest is operating */
 598};
 599
 600struct bfa_diag_beacon_s {
 601	struct bfa_mbox_cmd_s   mbcmd;
 602	bfa_boolean_t   state;          /* port beacon state */
 603	bfa_boolean_t   link_e2e;       /* link beacon state */
 604};
 605
 606struct bfa_diag_s {
 607	void	*dev;
 608	struct bfa_ioc_s		*ioc;
 609	struct bfa_trc_mod_s		*trcmod;
 610	struct bfa_diag_fwping_s	fwping;
 611	struct bfa_diag_tsensor_s	tsensor;
 612	struct bfa_diag_sfpshow_s	sfpshow;
 613	struct bfa_diag_led_s		ledtest;
 614	struct bfa_diag_beacon_s	beacon;
 615	void	*result;
 616	struct bfa_timer_s timer;
 617	bfa_cb_diag_beacon_t  cbfn_beacon;
 618	bfa_cb_diag_t  cbfn;
 619	void		*cbarg;
 620	u8		block;
 621	u8		timer_active;
 622	u8		rsvd[2];
 623	u32		status;
 624	struct bfa_ioc_notify_s	ioc_notify;
 625	struct bfa_mem_dma_s	diag_dma;
 626};
 627
 628#define BFA_DIAG_MOD(__bfa)     (&(__bfa)->modules.diag_mod)
 629#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma))
 630
 631u32	bfa_diag_meminfo(void);
 632void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa);
 633void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
 634		     bfa_cb_diag_beacon_t cbfn_beacon,
 635		     struct bfa_trc_mod_s *trcmod);
 636bfa_status_t	bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset,
 637			u32 len, u32 *buf, u32 force);
 638bfa_status_t	bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset,
 639			u32 len, u32 value, u32 force);
 640bfa_status_t	bfa_diag_tsensor_query(struct bfa_diag_s *diag,
 641			struct bfa_diag_results_tempsensor_s *result,
 642			bfa_cb_diag_t cbfn, void *cbarg);
 643bfa_status_t	bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt,
 644			u32 pattern, struct bfa_diag_results_fwping *result,
 645			bfa_cb_diag_t cbfn, void *cbarg);
 646bfa_status_t	bfa_diag_sfpshow(struct bfa_diag_s *diag,
 647			struct sfp_mem_s *sfpmem, u8 static_data,
 648			bfa_cb_diag_t cbfn, void *cbarg);
 649bfa_status_t	bfa_diag_memtest(struct bfa_diag_s *diag,
 650			struct bfa_diag_memtest_s *memtest, u32 pattern,
 651			struct bfa_diag_memtest_result *result,
 652			bfa_cb_diag_t cbfn, void *cbarg);
 653bfa_status_t	bfa_diag_ledtest(struct bfa_diag_s *diag,
 654			struct bfa_diag_ledtest_s *ledtest);
 655bfa_status_t	bfa_diag_beacon_port(struct bfa_diag_s *diag,
 656			bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon,
 657			u32 sec);
 658
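/*
 * Illustrative sketch (not part of this header): running a firmware ping
 * test.  bfa_diag_fwping() completes asynchronously; the result structure
 * is filled in before the completion callback runs.  The count (10) and
 * data pattern (0xdeadbeef) are example values, and diag_done/my_ctx are
 * hypothetical caller-side names.
 *
 *	static void
 *	diag_done(void *cbarg, bfa_status_t status)
 *	{
 *		struct my_ctx *ctx = cbarg;
 *
 *		if (status == BFA_STATUS_OK)
 *			... inspect ctx->fwping_result.status/dmastatus ...
 *	}
 *
 *	bfa_status_t
 *	my_fwping(struct bfa_diag_s *diag, struct my_ctx *ctx)
 *	{
 *		return bfa_diag_fwping(diag, 10, 0xdeadbeef,
 *				&ctx->fwping_result, diag_done, ctx);
 *	}
 */
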
 659/*
 660 *	PHY module specific
 661 */
 662typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status);
 663
 664struct bfa_phy_s {
 665	struct bfa_ioc_s *ioc;          /* back pointer to ioc */
 666	struct bfa_trc_mod_s *trcmod;   /* trace module */
 667	u8	instance;       /* port instance */
 668	u8	op_busy;        /* operation busy flag */
 669	u8	rsv[2];
 670	u32	residue;        /* residual length */
 671	u32	offset;         /* offset */
 672	bfa_status_t	status;         /* status */
 673	u8	*dbuf_kva;      /* dma buf virtual address */
 674	u64	dbuf_pa;        /* dma buf physical address */
 675	struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
 676	bfa_cb_phy_t	cbfn;           /* user callback function */
 677	void		*cbarg;         /* user callback arg */
 678	u8		*ubuf;          /* user supplied buffer */
 679	struct bfa_cb_qe_s	hcb_qe; /* comp: BFA callback qelem */
 680	u32	addr_off;       /* phy address offset */
 681	struct bfa_mbox_cmd_s	mb;       /* mailbox */
 682	struct bfa_ioc_notify_s	ioc_notify; /* ioc event notify */
 683	struct bfa_mem_dma_s	phy_dma;
 684};
 685#define BFA_PHY(__bfa)	(&(__bfa)->modules.phy)
 686#define BFA_MEM_PHY_DMA(__bfa)	(&(BFA_PHY(__bfa)->phy_dma))
 687
 688bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc);
 689bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
 690			struct bfa_phy_attr_s *attr,
 691			bfa_cb_phy_t cbfn, void *cbarg);
 692bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
 693			struct bfa_phy_stats_s *stats,
 694			bfa_cb_phy_t cbfn, void *cbarg);
 695bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
 696			void *buf, u32 len, u32 offset,
 697			bfa_cb_phy_t cbfn, void *cbarg);
 698bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
 699			void *buf, u32 len, u32 offset,
 700			bfa_cb_phy_t cbfn, void *cbarg);
 701
 702u32	bfa_phy_meminfo(bfa_boolean_t mincfg);
 703void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc,
 704		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
 705void bfa_phy_memclaim(struct bfa_phy_s *phy,
 706		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
 707void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
 708
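/*
 * Minimal sketch of the setup pattern shared by the flash/phy/fru
 * sub-modules, not a verbatim copy of the driver code: size the module's
 * DMA segment with *_meminfo(), attach the module, then hand it the
 * carved-out memory with *_memclaim().  dm_kva/dm_pa are assumed to be
 * cursors into a DMA segment already allocated by the driver.
 *
 *	u32 len = bfa_phy_meminfo(mincfg);
 *
 *	bfa_phy_attach(phy, ioc, dev, trcmod, mincfg);
 *	bfa_phy_memclaim(phy, dm_kva, dm_pa, mincfg);
 *	dm_kva += len;
 *	dm_pa  += len;
 */
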
 709/*
 710 * FRU module specific
 711 */
 712typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status);
 713
 714struct bfa_fru_s {
 715	struct bfa_ioc_s *ioc;		/* back pointer to ioc */
 716	struct bfa_trc_mod_s *trcmod;	/* trace module */
 717	u8		op_busy;	/* operation busy flag */
 718	u8		rsv[3];
 719	u32		residue;	/* residual length */
 720	u32		offset;		/* offset */
 721	bfa_status_t	status;		/* status */
 722	u8		*dbuf_kva;	/* dma buf virtual address */
 723	u64		dbuf_pa;	/* dma buf physical address */
 724	struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
 725	bfa_cb_fru_t	cbfn;		/* user callback function */
 726	void		*cbarg;		/* user callback arg */
 727	u8		*ubuf;		/* user supplied buffer */
 728	struct bfa_cb_qe_s	hcb_qe;	/* comp: BFA callback qelem */
 729	u32		addr_off;	/* fru address offset */
 730	struct bfa_mbox_cmd_s mb;	/* mailbox */
 731	struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
 732	struct bfa_mem_dma_s	fru_dma;
 733	u8		trfr_cmpl;
 734};
 735
 736#define BFA_FRU(__bfa)	(&(__bfa)->modules.fru)
 737#define BFA_MEM_FRU_DMA(__bfa)	(&(BFA_FRU(__bfa)->fru_dma))
 738
 739bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru,
 740			void *buf, u32 len, u32 offset,
 741			bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl);
 742bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru,
 743			void *buf, u32 len, u32 offset,
 744			bfa_cb_fru_t cbfn, void *cbarg);
 745bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size);
 746bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru,
 747			void *buf, u32 len, u32 offset,
 748			bfa_cb_fru_t cbfn, void *cbarg);
 749bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru,
 750			void *buf, u32 len, u32 offset,
 751			bfa_cb_fru_t cbfn, void *cbarg);
 752u32	bfa_fru_meminfo(bfa_boolean_t mincfg);
 753void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc,
 754		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
 755void bfa_fru_memclaim(struct bfa_fru_s *fru,
 756		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
 757void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg);
 758
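/*
 * Illustrative sketch (not part of this header): bounding a FRU VPD update
 * by the maximum size reported by firmware.  Passing trfr_cmpl as 1 to mark
 * the transfer as complete is an assumption here; fru_done/ctx are
 * hypothetical caller-side names.
 *
 *	u32 max_size;
 *
 *	if (bfa_fruvpd_get_max_size(fru, &max_size) != BFA_STATUS_OK ||
 *	    len > max_size)
 *		return BFA_STATUS_FAILED;
 *	return bfa_fruvpd_update(fru, buf, len, 0, fru_done, ctx, 1);
 */
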
 759/*
 760 * Driver Config (dconf) specific
 761 */
 762#define BFI_DCONF_SIGNATURE	0xabcdabcd
 763#define BFI_DCONF_VERSION	1
 764
 765#pragma pack(1)
 766struct bfa_dconf_hdr_s {
 767	u32	signature;
 768	u32	version;
 769};
 770
 771struct bfa_dconf_s {
 772	struct bfa_dconf_hdr_s		hdr;
 773	struct bfa_lunmask_cfg_s	lun_mask;
 774	struct bfa_throttle_cfg_s	throttle_cfg;
 775};
 776#pragma pack()
 777
 778struct bfa_dconf_mod_s {
 779	bfa_sm_t		sm;
 780	u8			instance;
 781	bfa_boolean_t		read_data_valid;
 782	bfa_boolean_t		min_cfg;
 783	struct bfa_timer_s	timer;
 784	struct bfa_s		*bfa;
 785	void			*bfad;
 786	void			*trcmod;
 787	struct bfa_dconf_s	*dconf;
 788	struct bfa_mem_kva_s	kva_seg;
 789};
 790
 791#define BFA_DCONF_MOD(__bfa)	\
 792	(&(__bfa)->modules.dconf_mod)
 793#define BFA_MEM_DCONF_KVA(__bfa)	(&(BFA_DCONF_MOD(__bfa)->kva_seg))
 794#define bfa_dconf_read_data_valid(__bfa)	\
 795	(BFA_DCONF_MOD(__bfa)->read_data_valid)
 796#define BFA_DCONF_UPDATE_TOV	5000	/* dconf update timeout in msec */
 797#define bfa_dconf_get_min_cfg(__bfa)	\
 798	(BFA_DCONF_MOD(__bfa)->min_cfg)
 799
 800void	bfa_dconf_modinit(struct bfa_s *bfa);
 801void	bfa_dconf_modexit(struct bfa_s *bfa);
 802bfa_status_t	bfa_dconf_update(struct bfa_s *bfa);
 803
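/*
 * Illustrative sketch (not necessarily the driver's exact logic): the kind
 * of header check the dconf read path can apply before trusting flash
 * contents.  A signature or version mismatch means the partition is
 * uninitialized or was written by an incompatible driver, so defaults
 * should be used instead.
 *
 *	struct bfa_dconf_s *dconf = BFA_DCONF_MOD(bfa)->dconf;
 *
 *	if (dconf->hdr.signature != BFI_DCONF_SIGNATURE ||
 *	    dconf->hdr.version != BFI_DCONF_VERSION)
 *		... treat contents as uninitialized and apply defaults ...
 */
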
 804/*
 805 *	IOC specific macros
 806 */
 807#define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
 808#define bfa_ioc_devid(__ioc)		((__ioc)->pcidev.device_id)
 809#define bfa_ioc_bar0(__ioc)		((__ioc)->pcidev.pci_bar_kva)
 810#define bfa_ioc_portid(__ioc)		((__ioc)->port_id)
 811#define bfa_ioc_asic_gen(__ioc)		((__ioc)->asic_gen)
 812#define bfa_ioc_is_cna(__ioc)	\
 813	((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) ||	\
 814	 (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL))
 815#define bfa_ioc_fetch_stats(__ioc, __stats) \
 816		(((__stats)->drv_stats) = (__ioc)->stats)
 817#define bfa_ioc_clr_stats(__ioc)	\
 818		memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
 819#define bfa_ioc_maxfrsize(__ioc)	((__ioc)->attr->maxfrsize)
 820#define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
 821#define bfa_ioc_speed_sup(__ioc)	\
 822	((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS :	\
 823	 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop))
 824#define bfa_ioc_get_nports(__ioc)	\
 825	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
 826
 827#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
 828#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
 829#define BFA_IOC_FW_SMEM_SIZE(__ioc)			\
 830	((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)	\
 831	 ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
 832#define BFA_IOC_FLASH_CHUNK_NO(off)		((off) / BFI_FLASH_CHUNK_SZ_WORDS)
 833#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	((off) % BFI_FLASH_CHUNK_SZ_WORDS)
 834#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  ((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
 835
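/*
 * Worked example for the chunk macros, assuming BFI_FLASH_CHUNK_SZ_WORDS
 * is 64 (i.e. 256-byte chunks, per bfi.h): a word offset of 200 lands in
 * chunk 3, at word 8 within that chunk, and chunk 3 itself starts at word
 * address 192.
 *
 *	BFA_IOC_FLASH_CHUNK_NO(200)		-> 200 / 64 = 3
 *	BFA_IOC_FLASH_OFFSET_IN_CHUNK(200)	-> 200 % 64 = 8
 *	BFA_IOC_FLASH_CHUNK_ADDR(3)		->   3 * 64 = 192
 */
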
 836/*
 837 * IOC mailbox interface
 838 */
 839void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
 840void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
 841		bfa_ioc_mbox_mcfunc_t *mcfuncs);
 842void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
 843void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
 844bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
 845void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
 846		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
 847
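/*
 * Illustrative sketch (not part of this header): a sub-module registers a
 * handler for its message class and then receives firmware-to-host
 * messages through the mailbox ISR dispatch.  BFI_MC_FLASH is assumed to
 * come from bfi.h; flash_intr is a hypothetical handler name.
 *
 *	static void
 *	flash_intr(void *cbarg, struct bfi_mbmsg_s *msg)
 *	{
 *		struct bfa_flash_s *flash = cbarg;
 *
 *		... decode the message and complete the pending request ...
 *	}
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_FLASH, flash_intr, flash);
 */
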
 848/*
 849 * IOC interfaces
 850 */
 851
 852#define bfa_ioc_pll_init_asic(__ioc) \
 853	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
 854			   (__ioc)->asic_mode))
 855
 856bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
 857bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
 858bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
 859bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
 860
 861#define bfa_ioc_isr_mode_set(__ioc, __msix) do {			\
 862	if ((__ioc)->ioc_hwif->ioc_isr_mode_set)			\
 863		((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix));	\
 864} while (0)
 865#define	bfa_ioc_ownership_reset(__ioc)				\
 866			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
 867#define bfa_ioc_get_fcmode(__ioc)	((__ioc)->fcmode)
 868#define bfa_ioc_lpu_read_stat(__ioc) do {			\
 869	if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)		\
 870		((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));	\
 871} while (0)
 872
 873void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
 874void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
 875void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc);
 876void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc);
 877
 878void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
 879		struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
 880void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
 881void bfa_ioc_detach(struct bfa_ioc_s *ioc);
 882void bfa_ioc_suspend(struct bfa_ioc_s *ioc);
 883void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 884		enum bfi_pcifn_class clscode);
 885void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa);
 886void bfa_ioc_enable(struct bfa_ioc_s *ioc);
 887void bfa_ioc_disable(struct bfa_ioc_s *ioc);
 888bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
 889
 890bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
 891		u32 boot_env);
 892void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
 893void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
 894bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
 895bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
 896bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
 897bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc);
 898bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 899bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
 900void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
 901enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
 902void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
 903void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
 904void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
 905void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
 906void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
 907		char *manufacturer);
 908void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
 909enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
 910
 911void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
 912void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 913		struct bfa_adapter_attr_s *ad_attr);
 914void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
 915bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
 916		int *trclen);
 917bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
 918				 int *trclen);
 919bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
 920	u32 *offset, int *buflen);
 921bfa_status_t bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc);
 922bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
 923void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
 924			struct bfi_ioc_image_hdr_s *fwhdr);
 925bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
 926			struct bfi_ioc_image_hdr_s *fwhdr);
 927void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
 928bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
 929bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
 930void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
 931
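/*
 * Minimal sketch of the usual IOC bring-up order as driven by the core
 * driver (mailbox registration and other intermediate steps elided).
 * Completion of the enable is reported asynchronously through the
 * callbacks in bfa_ioc_cbfn_s.
 *
 *	bfa_ioc_attach(ioc, bfa, &ioc_cbfn, &timer_mod);
 *	bfa_ioc_pci_init(ioc, pcidev, clscode);
 *	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_ioc_enable(ioc);
 */
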
 932/*
 933 * asic block configuration related APIs
 934 */
 935u32	bfa_ablk_meminfo(void);
 936void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa);
 937void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc);
 938bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk,
 939		struct bfa_ablk_cfg_s *ablk_cfg,
 940		bfa_ablk_cbfn_t cbfn, void *cbarg);
 941bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk,
 942		enum bfa_mode_s mode, int max_pf, int max_vf,
 943		bfa_ablk_cbfn_t cbfn, void *cbarg);
 944bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
 945		enum bfa_mode_s mode, int max_pf, int max_vf,
 946		bfa_ablk_cbfn_t cbfn, void *cbarg);
 947bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
 948		u8 port, enum bfi_pcifn_class personality,
 949		u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
 950bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
 951		bfa_ablk_cbfn_t cbfn, void *cbarg);
 952bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn,
 953		u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
 954bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
 955		bfa_ablk_cbfn_t cbfn, void *cbarg);
 956bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
 957		bfa_ablk_cbfn_t cbfn, void *cbarg);
 958
 959bfa_status_t bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
 960				u32 *fwimg);
 961/*
 962 * bfa mfg wwn API functions
 963 */
 964mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
 965mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
 966
 967/*
 968 * F/W Image Size & Chunk
 969 */
 970extern u32 bfi_image_cb_size;
 971extern u32 bfi_image_ct_size;
 972extern u32 bfi_image_ct2_size;
 973extern u32 *bfi_image_cb;
 974extern u32 *bfi_image_ct;
 975extern u32 *bfi_image_ct2;
 976
 977static inline u32 *
 978bfi_image_cb_get_chunk(u32 off)
 979{
 980	return (u32 *)(bfi_image_cb + off);
 981}
 982
 983static inline u32 *
 984bfi_image_ct_get_chunk(u32 off)
 985{
 986	return (u32 *)(bfi_image_ct + off);
 987}
 988
 989static inline u32 *
 990bfi_image_ct2_get_chunk(u32 off)
 991{
 992	return (u32 *)(bfi_image_ct2 + off);
 993}
 994
 995static inline u32 *
 996bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
 997{
 998	switch (asic_gen) {
 999	case BFI_ASIC_GEN_CB:
1000		return bfi_image_cb_get_chunk(off);
1002	case BFI_ASIC_GEN_CT:
1003		return bfi_image_ct_get_chunk(off);
1005	case BFI_ASIC_GEN_CT2:
1006		return bfi_image_ct2_get_chunk(off);
1008	default:
1009		return NULL;
1010	}
1011}
1012
1013static inline u32
1014bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
1015{
1016	switch (asic_gen) {
1017	case BFI_ASIC_GEN_CB:
1018		return bfi_image_cb_size;
1020	case BFI_ASIC_GEN_CT:
1021		return bfi_image_ct_size;
1023	case BFI_ASIC_GEN_CT2:
1024		return bfi_image_ct2_size;
1026	default:
1027		return 0;
1028	}
1029}
1030
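/*
 * Illustrative sketch (not part of this header): walking the ASIC-specific
 * firmware image chunk by chunk with the helpers above, assuming the image
 * size is expressed in 32-bit words.  The copy into adapter memory is
 * elided.
 *
 *	u32 sz = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
 *	u32 off, *chunk;
 *
 *	for (off = 0; off < sz; off += BFI_FLASH_CHUNK_SZ_WORDS) {
 *		chunk = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), off);
 *		... copy up to BFI_FLASH_CHUNK_SZ_WORDS words from chunk ...
 *	}
 */
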
1031/*
1032 * CNA TRCMOD declaration
1033 */
1034/*
1035 * !!! Only append to the enums defined here, to avoid introducing a
1036 * !!! version dependency between the trace utility and the driver
1037 */
1038enum {
1039	BFA_TRC_CNA_PORT	= 1,
1040	BFA_TRC_CNA_IOC		= 2,
1041	BFA_TRC_CNA_IOC_CB	= 3,
1042	BFA_TRC_CNA_IOC_CT	= 4,
1043};
1044
1045#endif /* __BFA_IOC_H__ */