   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
   4 * Copyright (c) 2014- QLogic Corporation.
   5 * All rights reserved
   6 * www.qlogic.com
   7 *
   8 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
   9 */
  10
  11#include "bfad_drv.h"
  12#include "bfad_im.h"
  13#include "bfa_ioc.h"
  14#include "bfi_reg.h"
  15#include "bfa_defs.h"
  16#include "bfa_defs_svc.h"
  17#include "bfi.h"
  18
  19BFA_TRC_FILE(CNA, IOC);
  20
  21/*
  22 * IOC local definitions
  23 */
  24#define BFA_IOC_TOV		3000	/* msecs */
  25#define BFA_IOC_HWSEM_TOV	500	/* msecs */
  26#define BFA_IOC_HB_TOV		500	/* msecs */
  27#define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
  28#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
  29
  30#define bfa_ioc_timer_start(__ioc)					\
  31	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
  32			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
  33#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
  34
  35#define bfa_hb_timer_start(__ioc)					\
  36	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
  37			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
  38#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
  39
  40#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
  41
  42#define bfa_ioc_state_disabled(__sm)		\
  43	(((__sm) == BFI_IOC_UNINIT) ||		\
  44	((__sm) == BFI_IOC_INITING) ||		\
  45	((__sm) == BFI_IOC_HWINIT) ||		\
  46	((__sm) == BFI_IOC_DISABLED) ||		\
  47	((__sm) == BFI_IOC_FAIL) ||		\
  48	((__sm) == BFI_IOC_CFG_DISABLED))
  49
  50/*
  51 * ASIC specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
  52 */
  53
  54#define bfa_ioc_firmware_lock(__ioc)			\
  55			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
  56#define bfa_ioc_firmware_unlock(__ioc)			\
  57			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
  58#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
  59#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
  60#define bfa_ioc_notify_fail(__ioc)              \
  61			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
  62#define bfa_ioc_sync_start(__ioc)               \
  63			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
  64#define bfa_ioc_sync_join(__ioc)                \
  65			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
  66#define bfa_ioc_sync_leave(__ioc)               \
  67			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
  68#define bfa_ioc_sync_ack(__ioc)                 \
  69			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
  70#define bfa_ioc_sync_complete(__ioc)            \
  71			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
  72#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
  73			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
  74#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
  75			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
  76#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
  77		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
  78#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
  79			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
  80
  81#define bfa_ioc_mbox_cmd_pending(__ioc)		\
  82			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
  83			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
  84
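    /*
     * Note (illustrative): a mailbox command counts as pending when the
     * driver-side queue still holds entries or the h/w command register
     * reads non-zero, i.e. firmware has not yet consumed the previously
     * posted message. Callers typically queue a new command instead of
     * overwriting the mailbox in that case.
     */
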
  85bfa_boolean_t bfa_auto_recover = BFA_TRUE;
  86
  87/*
  88 * forward declarations
  89 */
  90static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
  91static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
  92static void bfa_ioc_timeout(void *ioc);
  93static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
  94static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
  95static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
  96static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
  97static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
  98static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
  99static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
 100static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
 101static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
 102				enum bfa_ioc_event_e event);
 103static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 104static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
 105static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
 106static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 107static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
 108				struct bfi_ioc_image_hdr_s *base_fwhdr,
 109				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
 110static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
 111				struct bfa_ioc_s *ioc,
 112				struct bfi_ioc_image_hdr_s *base_fwhdr);
 113
 114/*
 115 * IOC state machine definitions/declarations
 116 */
 117enum ioc_event {
 118	IOC_E_RESET		= 1,	/*  IOC reset request		*/
 119	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
 120	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
 121	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
 122	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
 123	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
 124	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
 125	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
 126	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
 127	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
 128	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
 129	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
 130};
 131
 132bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
 133bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
 134bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
 135bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
 136bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
 137bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
 138bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
 139bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
 140bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
 141bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
 142
 143static struct bfa_sm_table_s ioc_sm_table[] = {
 144	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
 145	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
 146	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
 147	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
 148	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
 149	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
 150	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
 151	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
 152	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
 153	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
 154};
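
    /*
     * Usage sketch (illustrative): the table above maps each state handler
     * to its externally visible enum so the current state can be reported,
     * e.g. through the driver's generic lookup helper:
     *
     *	enum bfa_ioc_state st =
     *		bfa_sm_to_state(ioc_sm_table, (bfa_sm_t)ioc->fsm);
     */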
 155
 156/*
 157 * IOCPF state machine definitions/declarations
 158 */
 159
 160#define bfa_iocpf_timer_start(__ioc)					\
 161	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
 162			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
 163#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
 164
 165#define bfa_iocpf_poll_timer_start(__ioc)				\
 166	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
 167			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
 168
 169#define bfa_sem_timer_start(__ioc)					\
 170	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
 171			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
 172#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
 173
 174/*
 175 * Forward declarations for iocpf state machine
 176 */
 177static void bfa_iocpf_timeout(void *ioc_arg);
 178static void bfa_iocpf_sem_timeout(void *ioc_arg);
 179static void bfa_iocpf_poll_timeout(void *ioc_arg);
 180
 181/*
 182 * IOCPF state machine events
 183 */
 184enum iocpf_event {
 185	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
 186	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
 187	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
 188	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
 189	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
 190	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
 191	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
 192	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
 193	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
 194	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
 195	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
 196	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
 197};
 198
 199/*
 200 * IOCPF states
 201 */
 202enum bfa_iocpf_state {
 203	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
 204	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
 205	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
 206	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
 207	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
 208	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
 209	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
 210	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
 211	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
 212};
 213
 214bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
 215bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
 216bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
 217bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
 218bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
 219bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
 220bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
 221bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
 222						enum iocpf_event);
 223bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
 224bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
 225bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
 226bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
 227bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
 228						enum iocpf_event);
 229bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
 230
 231static struct bfa_sm_table_s iocpf_sm_table[] = {
 232	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
 233	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
 234	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
 235	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
 236	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
 237	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
 238	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
 239	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
 240	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
 241	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
 242	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
 243	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
 244	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
 245	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
 246};
 247
 248/*
 249 * IOC State Machine
 250 */
 251
 252/*
 253 * Beginning state. IOC uninit state.
 254 */
 255
 256static void
 257bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
 258{
 259}
 260
 261/*
 262 * IOC is in uninit state.
 263 */
 264static void
 265bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
 266{
 267	bfa_trc(ioc, event);
 268
 269	switch (event) {
 270	case IOC_E_RESET:
 271		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
 272		break;
 273
 274	default:
 275		bfa_sm_fault(ioc, event);
 276	}
 277}
 278/*
 279 * Reset entry actions -- initialize state machine
 280 */
 281static void
 282bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
 283{
 284	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
 285}
 286
 287/*
 288 * IOC is in reset state.
 289 */
 290static void
 291bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
 292{
 293	bfa_trc(ioc, event);
 294
 295	switch (event) {
 296	case IOC_E_ENABLE:
 297		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
 298		break;
 299
 300	case IOC_E_DISABLE:
 301		bfa_ioc_disable_comp(ioc);
 302		break;
 303
 304	case IOC_E_DETACH:
 305		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 306		break;
 307
 308	default:
 309		bfa_sm_fault(ioc, event);
 310	}
 311}
 312
 313
 314static void
 315bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
 316{
 317	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
 318}
 319
 320/*
 321 * Host IOC function is being enabled, awaiting response from firmware.
 322 * Semaphore is acquired.
 323 */
 324static void
 325bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
 326{
 327	bfa_trc(ioc, event);
 328
 329	switch (event) {
 330	case IOC_E_ENABLED:
 331		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 332		break;
 333
 334	case IOC_E_PFFAILED:
 335		/* !!! fall through !!! */
 336	case IOC_E_HWERROR:
 337		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 338		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 339		if (event != IOC_E_PFFAILED)
 340			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 341		break;
 342
 343	case IOC_E_HWFAILED:
 344		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 345		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
 346		break;
 347
 348	case IOC_E_DISABLE:
 349		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 350		break;
 351
 352	case IOC_E_DETACH:
 353		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 354		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 355		break;
 356
 357	case IOC_E_ENABLE:
 358		break;
 359
 360	default:
 361		bfa_sm_fault(ioc, event);
 362	}
 363}
 364
 365
 366static void
 367bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
 368{
 369	bfa_ioc_timer_start(ioc);
 370	bfa_ioc_send_getattr(ioc);
 371}
 372
 373/*
 374 * IOC configuration in progress. Timer is active.
 375 */
 376static void
 377bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
 378{
 379	bfa_trc(ioc, event);
 380
 381	switch (event) {
 382	case IOC_E_FWRSP_GETATTR:
 383		bfa_ioc_timer_stop(ioc);
 384		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 385		break;
 386
 387	case IOC_E_PFFAILED:
 388	case IOC_E_HWERROR:
 389		bfa_ioc_timer_stop(ioc);
 390		fallthrough;
 391	case IOC_E_TIMEOUT:
 392		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 393		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 394		if (event != IOC_E_PFFAILED)
 395			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
 396		break;
 397
 398	case IOC_E_DISABLE:
 399		bfa_ioc_timer_stop(ioc);
 400		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 401		break;
 402
 403	case IOC_E_ENABLE:
 404		break;
 405
 406	default:
 407		bfa_sm_fault(ioc, event);
 408	}
 409}
 410
 411static void
 412bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
 413{
 414	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 415
 416	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
 417	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
 418	bfa_ioc_hb_monitor(ioc);
 419	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
 420	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
 421}
 422
 423static void
 424bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
 425{
 426	bfa_trc(ioc, event);
 427
 428	switch (event) {
 429	case IOC_E_ENABLE:
 430		break;
 431
 432	case IOC_E_DISABLE:
 433		bfa_hb_timer_stop(ioc);
 434		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 435		break;
 436
 437	case IOC_E_PFFAILED:
 438	case IOC_E_HWERROR:
 439		bfa_hb_timer_stop(ioc);
 440		fallthrough;
 441	case IOC_E_HBFAIL:
 442		if (ioc->iocpf.auto_recover)
 443			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
 444		else
 445			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 446
 447		bfa_ioc_fail_notify(ioc);
 448
 449		if (event != IOC_E_PFFAILED)
 450			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
 451		break;
 452
 453	default:
 454		bfa_sm_fault(ioc, event);
 455	}
 456}
 457
 458
 459static void
 460bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
 461{
 462	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 463	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
 464	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
 465	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
 466}
 467
 468/*
 469 * IOC is being disabled
 470 */
 471static void
 472bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
 473{
 474	bfa_trc(ioc, event);
 475
 476	switch (event) {
 477	case IOC_E_DISABLED:
 478		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
 479		break;
 480
 481	case IOC_E_HWERROR:
 482		/*
 483		 * No state change.  Will move to disabled state
 484		 * after iocpf sm completes failure processing and
 485		 * moves to disabled state.
 486		 */
 487		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
 488		break;
 489
 490	case IOC_E_HWFAILED:
 491		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
 492		bfa_ioc_disable_comp(ioc);
 493		break;
 494
 495	default:
 496		bfa_sm_fault(ioc, event);
 497	}
 498}
 499
 500/*
 501 * IOC disable completion entry.
 502 */
 503static void
 504bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
 505{
 506	bfa_ioc_disable_comp(ioc);
 507}
 508
 509static void
 510bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
 511{
 512	bfa_trc(ioc, event);
 513
 514	switch (event) {
 515	case IOC_E_ENABLE:
 516		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
 517		break;
 518
 519	case IOC_E_DISABLE:
 520		ioc->cbfn->disable_cbfn(ioc->bfa);
 521		break;
 522
 523	case IOC_E_DETACH:
 524		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 525		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 526		break;
 527
 528	default:
 529		bfa_sm_fault(ioc, event);
 530	}
 531}
 532
 533
 534static void
 535bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
 536{
 537	bfa_trc(ioc, 0);
 538}
 539
 540/*
 541 * Hardware initialization retry.
 542 */
 543static void
 544bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
 545{
 546	bfa_trc(ioc, event);
 547
 548	switch (event) {
 549	case IOC_E_ENABLED:
 550		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 551		break;
 552
 553	case IOC_E_PFFAILED:
 554	case IOC_E_HWERROR:
 555		/*
 556		 * Initialization retry failed.
 557		 */
 558		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 559		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 560		if (event != IOC_E_PFFAILED)
 561			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 562		break;
 563
 564	case IOC_E_HWFAILED:
 565		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 566		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
 567		break;
 568
 569	case IOC_E_ENABLE:
 570		break;
 571
 572	case IOC_E_DISABLE:
 573		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 574		break;
 575
 576	case IOC_E_DETACH:
 577		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 578		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 579		break;
 580
 581	default:
 582		bfa_sm_fault(ioc, event);
 583	}
 584}
 585
 586
 587static void
 588bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
 589{
 590	bfa_trc(ioc, 0);
 591}
 592
 593/*
 594 * IOC failure.
 595 */
 596static void
 597bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 598{
 599	bfa_trc(ioc, event);
 600
 601	switch (event) {
 602
 603	case IOC_E_ENABLE:
 604		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 605		break;
 606
 607	case IOC_E_DISABLE:
 608		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 609		break;
 610
 611	case IOC_E_DETACH:
 612		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 613		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 614		break;
 615
 616	case IOC_E_HWERROR:
 617	case IOC_E_HWFAILED:
 618		/*
 619		 * HB failure / HW error notification, ignore.
 620		 */
 621		break;
 622	default:
 623		bfa_sm_fault(ioc, event);
 624	}
 625}
 626
 627static void
 628bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
 629{
 630	bfa_trc(ioc, 0);
 631}
 632
 633static void
 634bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
 635{
 636	bfa_trc(ioc, event);
 637
 638	switch (event) {
 639	case IOC_E_ENABLE:
 640		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 641		break;
 642
 643	case IOC_E_DISABLE:
 644		ioc->cbfn->disable_cbfn(ioc->bfa);
 645		break;
 646
 647	case IOC_E_DETACH:
 648		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 649		break;
 650
 651	case IOC_E_HWERROR:
 652		/* Ignore - already in hwfail state */
 653		break;
 654
 655	default:
 656		bfa_sm_fault(ioc, event);
 657	}
 658}
 659
 660/*
 661 * IOCPF State Machine
 662 */
 663
 664/*
 665 * Reset entry actions -- initialize state machine
 666 */
 667static void
 668bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
 669{
 670	iocpf->fw_mismatch_notified = BFA_FALSE;
 671	iocpf->auto_recover = bfa_auto_recover;
 672}
 673
 674/*
 675 * Beginning state. IOC is in reset state.
 676 */
 677static void
 678bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 679{
 680	struct bfa_ioc_s *ioc = iocpf->ioc;
 681
 682	bfa_trc(ioc, event);
 683
 684	switch (event) {
 685	case IOCPF_E_ENABLE:
 686		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
 687		break;
 688
 689	case IOCPF_E_STOP:
 690		break;
 691
 692	default:
 693		bfa_sm_fault(ioc, event);
 694	}
 695}
 696
 697/*
 698 * Semaphore should be acquired for version check.
 699 */
 700static void
 701bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 702{
 703	struct bfi_ioc_image_hdr_s	fwhdr;
 704	u32	r32, fwstate, pgnum, loff = 0;
 705	int	i;
 706
 707	/*
 708	 * Spin on init semaphore to serialize.
 709	 */
 710	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 711	while (r32 & 0x1) {
 712		udelay(20);
 713		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 714	}
 715
 716	/* h/w sem init */
 717	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
 718	if (fwstate == BFI_IOC_UNINIT) {
 719		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 720		goto sem_get;
 721	}
 722
 723	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
 724
 725	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
 726		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 727		goto sem_get;
 728	}
 729
 730	/*
 731	 * Clear fwver hdr
 732	 */
 733	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
 734	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
 735
 736	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
 737		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
 738		loff += sizeof(u32);
 739	}
 740
 741	bfa_trc(iocpf->ioc, fwstate);
 742	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
 743	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
 744	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
 745
 746	/*
 747	 * Unlock the hw semaphore. Should be here only once per boot.
 748	 */
 749	bfa_ioc_ownership_reset(iocpf->ioc);
 750
 751	/*
 752	 * unlock init semaphore.
 753	 */
 754	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 755
 756sem_get:
 757	bfa_ioc_hw_sem_get(iocpf->ioc);
 758}
 759
 760/*
 761 * Awaiting h/w semaphore to continue with version check.
 762 */
 763static void
 764bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 765{
 766	struct bfa_ioc_s *ioc = iocpf->ioc;
 767
 768	bfa_trc(ioc, event);
 769
 770	switch (event) {
 771	case IOCPF_E_SEMLOCKED:
 772		if (bfa_ioc_firmware_lock(ioc)) {
 773			if (bfa_ioc_sync_start(ioc)) {
 774				bfa_ioc_sync_join(ioc);
 775				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
 776			} else {
 777				bfa_ioc_firmware_unlock(ioc);
 778				writel(1, ioc->ioc_regs.ioc_sem_reg);
 779				bfa_sem_timer_start(ioc);
 780			}
 781		} else {
 782			writel(1, ioc->ioc_regs.ioc_sem_reg);
 783			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
 784		}
 785		break;
 786
 787	case IOCPF_E_SEM_ERROR:
 788		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 789		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
 790		break;
 791
 792	case IOCPF_E_DISABLE:
 793		bfa_sem_timer_stop(ioc);
 794		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 795		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
 796		break;
 797
 798	case IOCPF_E_STOP:
 799		bfa_sem_timer_stop(ioc);
 800		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 801		break;
 802
 803	default:
 804		bfa_sm_fault(ioc, event);
 805	}
 806}
 807
 808/*
 809 * Notify enable completion callback.
 810 */
 811static void
 812bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
 813{
 814	/*
 815	 * Call only the first time sm enters fwmismatch state.
 816	 */
 817	if (iocpf->fw_mismatch_notified == BFA_FALSE)
 818		bfa_ioc_pf_fwmismatch(iocpf->ioc);
 819
 820	iocpf->fw_mismatch_notified = BFA_TRUE;
 821	bfa_iocpf_timer_start(iocpf->ioc);
 822}
 823
 824/*
 825 * Awaiting firmware version match.
 826 */
 827static void
 828bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 829{
 830	struct bfa_ioc_s *ioc = iocpf->ioc;
 831
 832	bfa_trc(ioc, event);
 833
 834	switch (event) {
 835	case IOCPF_E_TIMEOUT:
 836		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
 837		break;
 838
 839	case IOCPF_E_DISABLE:
 840		bfa_iocpf_timer_stop(ioc);
 841		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 842		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
 843		break;
 844
 845	case IOCPF_E_STOP:
 846		bfa_iocpf_timer_stop(ioc);
 847		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 848		break;
 849
 850	default:
 851		bfa_sm_fault(ioc, event);
 852	}
 853}
 854
 855/*
 856 * Request for semaphore.
 857 */
 858static void
 859bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
 860{
 861	bfa_ioc_hw_sem_get(iocpf->ioc);
 862}
 863
 864/*
 865 * Awaiting semaphore for h/w initialization.
 866 */
 867static void
 868bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 869{
 870	struct bfa_ioc_s *ioc = iocpf->ioc;
 871
 872	bfa_trc(ioc, event);
 873
 874	switch (event) {
 875	case IOCPF_E_SEMLOCKED:
 876		if (bfa_ioc_sync_complete(ioc)) {
 877			bfa_ioc_sync_join(ioc);
 878			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
 879		} else {
 880			writel(1, ioc->ioc_regs.ioc_sem_reg);
 881			bfa_sem_timer_start(ioc);
 882		}
 883		break;
 884
 885	case IOCPF_E_SEM_ERROR:
 886		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 887		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
 888		break;
 889
 890	case IOCPF_E_DISABLE:
 891		bfa_sem_timer_stop(ioc);
 892		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 893		break;
 894
 895	default:
 896		bfa_sm_fault(ioc, event);
 897	}
 898}
 899
 900static void
 901bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
 902{
 903	iocpf->poll_time = 0;
 904	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
 905}
 906
 907/*
 908 * Hardware is being initialized. Interrupts are enabled.
 909 * Holding hardware semaphore lock.
 910 */
 911static void
 912bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 913{
 914	struct bfa_ioc_s *ioc = iocpf->ioc;
 915
 916	bfa_trc(ioc, event);
 917
 918	switch (event) {
 919	case IOCPF_E_FWREADY:
 920		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
 921		break;
 922
 923	case IOCPF_E_TIMEOUT:
 924		writel(1, ioc->ioc_regs.ioc_sem_reg);
 925		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
 926		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 927		break;
 928
 929	case IOCPF_E_DISABLE:
 930		bfa_iocpf_timer_stop(ioc);
 931		bfa_ioc_sync_leave(ioc);
 932		writel(1, ioc->ioc_regs.ioc_sem_reg);
 933		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 934		break;
 935
 936	default:
 937		bfa_sm_fault(ioc, event);
 938	}
 939}
 940
 941static void
 942bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
 943{
 944	bfa_iocpf_timer_start(iocpf->ioc);
 945	/*
 946	 * Enable Interrupts before sending fw IOC ENABLE cmd.
 947	 */
 948	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
 949	bfa_ioc_send_enable(iocpf->ioc);
 950}
 951
 952/*
 953 * Host IOC function is being enabled, awaiting response from firmware.
 954 * Semaphore is acquired.
 955 */
 956static void
 957bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 958{
 959	struct bfa_ioc_s *ioc = iocpf->ioc;
 960
 961	bfa_trc(ioc, event);
 962
 963	switch (event) {
 964	case IOCPF_E_FWRSP_ENABLE:
 965		bfa_iocpf_timer_stop(ioc);
 966		writel(1, ioc->ioc_regs.ioc_sem_reg);
 967		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
 968		break;
 969
 970	case IOCPF_E_INITFAIL:
 971		bfa_iocpf_timer_stop(ioc);
 972		fallthrough;
 973
 974	case IOCPF_E_TIMEOUT:
 975		writel(1, ioc->ioc_regs.ioc_sem_reg);
 976		if (event == IOCPF_E_TIMEOUT)
 977			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
 978		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 979		break;
 980
 981	case IOCPF_E_DISABLE:
 982		bfa_iocpf_timer_stop(ioc);
 983		writel(1, ioc->ioc_regs.ioc_sem_reg);
 984		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
 985		break;
 986
 987	default:
 988		bfa_sm_fault(ioc, event);
 989	}
 990}
 991
 992static void
 993bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
 994{
 995	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
 996}
 997
 998static void
 999bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1000{
1001	struct bfa_ioc_s *ioc = iocpf->ioc;
1002
1003	bfa_trc(ioc, event);
1004
1005	switch (event) {
1006	case IOCPF_E_DISABLE:
1007		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
1008		break;
1009
1010	case IOCPF_E_GETATTRFAIL:
1011		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
1012		break;
1013
1014	case IOCPF_E_FAIL:
1015		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
1016		break;
1017
1018	default:
1019		bfa_sm_fault(ioc, event);
1020	}
1021}
1022
1023static void
1024bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
1025{
1026	bfa_iocpf_timer_start(iocpf->ioc);
1027	bfa_ioc_send_disable(iocpf->ioc);
1028}
1029
1030/*
1031 * IOC is being disabled
1032 */
1033static void
1034bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1035{
1036	struct bfa_ioc_s *ioc = iocpf->ioc;
1037
1038	bfa_trc(ioc, event);
1039
1040	switch (event) {
1041	case IOCPF_E_FWRSP_DISABLE:
1042		bfa_iocpf_timer_stop(ioc);
1043		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1044		break;
1045
1046	case IOCPF_E_FAIL:
1047		bfa_iocpf_timer_stop(ioc);
1048		fallthrough;
1049
1050	case IOCPF_E_TIMEOUT:
1051		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1052		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1053		break;
1054
1055	case IOCPF_E_FWRSP_ENABLE:
1056		break;
1057
1058	default:
1059		bfa_sm_fault(ioc, event);
1060	}
1061}
1062
1063static void
1064bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
1065{
1066	bfa_ioc_hw_sem_get(iocpf->ioc);
1067}
1068
1069/*
1070 * Awaiting the h/w semaphore so this IOC can leave the f/w sync group (hb ack is being removed).
1071 */
1072static void
1073bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1074{
1075	struct bfa_ioc_s *ioc = iocpf->ioc;
1076
1077	bfa_trc(ioc, event);
1078
1079	switch (event) {
1080	case IOCPF_E_SEMLOCKED:
1081		bfa_ioc_sync_leave(ioc);
1082		writel(1, ioc->ioc_regs.ioc_sem_reg);
1083		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1084		break;
1085
1086	case IOCPF_E_SEM_ERROR:
1087		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1088		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1089		break;
1090
1091	case IOCPF_E_FAIL:
1092		break;
1093
1094	default:
1095		bfa_sm_fault(ioc, event);
1096	}
1097}
1098
1099/*
1100 * IOC disable completion entry.
1101 */
1102static void
1103bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
1104{
1105	bfa_ioc_mbox_flush(iocpf->ioc);
1106	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
1107}
1108
1109static void
1110bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1111{
1112	struct bfa_ioc_s *ioc = iocpf->ioc;
1113
1114	bfa_trc(ioc, event);
1115
1116	switch (event) {
1117	case IOCPF_E_ENABLE:
1118		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1119		break;
1120
1121	case IOCPF_E_STOP:
1122		bfa_ioc_firmware_unlock(ioc);
1123		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1124		break;
1125
1126	default:
1127		bfa_sm_fault(ioc, event);
1128	}
1129}
1130
1131static void
1132bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1133{
1134	bfa_ioc_debug_save_ftrc(iocpf->ioc);
1135	bfa_ioc_hw_sem_get(iocpf->ioc);
1136}
1137
1138/*
1139 * Hardware initialization failed.
1140 */
1141static void
1142bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1143{
1144	struct bfa_ioc_s *ioc = iocpf->ioc;
1145
1146	bfa_trc(ioc, event);
1147
1148	switch (event) {
1149	case IOCPF_E_SEMLOCKED:
1150		bfa_ioc_notify_fail(ioc);
1151		bfa_ioc_sync_leave(ioc);
1152		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1153		writel(1, ioc->ioc_regs.ioc_sem_reg);
1154		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1155		break;
1156
1157	case IOCPF_E_SEM_ERROR:
1158		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1159		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1160		break;
1161
1162	case IOCPF_E_DISABLE:
1163		bfa_sem_timer_stop(ioc);
1164		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1165		break;
1166
1167	case IOCPF_E_STOP:
1168		bfa_sem_timer_stop(ioc);
1169		bfa_ioc_firmware_unlock(ioc);
1170		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1171		break;
1172
1173	case IOCPF_E_FAIL:
1174		break;
1175
1176	default:
1177		bfa_sm_fault(ioc, event);
1178	}
1179}
1180
1181static void
1182bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1183{
1184	bfa_trc(iocpf->ioc, 0);
1185}
1186
1187/*
1188 * Hardware initialization failed.
1189 */
1190static void
1191bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1192{
1193	struct bfa_ioc_s *ioc = iocpf->ioc;
1194
1195	bfa_trc(ioc, event);
1196
1197	switch (event) {
1198	case IOCPF_E_DISABLE:
1199		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1200		break;
1201
1202	case IOCPF_E_STOP:
1203		bfa_ioc_firmware_unlock(ioc);
1204		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1205		break;
1206
1207	default:
1208		bfa_sm_fault(ioc, event);
1209	}
1210}
1211
1212static void
1213bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1214{
1215	/*
1216	 * Mark IOC as failed in hardware and stop firmware.
1217	 */
1218	bfa_ioc_lpu_stop(iocpf->ioc);
1219
1220	/*
1221	 * Flush any queued up mailbox requests.
1222	 */
1223	bfa_ioc_mbox_flush(iocpf->ioc);
1224
1225	bfa_ioc_hw_sem_get(iocpf->ioc);
1226}
1227
1228static void
1229bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1230{
1231	struct bfa_ioc_s *ioc = iocpf->ioc;
1232
1233	bfa_trc(ioc, event);
1234
1235	switch (event) {
1236	case IOCPF_E_SEMLOCKED:
1237		bfa_ioc_sync_ack(ioc);
1238		bfa_ioc_notify_fail(ioc);
1239		if (!iocpf->auto_recover) {
1240			bfa_ioc_sync_leave(ioc);
1241			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1242			writel(1, ioc->ioc_regs.ioc_sem_reg);
1243			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1244		} else {
1245			if (bfa_ioc_sync_complete(ioc))
1246				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1247			else {
1248				writel(1, ioc->ioc_regs.ioc_sem_reg);
1249				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1250			}
1251		}
1252		break;
1253
1254	case IOCPF_E_SEM_ERROR:
1255		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1256		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1257		break;
1258
1259	case IOCPF_E_DISABLE:
1260		bfa_sem_timer_stop(ioc);
1261		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1262		break;
1263
1264	case IOCPF_E_FAIL:
1265		break;
1266
1267	default:
1268		bfa_sm_fault(ioc, event);
1269	}
1270}
1271
1272static void
1273bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1274{
1275	bfa_trc(iocpf->ioc, 0);
1276}
1277
1278/*
1279 * IOC is in failed state.
1280 */
1281static void
1282bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1283{
1284	struct bfa_ioc_s *ioc = iocpf->ioc;
1285
1286	bfa_trc(ioc, event);
1287
1288	switch (event) {
1289	case IOCPF_E_DISABLE:
1290		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1291		break;
1292
1293	default:
1294		bfa_sm_fault(ioc, event);
1295	}
1296}
1297
1298/*
1299 *  BFA IOC private functions
1300 */
1301
1302/*
1303 * Notify common modules registered for notification.
1304 */
1305static void
1306bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
1307{
1308	struct bfa_ioc_notify_s	*notify;
1309	struct list_head	*qe;
1310
1311	list_for_each(qe, &ioc->notify_q) {
1312		notify = (struct bfa_ioc_notify_s *)qe;
1313		notify->cbfn(notify->cbarg, event);
1314	}
1315}
1316
1317static void
1318bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1319{
1320	ioc->cbfn->disable_cbfn(ioc->bfa);
1321	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1322}
1323
1324bfa_boolean_t
1325bfa_ioc_sem_get(void __iomem *sem_reg)
1326{
1327	u32 r32;
1328	int cnt = 0;
1329#define BFA_SEM_SPINCNT	3000
1330
1331	r32 = readl(sem_reg);
1332
1333	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1334		cnt++;
1335		udelay(2);
1336		r32 = readl(sem_reg);
1337	}
1338
1339	if (!(r32 & 1))
1340		return BFA_TRUE;
1341
1342	return BFA_FALSE;
1343}
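
    /*
     * Note (illustrative): the spin above is bounded; with BFA_SEM_SPINCNT
     * iterations of udelay(2) the worst-case busy-wait is roughly
     * 3000 * 2us = 6ms before the semaphore is reported as unavailable.
     */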
1344
1345static void
1346bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1347{
1348	u32	r32;
1349
1350	/*
1351	 * First read to the semaphore register will return 0, subsequent reads
1352	 * will return 1. Semaphore is released by writing 1 to the register
1353	 */
1354	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1355	if (r32 == ~0) {
1356		WARN_ON(r32 == ~0);
1357		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1358		return;
1359	}
1360	if (!(r32 & 1)) {
1361		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1362		return;
1363	}
1364
1365	bfa_sem_timer_start(ioc);
1366}
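
    /*
     * Usage sketch (illustrative): callers request the semaphore and then
     * react to the event this helper raises on the IOCPF state machine:
     *
     *	bfa_ioc_hw_sem_get(ioc);
     *		-> IOCPF_E_SEMLOCKED  if the lock bit was clear (acquired)
     *		-> IOCPF_E_SEM_ERROR  if the register reads ~0 (dead PCI link)
     *		otherwise the sem timer is started and the get is retried
     */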
1367
1368/*
1369 * Initialize LPU local memory (aka secondary memory / SRAM)
1370 */
1371static void
1372bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1373{
1374	u32	pss_ctl;
1375	int		i;
1376#define PSS_LMEM_INIT_TIME  10000
1377
1378	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1379	pss_ctl &= ~__PSS_LMEM_RESET;
1380	pss_ctl |= __PSS_LMEM_INIT_EN;
1381
1382	/*
1383	 * i2c workaround 12.5khz clock
1384	 */
1385	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1386	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1387
1388	/*
1389	 * wait for memory initialization to be complete
1390	 */
1391	i = 0;
1392	do {
1393		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1394		i++;
1395	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1396
1397	/*
1398	 * If memory initialization is not successful, IOC timeout will catch
1399	 * such failures.
1400	 */
1401	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1402	bfa_trc(ioc, pss_ctl);
1403
1404	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1405	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1406}
1407
1408static void
1409bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1410{
1411	u32	pss_ctl;
1412
1413	/*
1414	 * Take processor out of reset.
1415	 */
1416	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1417	pss_ctl &= ~__PSS_LPU0_RESET;
1418
1419	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1420}
1421
1422static void
1423bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1424{
1425	u32	pss_ctl;
1426
1427	/*
1428	 * Put processors in reset.
1429	 */
1430	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1431	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1432
1433	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1434}
1435
1436/*
1437 * Read the firmware image header (and its version) from smem.
1438 */
1439void
1440bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1441{
1442	u32	pgnum;
1443	u32	loff = 0;
1444	int		i;
1445	u32	*fwsig = (u32 *) fwhdr;
1446
1447	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1448	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1449
1450	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1451	     i++) {
1452		fwsig[i] =
1453			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1454		loff += sizeof(u32);
1455	}
1456}
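
    /*
     * Note (illustrative): smem is reached through a sliding page window;
     * PSS_SMEM_PGNUM() picks the page written to host_page_num_fn, and
     * bfa_mem_read() then indexes within that page, so the image header is
     * pulled one 32-bit word at a time starting at offset 0 of page 0.
     */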
1457
1458/*
1459 * Returns TRUE if driver is willing to work with current smem f/w version.
1460 */
1461bfa_boolean_t
1462bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
1463		struct bfi_ioc_image_hdr_s *smem_fwhdr)
1464{
1465	struct bfi_ioc_image_hdr_s *drv_fwhdr;
1466	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;
1467
1468	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1469		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1470
1471	/*
1472	 * If smem is incompatible or old, driver should not work with it.
1473	 */
1474	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
1475	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
1476		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
1477		return BFA_FALSE;
1478	}
1479
1480	/*
1481	 * If flash has a better f/w than smem, do not work with smem.
1482	 * If smem f/w == flash f/w, work with it, as it is neither old nor incompatible.
1483	 * If flash is old or incompatible, work with smem iff smem f/w == drv f/w.
1484	 */
1485	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);
1486
1487	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
1488		return BFA_FALSE;
1489	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
1490		return BFA_TRUE;
1491	} else {
1492		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
1493			BFA_TRUE : BFA_FALSE;
1494	}
1495}
1496
1497/*
1498 * Return true if current running version is valid. Firmware signature and
1499 * execution context (driver/bios) must match.
1500 */
1501static bfa_boolean_t
1502bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1503{
1504	struct bfi_ioc_image_hdr_s fwhdr;
1505
1506	bfa_ioc_fwver_get(ioc, &fwhdr);
1507
1508	if (swab32(fwhdr.bootenv) != boot_env) {
1509		bfa_trc(ioc, fwhdr.bootenv);
1510		bfa_trc(ioc, boot_env);
1511		return BFA_FALSE;
1512	}
1513
1514	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1515}
1516
1517static bfa_boolean_t
1518bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
1519				struct bfi_ioc_image_hdr_s *fwhdr_2)
1520{
1521	int i;
1522
1523	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
1524		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
1525			return BFA_FALSE;
1526
1527	return BFA_TRUE;
1528}
1529
1530/*
1531 * Returns TRUE if major, minor and maintenance versions are the same.
1532 * If patch versions are also the same, require the MD5 checksums to match.
1533 */
1534static bfa_boolean_t
1535bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
1536				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
1537{
1538	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
1539		return BFA_FALSE;
1540
1541	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
1542		return BFA_FALSE;
1543
1544	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
1545		return BFA_FALSE;
1546
1547	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
1548		return BFA_FALSE;
1549
1550	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
1551		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
1552		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
1553		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
1554	}
1555
1556	return BFA_TRUE;
1557}
1558
1559static bfa_boolean_t
1560bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
1561{
1562	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
1563		return BFA_FALSE;
1564
1565	return BFA_TRUE;
1566}
1567
1568static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
1569{
1570	if (fwhdr->fwver.phase == 0 &&
1571		fwhdr->fwver.build == 0)
1572		return BFA_TRUE;
1573
1574	return BFA_FALSE;
1575}
1576
1577/*
1578 * Compares fwhdr_to_cmp against base_fwhdr: returns whether it is better, older, the same, or incompatible.
1579 */
1580static enum bfi_ioc_img_ver_cmp_e
1581bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
1582				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
1583{
1584	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
1585		return BFI_IOC_IMG_VER_INCOMP;
1586
1587	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
1588		return BFI_IOC_IMG_VER_BETTER;
1589
1590	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
1591		return BFI_IOC_IMG_VER_OLD;
1592
1593	/*
1594	 * GA takes priority over internal builds of the same patch stream.
1595	 * At this point major, minor, maint and patch numbers are the same.
1596	 */
1597
1598	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
1599		if (fwhdr_is_ga(fwhdr_to_cmp))
1600			return BFI_IOC_IMG_VER_SAME;
1601		else
1602			return BFI_IOC_IMG_VER_OLD;
1603	} else {
1604		if (fwhdr_is_ga(fwhdr_to_cmp))
1605			return BFI_IOC_IMG_VER_BETTER;
1606	}
1607
1608	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
1609		return BFI_IOC_IMG_VER_BETTER;
1610	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
1611		return BFI_IOC_IMG_VER_OLD;
1612
1613	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
1614		return BFI_IOC_IMG_VER_BETTER;
1615	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
1616		return BFI_IOC_IMG_VER_OLD;
1617
1618	/*
1619	 * All version numbers are equal.
1620	 * The MD5 check was already done as part of the compatibility check.
1621	 */
1622	return BFI_IOC_IMG_VER_SAME;
1623}
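
    /*
     * Worked example (illustrative), comparing fwhdr_to_cmp against base:
     * with equal signature/major/minor/maint, patch 3 vs 2 -> BETTER and
     * patch 1 vs 2 -> OLD; with equal patch, a GA build (phase == 0 &&
     * build == 0) beats any internal build of the same patch stream, and
     * two GA builds compare as SAME.
     */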
1624
1625#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */
1626
1627bfa_status_t
1628bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
1629				u32 *fwimg)
1630{
1631	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
1632			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
1633			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
1634}
1635
1636static enum bfi_ioc_img_ver_cmp_e
1637bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
1638			struct bfi_ioc_image_hdr_s *base_fwhdr)
1639{
1640	struct bfi_ioc_image_hdr_s *flash_fwhdr;
1641	bfa_status_t status;
1642	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
1643
1644	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
1645	if (status != BFA_STATUS_OK)
1646		return BFI_IOC_IMG_VER_INCOMP;
1647
1648	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
1649	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
1650		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
1651	else
1652		return BFI_IOC_IMG_VER_INCOMP;
1653}
1654
1655
1656/*
1657 * Invalidate fwver signature
1658 */
1659bfa_status_t
1660bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
1661{
1662
1663	u32	pgnum;
1664	u32	loff = 0;
1665	enum bfi_ioc_state ioc_fwstate;
1666
1667	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1668	if (!bfa_ioc_state_disabled(ioc_fwstate))
1669		return BFA_STATUS_ADAPTER_ENABLED;
1670
1671	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1672	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1673	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
1674
1675	return BFA_STATUS_OK;
1676}
1677
1678/*
1679 * Conditionally flush any pending message from firmware at start.
1680 */
1681static void
1682bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1683{
1684	u32	r32;
1685
1686	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1687	if (r32)
1688		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1689}
1690
1691static void
1692bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1693{
1694	enum bfi_ioc_state ioc_fwstate;
1695	bfa_boolean_t fwvalid;
1696	u32 boot_type;
1697	u32 boot_env;
1698
1699	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1700
1701	if (force)
1702		ioc_fwstate = BFI_IOC_UNINIT;
1703
1704	bfa_trc(ioc, ioc_fwstate);
1705
1706	boot_type = BFI_FWBOOT_TYPE_NORMAL;
1707	boot_env = BFI_FWBOOT_ENV_OS;
1708
1709	/*
1710	 * check if firmware is valid
1711	 */
1712	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1713		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1714
1715	if (!fwvalid) {
1716		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1717			bfa_ioc_poll_fwinit(ioc);
1718		return;
1719	}
1720
1721	/*
1722	 * If hardware initialization is in progress (initialized by other IOC),
1723	 * just wait for an initialization completion interrupt.
1724	 */
1725	if (ioc_fwstate == BFI_IOC_INITING) {
1726		bfa_ioc_poll_fwinit(ioc);
1727		return;
1728	}
1729
1730	/*
1731	 * If IOC function is disabled and firmware version is same,
1732	 * just re-enable IOC.
1733	 *
1734	 * If option rom, IOC must not be in operational state. With
1735	 * convergence, IOC will be in operational state when 2nd driver
1736	 * is loaded.
1737	 */
1738	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1739
1740		/*
1741		 * When using MSI-X any pending firmware ready event should
1742		 * be flushed. Otherwise MSI-X interrupts are not delivered.
1743		 */
1744		bfa_ioc_msgflush(ioc);
1745		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1746		return;
1747	}
1748
1749	/*
1750	 * Initialize the h/w for any other states.
1751	 */
1752	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1753		bfa_ioc_poll_fwinit(ioc);
1754}
1755
1756static void
1757bfa_ioc_timeout(void *ioc_arg)
1758{
1759	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1760
1761	bfa_trc(ioc, 0);
1762	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1763}
1764
1765void
1766bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1767{
1768	u32 *msgp = (u32 *) ioc_msg;
1769	u32 i;
1770
1771	bfa_trc(ioc, msgp[0]);
1772	bfa_trc(ioc, len);
1773
1774	WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1775
1776	/*
1777	 * first write msg to mailbox registers
1778	 */
1779	for (i = 0; i < len / sizeof(u32); i++)
1780		writel(cpu_to_le32(msgp[i]),
1781			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1782
1783	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1784		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1785
1786	/*
1787	 * write 1 to mailbox CMD to trigger LPU event
1788	 */
1789	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1790	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1791}
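
    /*
     * Protocol sketch (illustrative): the message words are laid into the
     * hfn mailbox, any unused words are zeroed, and writing 1 to the mbox
     * command register rings the LPU doorbell; the trailing readl() flushes
     * the posted write so the doorbell is not left sitting in a PCI buffer.
     */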
1792
1793static void
1794bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1795{
1796	struct bfi_ioc_ctrl_req_s enable_req;
1797
1798	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1799		    bfa_ioc_portid(ioc));
1800	enable_req.clscode = cpu_to_be16(ioc->clscode);
1801	/* unsigned 32-bit time_t overflow in y2106 */
1802	enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1803	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1804}
1805
1806static void
1807bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1808{
1809	struct bfi_ioc_ctrl_req_s disable_req;
1810
1811	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1812		    bfa_ioc_portid(ioc));
1813	disable_req.clscode = cpu_to_be16(ioc->clscode);
1814	/* unsigned 32-bit time_t overflow in y2106 */
1815	disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1816	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1817}
1818
1819static void
1820bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1821{
1822	struct bfi_ioc_getattr_req_s	attr_req;
1823
1824	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1825		    bfa_ioc_portid(ioc));
1826	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1827	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1828}
1829
1830static void
1831bfa_ioc_hb_check(void *cbarg)
1832{
1833	struct bfa_ioc_s  *ioc = cbarg;
1834	u32	hb_count;
1835
1836	hb_count = readl(ioc->ioc_regs.heartbeat);
1837	if (ioc->hb_count == hb_count) {
1838		bfa_ioc_recover(ioc);
1839		return;
1840	} else {
1841		ioc->hb_count = hb_count;
1842	}
1843
1844	bfa_ioc_mbox_poll(ioc);
1845	bfa_hb_timer_start(ioc);
1846}
1847
1848static void
1849bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1850{
1851	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1852	bfa_hb_timer_start(ioc);
1853}
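
    /*
     * Note (illustrative): the heartbeat counter is re-sampled every
     * BFA_IOC_HB_TOV (500) msecs by bfa_ioc_hb_check(); if two consecutive
     * samples are equal the firmware is assumed hung and bfa_ioc_recover()
     * starts failure handling.
     */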
1854
1855/*
1856 *	Initiate a full firmware download.
1857 */
1858static bfa_status_t
1859bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1860		    u32 boot_env)
1861{
1862	u32 *fwimg;
1863	u32 pgnum;
1864	u32 loff = 0;
1865	u32 chunkno = 0;
1866	u32 i;
1867	u32 asicmode;
1868	u32 fwimg_size;
1869	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
1870	bfa_status_t status;
1871
1872	if (boot_env == BFI_FWBOOT_ENV_OS &&
1873		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1874		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
1875
1876		status = bfa_ioc_flash_img_get_chnk(ioc,
1877			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
1878		if (status != BFA_STATUS_OK)
1879			return status;
1880
1881		fwimg = fwimg_buf;
1882	} else {
1883		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
1884		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1885					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1886	}
1887
1888	bfa_trc(ioc, fwimg_size);
1889
1890
1891	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1892	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1893
1894	for (i = 0; i < fwimg_size; i++) {
1895
1896		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1897			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1898
1899			if (boot_env == BFI_FWBOOT_ENV_OS &&
1900				boot_type == BFI_FWBOOT_TYPE_FLASH) {
1901				status = bfa_ioc_flash_img_get_chnk(ioc,
1902					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
1903					fwimg_buf);
1904				if (status != BFA_STATUS_OK)
1905					return status;
1906
1907				fwimg = fwimg_buf;
1908			} else {
1909				fwimg = bfa_cb_image_get_chunk(
1910					bfa_ioc_asic_gen(ioc),
1911					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1912			}
1913		}
1914
1915		/*
1916		 * write smem
1917		 */
1918		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1919			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1920
1921		loff += sizeof(u32);
1922
1923		/*
1924		 * handle page offset wrap around
1925		 */
1926		loff = PSS_SMEM_PGOFF(loff);
1927		if (loff == 0) {
1928			pgnum++;
1929			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1930		}
1931	}
1932
1933	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1934			ioc->ioc_regs.host_page_num_fn);
1935
1936	/*
1937	 * Set boot type, env and device mode at the end.
1938	 */
1939	if (boot_env == BFI_FWBOOT_ENV_OS &&
1940		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1941		boot_type = BFI_FWBOOT_TYPE_NORMAL;
1942	}
1943	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1944				ioc->port0_mode, ioc->port1_mode);
1945	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1946			swab32(asicmode));
1947	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1948			swab32(boot_type));
1949	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1950			swab32(boot_env));
1951	return BFA_STATUS_OK;
1952}
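
    /*
     * Note (illustrative): the image is streamed chunk by chunk;
     * BFA_IOC_FLASH_CHUNK_NO(i) detects a chunk boundary at word i and
     * BFA_IOC_FLASH_CHUNK_ADDR() rebases the next fetch, while loff wraps
     * at each smem page boundary and bumps host_page_num_fn.
     */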
1953
1954
1955/*
1956 * Update BFA configuration from firmware configuration.
1957 */
1958static void
1959bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1960{
1961	struct bfi_ioc_attr_s	*attr = ioc->attr;
1962
1963	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1964	attr->card_type     = be32_to_cpu(attr->card_type);
1965	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1966	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
1967	attr->mfg_year	= be16_to_cpu(attr->mfg_year);
1968
1969	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1970}
1971
1972/*
1973 * Attach time initialization of mbox logic.
1974 */
1975static void
1976bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1977{
1978	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1979	int	mc;
1980
1981	INIT_LIST_HEAD(&mod->cmd_q);
1982	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1983		mod->mbhdlr[mc].cbfn = NULL;
1984		mod->mbhdlr[mc].cbarg = ioc->bfa;
1985	}
1986}
1987
1988/*
1989 * Mbox poll timer -- restarts any pending mailbox requests.
1990 */
1991static void
1992bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1993{
1994	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1995	struct bfa_mbox_cmd_s		*cmd;
1996	u32			stat;
1997
1998	/*
1999	 * If no command pending, do nothing
2000	 */
2001	if (list_empty(&mod->cmd_q))
2002		return;
2003
2004	/*
2005	 * If previous command is not yet fetched by firmware, do nothing
2006	 */
2007	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2008	if (stat)
2009		return;
2010
2011	/*
2012	 * Enqueue command to firmware.
2013	 */
2014	bfa_q_deq(&mod->cmd_q, &cmd);
2015	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2016}
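
    /*
     * Flow sketch (illustrative): commands that could not be posted while
     * firmware still owned the mailbox sit on mod->cmd_q; this poll runs
     * from the heartbeat path (bfa_ioc_hb_check()) and reposts the head of
     * the queue once the command register reads 0.
     */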
2017
2018/*
2019 * Cleanup any pending requests.
2020 */
2021static void
2022bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2023{
2024	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2025	struct bfa_mbox_cmd_s		*cmd;
2026
2027	while (!list_empty(&mod->cmd_q))
2028		bfa_q_deq(&mod->cmd_q, &cmd);
2029}
2030
2031/*
2032 * Read data from SMEM to host through PCI memmap
2033 *
2034 * @param[in]	ioc	memory for IOC
2035 * @param[in]	tbuf	app memory to store data from smem
2036 * @param[in]	soff	smem offset
2037 * @param[in]	sz	size of smem in bytes
2038 */
2039static bfa_status_t
2040bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2041{
2042	u32 pgnum, loff;
2043	__be32 r32;
2044	int i, len;
2045	u32 *buf = tbuf;
2046
2047	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2048	loff = PSS_SMEM_PGOFF(soff);
2049	bfa_trc(ioc, pgnum);
2050	bfa_trc(ioc, loff);
2051	bfa_trc(ioc, sz);
2052
2053	/*
2054	 *  Hold semaphore to serialize pll init and fwtrc.
2055	 */
2056	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2057		bfa_trc(ioc, 0);
2058		return BFA_STATUS_FAILED;
2059	}
2060
2061	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2062
2063	len = sz/sizeof(u32);
2064	bfa_trc(ioc, len);
2065	for (i = 0; i < len; i++) {
2066		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2067		buf[i] = swab32(r32);
2068		loff += sizeof(u32);
2069
2070		/*
2071		 * handle page offset wrap around
2072		 */
2073		loff = PSS_SMEM_PGOFF(loff);
2074		if (loff == 0) {
2075			pgnum++;
2076			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2077		}
2078	}
2079	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2080			ioc->ioc_regs.host_page_num_fn);
2081	/*
2082	 *  release semaphore.
2083	 */
2084	readl(ioc->ioc_regs.ioc_init_sem_reg);
2085	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2086
2087	bfa_trc(ioc, pgnum);
2088	return BFA_STATUS_OK;
2089}
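
/*
 * Illustrative sketch, not part of the driver (the helper name is
 * hypothetical): the page/offset split used by the SMEM walkers above.
 * A flat SMEM offset maps to a host page number plus an offset within
 * the current PCI window; the walkers bump the page register whenever
 * the offset wraps back to zero.
 */
static void __maybe_unused
bfa_ioc_smem_addr_example(struct bfa_ioc_s *ioc, u32 soff,
			  u32 *pgnum, u32 *loff)
{
	*pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);	/* PCI page */
	*loff = PSS_SMEM_PGOFF(soff);	/* offset within that page */
}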
2090
2091/*
2092 * Clear SMEM data from host through PCI memmap
2093 *
2094 * @param[in]	ioc	memory for IOC
2095 * @param[in]	soff	smem offset
2096 * @param[in]	sz	clear size in bytes
2097 */
2098static bfa_status_t
2099bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2100{
2101	int i, len;
2102	u32 pgnum, loff;
2103
2104	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2105	loff = PSS_SMEM_PGOFF(soff);
2106	bfa_trc(ioc, pgnum);
2107	bfa_trc(ioc, loff);
2108	bfa_trc(ioc, sz);
2109
2110	/*
2111	 *  Hold semaphore to serialize pll init and fwtrc.
2112	 */
2113	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2114		bfa_trc(ioc, 0);
2115		return BFA_STATUS_FAILED;
2116	}
2117
2118	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2119
2120	len = sz/sizeof(u32); /* len in words */
2121	bfa_trc(ioc, len);
2122	for (i = 0; i < len; i++) {
2123		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2124		loff += sizeof(u32);
2125
2126		/*
2127		 * handle page offset wrap around
2128		 */
2129		loff = PSS_SMEM_PGOFF(loff);
2130		if (loff == 0) {
2131			pgnum++;
2132			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2133		}
2134	}
2135	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2136			ioc->ioc_regs.host_page_num_fn);
2137
2138	/*
2139	 *  release semaphore.
2140	 */
2141	readl(ioc->ioc_regs.ioc_init_sem_reg);
2142	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2143	bfa_trc(ioc, pgnum);
2144	return BFA_STATUS_OK;
2145}
2146
2147static void
2148bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2149{
2150	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2151
2152	/*
2153	 * Notify driver and common modules registered for notification.
2154	 */
2155	ioc->cbfn->hbfail_cbfn(ioc->bfa);
2156	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2157
2158	bfa_ioc_debug_save_ftrc(ioc);
2159
2160	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2161		"Heart Beat of IOC has failed\n");
2162	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2163
2164}
2165
2166static void
2167bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2168{
2169	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2170	/*
2171	 * Provide enable completion callback.
2172	 */
2173	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2174	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2175		"Running firmware version is incompatible "
2176		"with the driver version\n");
2177	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2178}
2179
2180bfa_status_t
2181bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2182{
2183
2184	/*
2185	 *  Hold semaphore so that nobody can access the chip during init.
2186	 */
2187	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2188
2189	bfa_ioc_pll_init_asic(ioc);
2190
2191	ioc->pllinit = BFA_TRUE;
2192
2193	/*
2194	 * Initialize LMEM
2195	 */
2196	bfa_ioc_lmem_init(ioc);
2197
2198	/*
2199	 *  release semaphore.
2200	 */
2201	readl(ioc->ioc_regs.ioc_init_sem_reg);
2202	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2203
2204	return BFA_STATUS_OK;
2205}
2206
2207/*
2208 * Interface used by diag module to do firmware boot with memory test
2209 * as the entry vector.
2210 */
2211bfa_status_t
2212bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2213{
2214	struct bfi_ioc_image_hdr_s *drv_fwhdr;
2215	bfa_status_t status;
2216	bfa_ioc_stats(ioc, ioc_boots);
2217
2218	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2219		return BFA_STATUS_FAILED;
2220
2221	if (boot_env == BFI_FWBOOT_ENV_OS &&
2222		boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2223
2224		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2225			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2226
2227		/*
2228		 * Boot from flash only if the flash f/w is better than
2229		 * the driver f/w; otherwise push the driver's firmware.
2230		 */
2231		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2232						BFI_IOC_IMG_VER_BETTER)
2233			boot_type = BFI_FWBOOT_TYPE_FLASH;
2234	}
2235
2236	/*
2237	 * Initialize IOC state of all functions on a chip reset.
2238	 */
2239	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2240		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2241		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2242	} else {
2243		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2244		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2245	}
2246
2247	bfa_ioc_msgflush(ioc);
2248	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2249	if (status == BFA_STATUS_OK)
2250		bfa_ioc_lpu_start(ioc);
2251	else {
2252		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2253		bfa_iocpf_timeout(ioc);
2254	}
2255	return status;
2256}
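
/*
 * Illustrative sketch, not part of the driver (hypothetical caller):
 * booting firmware with the memory test entry vector, as a diag module
 * would.  On success the LPU is started by bfa_ioc_boot() itself.
 */
static bfa_status_t __maybe_unused
bfa_ioc_boot_memtest_example(struct bfa_ioc_s *ioc)
{
	/* memtest boot in an OS environment */
	return bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
}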
2257
2258/*
2259 * Enable/disable IOC failure auto recovery.
2260 */
2261void
2262bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2263{
2264	bfa_auto_recover = auto_recover;
2265}
2266
2267
2268
2269bfa_boolean_t
2270bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2271{
2272	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2273}
2274
2275bfa_boolean_t
2276bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2277{
2278	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2279
2280	return ((r32 != BFI_IOC_UNINIT) &&
2281		(r32 != BFI_IOC_INITING) &&
2282		(r32 != BFI_IOC_MEMTEST));
2283}
2284
2285bfa_boolean_t
2286bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2287{
2288	__be32	*msgp = mbmsg;
2289	u32	r32;
2290	int		i;
2291
2292	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2293	if ((r32 & 1) == 0)
2294		return BFA_FALSE;
2295
2296	/*
2297	 * read the MBOX msg
2298	 */
2299	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2300	     i++) {
2301		r32 = readl(ioc->ioc_regs.lpu_mbox +
2302				   i * sizeof(u32));
2303		msgp[i] = cpu_to_be32(r32);
2304	}
2305
2306	/*
2307	 * turn off mailbox interrupt by clearing mailbox status
2308	 */
2309	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2310	readl(ioc->ioc_regs.lpu_mbox_cmd);
2311
2312	return BFA_TRUE;
2313}
2314
2315void
2316bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2317{
2318	union bfi_ioc_i2h_msg_u	*msg;
2319	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2320
2321	msg = (union bfi_ioc_i2h_msg_u *) m;
2322
2323	bfa_ioc_stats(ioc, ioc_isrs);
2324
2325	switch (msg->mh.msg_id) {
2326	case BFI_IOC_I2H_HBEAT:
2327		break;
2328
2329	case BFI_IOC_I2H_ENABLE_REPLY:
2330		ioc->port_mode = ioc->port_mode_cfg =
2331				(enum bfa_mode_s)msg->fw_event.port_mode;
2332		ioc->ad_cap_bm = msg->fw_event.cap_bm;
2333		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2334		break;
2335
2336	case BFI_IOC_I2H_DISABLE_REPLY:
2337		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2338		break;
2339
2340	case BFI_IOC_I2H_GETATTR_REPLY:
2341		bfa_ioc_getattr_reply(ioc);
2342		break;
2343
2344	default:
2345		bfa_trc(ioc, msg->mh.msg_id);
2346		WARN_ON(1);
2347	}
2348}
2349
2350/*
2351 * IOC attach time initialization and setup.
2352 *
2353 * @param[in]	ioc	memory for IOC
2354 * @param[in]	bfa	driver instance structure
2355 */
2356void
2357bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2358	       struct bfa_timer_mod_s *timer_mod)
2359{
2360	ioc->bfa	= bfa;
2361	ioc->cbfn	= cbfn;
2362	ioc->timer_mod	= timer_mod;
2363	ioc->fcmode	= BFA_FALSE;
2364	ioc->pllinit	= BFA_FALSE;
2365	ioc->dbg_fwsave_once = BFA_TRUE;
2366	ioc->iocpf.ioc	= ioc;
2367
2368	bfa_ioc_mbox_attach(ioc);
2369	INIT_LIST_HEAD(&ioc->notify_q);
2370
2371	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2372	bfa_fsm_send_event(ioc, IOC_E_RESET);
2373}
2374
2375/*
2376 * Driver detach time IOC cleanup.
2377 */
2378void
2379bfa_ioc_detach(struct bfa_ioc_s *ioc)
2380{
2381	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2382	INIT_LIST_HEAD(&ioc->notify_q);
2383}
2384
2385/*
2386 * Setup IOC PCI properties.
2387 *
2388 * @param[in]	pcidev	PCI device information for this IOC
2389 */
2390void
2391bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2392		enum bfi_pcifn_class clscode)
2393{
2394	ioc->clscode	= clscode;
2395	ioc->pcidev	= *pcidev;
2396
2397	/*
2398	 * Initialize IOC and device personality
2399	 */
2400	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2401	ioc->asic_mode  = BFI_ASIC_MODE_FC;
2402
2403	switch (pcidev->device_id) {
2404	case BFA_PCI_DEVICE_ID_FC_8G1P:
2405	case BFA_PCI_DEVICE_ID_FC_8G2P:
2406		ioc->asic_gen = BFI_ASIC_GEN_CB;
2407		ioc->fcmode = BFA_TRUE;
2408		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2409		ioc->ad_cap_bm = BFA_CM_HBA;
2410		break;
2411
2412	case BFA_PCI_DEVICE_ID_CT:
2413		ioc->asic_gen = BFI_ASIC_GEN_CT;
2414		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2415		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2416		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2417		ioc->ad_cap_bm = BFA_CM_CNA;
2418		break;
2419
2420	case BFA_PCI_DEVICE_ID_CT_FC:
2421		ioc->asic_gen = BFI_ASIC_GEN_CT;
2422		ioc->fcmode = BFA_TRUE;
2423		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2424		ioc->ad_cap_bm = BFA_CM_HBA;
2425		break;
2426
2427	case BFA_PCI_DEVICE_ID_CT2:
2428	case BFA_PCI_DEVICE_ID_CT2_QUAD:
2429		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2430		if (clscode == BFI_PCIFN_CLASS_FC &&
2431		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2432			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2433			ioc->fcmode = BFA_TRUE;
2434			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2435			ioc->ad_cap_bm = BFA_CM_HBA;
2436		} else {
2437			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2438			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2439			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2440				ioc->port_mode =
2441				ioc->port_mode_cfg = BFA_MODE_CNA;
2442				ioc->ad_cap_bm = BFA_CM_CNA;
2443			} else {
2444				ioc->port_mode =
2445				ioc->port_mode_cfg = BFA_MODE_NIC;
2446				ioc->ad_cap_bm = BFA_CM_NIC;
2447			}
2448		}
2449		break;
2450
2451	default:
2452		WARN_ON(1);
2453	}
2454
2455	/*
2456	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2457	 */
2458	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2459		bfa_ioc_set_cb_hwif(ioc);
2460	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2461		bfa_ioc_set_ct_hwif(ioc);
2462	else {
2463		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2464		bfa_ioc_set_ct2_hwif(ioc);
2465		bfa_ioc_ct2_poweron(ioc);
2466	}
2467
2468	bfa_ioc_map_port(ioc);
2469	bfa_ioc_reg_init(ioc);
2470}
2471
2472/*
2473 * Initialize IOC dma memory
2474 *
2475 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2476 * @param[in]	dm_pa	physical address of IOC dma memory
2477 */
2478void
2479bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2480{
2481	/*
2482	 * dma memory for firmware attribute
2483	 */
2484	ioc->attr_dma.kva = dm_kva;
2485	ioc->attr_dma.pa = dm_pa;
2486	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2487}
2488
2489void
2490bfa_ioc_enable(struct bfa_ioc_s *ioc)
2491{
2492	bfa_ioc_stats(ioc, ioc_enables);
2493	ioc->dbg_fwsave_once = BFA_TRUE;
2494
2495	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2496}
2497
2498void
2499bfa_ioc_disable(struct bfa_ioc_s *ioc)
2500{
2501	bfa_ioc_stats(ioc, ioc_disables);
2502	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2503}
2504
2505void
2506bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2507{
2508	ioc->dbg_fwsave_once = BFA_TRUE;
2509	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2510}
2511
2512/*
2513 * Initialize memory for saving firmware trace. Driver must initialize
2514 * trace memory before call bfa_ioc_enable().
2515 */
2516void
2517bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2518{
2519	ioc->dbg_fwsave	    = dbg_fwsave;
2520	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2521}
2522
2523/*
2524 * Register mailbox message handler functions
2525 *
2526 * @param[in]	ioc		IOC instance
2527 * @param[in]	mcfuncs		message class handler functions
2528 */
2529void
2530bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2531{
2532	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2533	int				mc;
2534
2535	for (mc = 0; mc < BFI_MC_MAX; mc++)
2536		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2537}
2538
2539/*
2540 * Register mailbox message handler function, to be called by common modules
2541 */
2542void
2543bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2544		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2545{
2546	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2547
2548	mod->mbhdlr[mc].cbfn	= cbfn;
2549	mod->mbhdlr[mc].cbarg	= cbarg;
2550}
2551
2552/*
2553 * Queue a mailbox command request to firmware. If the mailbox is busy,
2554 * the command is queued and sent later by the poll timer. It is the
2555 * caller's responsibility to serialize.
2556 * @param[in]	ioc	IOC instance
2557 * @param[in]	cmd	Mailbox command
2558 */
2559void
2560bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2561{
2562	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2563	u32			stat;
2564
2565	/*
2566	 * If a previous command is pending, queue new command
2567	 */
2568	if (!list_empty(&mod->cmd_q)) {
2569		list_add_tail(&cmd->qe, &mod->cmd_q);
2570		return;
2571	}
2572
2573	/*
2574	 * If mailbox is busy, queue command for poll timer
2575	 */
2576	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2577	if (stat) {
2578		list_add_tail(&cmd->qe, &mod->cmd_q);
2579		return;
2580	}
2581
2582	/*
2583	 * mailbox is free -- queue command to firmware
2584	 */
2585	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2586}
2587
2588/*
2589 * Handle mailbox interrupts
2590 */
2591void
2592bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2593{
2594	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2595	struct bfi_mbmsg_s		m;
2596	int				mc;
2597
2598	if (bfa_ioc_msgget(ioc, &m)) {
2599		/*
2600		 * Treat IOC message class as special.
2601		 */
2602		mc = m.mh.msg_class;
2603		if (mc == BFI_MC_IOC) {
2604			bfa_ioc_isr(ioc, &m);
2605			return;
2606		}
2607
2608		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2609			return;
2610
2611		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2612	}
2613
2614	bfa_ioc_lpu_read_stat(ioc);
2615
2616	/*
2617	 * Try to send pending mailbox commands
2618	 */
2619	bfa_ioc_mbox_poll(ioc);
2620}
2621
2622void
2623bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2624{
2625	bfa_ioc_stats(ioc, ioc_hbfails);
2626	ioc->stats.hb_count = ioc->hb_count;
2627	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2628}
2629
2630/*
2631 * return true if IOC is disabled
2632 */
2633bfa_boolean_t
2634bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2635{
2636	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2637		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2638}
2639
2640/*
2641 * return true if the running IOC firmware differs from the driver firmware.
2642 */
2643bfa_boolean_t
2644bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2645{
2646	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2647		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2648		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2649}
2650
2651/*
2652 * Check if adapter is disabled -- both IOCs should be in a disabled
2653 * state.
2654 */
2655bfa_boolean_t
2656bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2657{
2658	u32	ioc_state;
2659
2660	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2661		return BFA_FALSE;
2662
2663	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2664	if (!bfa_ioc_state_disabled(ioc_state))
2665		return BFA_FALSE;
2666
2667	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2668		ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc); /* other fn */
2669		if (!bfa_ioc_state_disabled(ioc_state))
2670			return BFA_FALSE;
2671	}
2672
2673	return BFA_TRUE;
2674}
2675
2676/*
2677 * Reset IOC fwstate registers.
2678 */
2679void
2680bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2681{
2682	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2683	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2684}
2685
2686#define BFA_MFG_NAME "QLogic"
2687void
2688bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2689			 struct bfa_adapter_attr_s *ad_attr)
2690{
2691	struct bfi_ioc_attr_s	*ioc_attr;
2692
2693	ioc_attr = ioc->attr;
2694
2695	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2696	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2697	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2698	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2699	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2700		      sizeof(struct bfa_mfg_vpd_s));
2701
2702	ad_attr->nports = bfa_ioc_get_nports(ioc);
2703	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2704
2705	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2706	/* For now, model descr uses same model string */
2707	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2708
2709	ad_attr->card_type = ioc_attr->card_type;
2710	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2711
2712	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2713		ad_attr->prototype = 1;
2714	else
2715		ad_attr->prototype = 0;
2716
2717	ad_attr->pwwn = ioc->attr->pwwn;
2718	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2719
2720	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2721	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2722	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2723	ad_attr->asic_rev = ioc_attr->asic_rev;
2724
2725	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2726
2727	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2728	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2729				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2730	ad_attr->mfg_day = ioc_attr->mfg_day;
2731	ad_attr->mfg_month = ioc_attr->mfg_month;
2732	ad_attr->mfg_year = ioc_attr->mfg_year;
2733	memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2734}
2735
2736enum bfa_ioc_type_e
2737bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2738{
2739	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2740		return BFA_IOC_TYPE_LL;
2741
2742	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2743
2744	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2745		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2746}
2747
2748void
2749bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2750{
2751	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2752	memcpy((void *)serial_num,
2753			(void *)ioc->attr->brcd_serialnum,
2754			BFA_ADAPTER_SERIAL_NUM_LEN);
2755}
2756
2757void
2758bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2759{
2760	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2761	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2762}
2763
2764void
2765bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2766{
2767	WARN_ON(!chip_rev);
2768
2769	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2770
2771	chip_rev[0] = 'R';
2772	chip_rev[1] = 'e';
2773	chip_rev[2] = 'v';
2774	chip_rev[3] = '-';
2775	chip_rev[4] = ioc->attr->asic_rev;
2776	chip_rev[5] = '\0';
2777}
2778
2779void
2780bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2781{
2782	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2783	memcpy(optrom_ver, ioc->attr->optrom_version,
2784		      BFA_VERSION_LEN);
2785}
2786
2787void
2788bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2789{
2790	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2791	strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2792}
2793
2794void
2795bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2796{
2797	struct bfi_ioc_attr_s	*ioc_attr;
2798	u8 nports = bfa_ioc_get_nports(ioc);
2799
2800	WARN_ON(!model);
2801	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2802
2803	ioc_attr = ioc->attr;
2804
2805	if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2806		(!bfa_mfg_is_mezz(ioc_attr->card_type)))
2807		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2808			BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2809	else
2810		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2811			BFA_MFG_NAME, ioc_attr->card_type);
2812}
2813
2814enum bfa_ioc_state
2815bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2816{
2817	enum bfa_iocpf_state iocpf_st;
2818	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2819
2820	if (ioc_st == BFA_IOC_ENABLING ||
2821		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2822
2823		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2824
2825		switch (iocpf_st) {
2826		case BFA_IOCPF_SEMWAIT:
2827			ioc_st = BFA_IOC_SEMWAIT;
2828			break;
2829
2830		case BFA_IOCPF_HWINIT:
2831			ioc_st = BFA_IOC_HWINIT;
2832			break;
2833
2834		case BFA_IOCPF_FWMISMATCH:
2835			ioc_st = BFA_IOC_FWMISMATCH;
2836			break;
2837
2838		case BFA_IOCPF_FAIL:
2839			ioc_st = BFA_IOC_FAIL;
2840			break;
2841
2842		case BFA_IOCPF_INITFAIL:
2843			ioc_st = BFA_IOC_INITFAIL;
2844			break;
2845
2846		default:
2847			break;
2848		}
2849	}
2850
2851	return ioc_st;
2852}
2853
2854void
2855bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2856{
2857	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2858
2859	ioc_attr->state = bfa_ioc_get_state(ioc);
2860	ioc_attr->port_id = bfa_ioc_portid(ioc);
2861	ioc_attr->port_mode = ioc->port_mode;
2862	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2863	ioc_attr->cap_bm = ioc->ad_cap_bm;
2864
2865	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2866
2867	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2868
2869	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2870	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2871	ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2872	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2873}
2874
2875mac_t
2876bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2877{
2878	/*
2879	 * Check the IOC type and return the appropriate MAC
2880	 */
2881	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2882		return ioc->attr->fcoe_mac;
2883	else
2884		return ioc->attr->mac;
2885}
2886
2887mac_t
2888bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2889{
2890	mac_t	m;
2891
2892	m = ioc->attr->mfg_mac;
2893	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2894		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2895	else
2896		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2897			bfa_ioc_pcifn(ioc));
2898
2899	return m;
2900}
2901
2902/*
2903 * Send AEN notification
2904 */
2905void
2906bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2907{
2908	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2909	struct bfa_aen_entry_s	*aen_entry;
2910	enum bfa_ioc_type_e ioc_type;
2911
2912	bfad_get_aen_entry(bfad, aen_entry);
2913	if (!aen_entry)
2914		return;
2915
2916	ioc_type = bfa_ioc_get_type(ioc);
2917	switch (ioc_type) {
2918	case BFA_IOC_TYPE_FC:
2919		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2920		break;
2921	case BFA_IOC_TYPE_FCoE:
2922		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2923		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2924		break;
2925	case BFA_IOC_TYPE_LL:
2926		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2927		break;
2928	default:
2929		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2930		break;
2931	}
2932
2933	/* Send the AEN notification */
2934	aen_entry->aen_data.ioc.ioc_type = ioc_type;
2935	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2936				  BFA_AEN_CAT_IOC, event);
2937}
2938
2939/*
2940 * Retrieve saved firmware trace from a prior IOC failure.
2941 */
2942bfa_status_t
2943bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2944{
2945	int	tlen;
2946
2947	if (ioc->dbg_fwsave_len == 0)
2948		return BFA_STATUS_ENOFSAVE;
2949
2950	tlen = *trclen;
2951	if (tlen > ioc->dbg_fwsave_len)
2952		tlen = ioc->dbg_fwsave_len;
2953
2954	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2955	*trclen = tlen;
2956	return BFA_STATUS_OK;
2957}
2958
2959
2960/*
2961 * Fetch the current firmware trace from IOC memory.
2962 */
2963bfa_status_t
2964bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2965{
2966	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2967	int tlen;
2968	bfa_status_t status;
2969
2970	bfa_trc(ioc, *trclen);
2971
2972	tlen = *trclen;
2973	if (tlen > BFA_DBG_FWTRC_LEN)
2974		tlen = BFA_DBG_FWTRC_LEN;
2975
2976	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2977	*trclen = tlen;
2978	return status;
2979}
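
/*
 * Illustrative sketch, not part of the driver (hypothetical caller;
 * assumes buf holds at least BFA_DBG_FWTRC_LEN bytes): trclen is
 * in/out -- capped to BFA_DBG_FWTRC_LEN on entry and set to the
 * length actually read on return.
 */
static bfa_status_t __maybe_unused
bfa_ioc_fwtrc_usage_example(struct bfa_ioc_s *ioc, void *buf)
{
	int tlen = BFA_DBG_FWTRC_LEN;	/* read the whole trace buffer */

	return bfa_ioc_debug_fwtrc(ioc, buf, &tlen);
}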
2980
2981static void
2982bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2983{
2984	struct bfa_mbox_cmd_s cmd;
2985	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2986
2987	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2988		    bfa_ioc_portid(ioc));
2989	req->clscode = cpu_to_be16(ioc->clscode);
2990	bfa_ioc_mbox_queue(ioc, &cmd);
2991}
2992
2993static void
2994bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2995{
2996	u32 fwsync_iter = 1000;
2997
2998	bfa_ioc_send_fwsync(ioc);
2999
3000	/*
3001	 * After sending a fw sync mbox command wait for it to
3002	 * take effect.  We will not wait for a response because
3003	 *    1. fw_sync mbox cmd doesn't have a response.
3004	 *    2. Even if we implement that,  interrupts might not
3005	 *	 be enabled when we call this function.
3006	 * So, just keep checking if any mbox cmd is pending, and
3007	 * after waiting for a reasonable amount of time, go ahead.
3008	 * It is possible that fw has crashed and the mbox command
3009	 * is never acknowledged.
3010	 */
3011	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3012		fwsync_iter--;
3013}
3014
3015/*
3016 * Dump firmware smem
3017 */
3018bfa_status_t
3019bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3020				u32 *offset, int *buflen)
3021{
3022	u32 loff;
3023	int dlen;
3024	bfa_status_t status;
3025	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3026
3027	if (*offset >= smem_len) {
3028		*offset = *buflen = 0;
3029		return BFA_STATUS_EINVAL;
3030	}
3031
3032	loff = *offset;
3033	dlen = *buflen;
3034
3035	/*
3036	 * On the first smem read, sync smem before proceeding;
3037	 * there is no need to sync before reading every chunk.
3038	 */
3039	if (loff == 0)
3040		bfa_ioc_fwsync(ioc);
3041
3042	if ((loff + dlen) >= smem_len)
3043		dlen = smem_len - loff;
3044
3045	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3046
3047	if (status != BFA_STATUS_OK) {
3048		*offset = *buflen = 0;
3049		return status;
3050	}
3051
3052	*offset += dlen;
3053
3054	if (*offset >= smem_len)
3055		*offset = 0;
3056
3057	*buflen = dlen;
3058
3059	return status;
3060}
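
/*
 * Illustrative sketch, not part of the driver (hypothetical caller;
 * assumes buf holds at least BFA_IOC_FW_SMEM_SIZE(ioc) bytes): dumping
 * all of firmware SMEM by walking the offset cursor in fixed-size
 * chunks.  The cursor wraps back to zero at the end of SMEM, which
 * terminates the loop.
 */
static bfa_status_t __maybe_unused
bfa_ioc_fwcore_dump_example(struct bfa_ioc_s *ioc, void *buf)
{
	u32 off = 0;
	bfa_status_t status;

	do {
		u32 cur = off;		/* where this chunk lands in buf */
		int dlen = 4096;	/* arbitrary example chunk size */

		status = bfa_ioc_debug_fwcore(ioc, (u8 *)buf + cur,
					      &off, &dlen);
		if (status != BFA_STATUS_OK)
			return status;
	} while (off != 0);

	return BFA_STATUS_OK;
}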
3061
3062/*
3063 * Firmware statistics
3064 */
3065bfa_status_t
3066bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3067{
3068	u32 loff = BFI_IOC_FWSTATS_OFF + \
3069		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3070	int tlen;
3071	bfa_status_t status;
3072
3073	if (ioc->stats_busy) {
3074		bfa_trc(ioc, ioc->stats_busy);
3075		return BFA_STATUS_DEVBUSY;
3076	}
3077	ioc->stats_busy = BFA_TRUE;
3078
3079	tlen = sizeof(struct bfa_fw_stats_s);
3080	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3081
3082	ioc->stats_busy = BFA_FALSE;
3083	return status;
3084}
3085
3086bfa_status_t
3087bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3088{
3089	u32 loff = BFI_IOC_FWSTATS_OFF + \
3090		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3091	int tlen;
3092	bfa_status_t status;
3093
3094	if (ioc->stats_busy) {
3095		bfa_trc(ioc, ioc->stats_busy);
3096		return BFA_STATUS_DEVBUSY;
3097	}
3098	ioc->stats_busy = BFA_TRUE;
3099
3100	tlen = sizeof(struct bfa_fw_stats_s);
3101	status = bfa_ioc_smem_clr(ioc, loff, tlen);
3102
3103	ioc->stats_busy = BFA_FALSE;
3104	return status;
3105}
3106
3107/*
3108 * Save firmware trace if configured.
3109 */
3110void
3111bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3112{
3113	int		tlen;
3114
3115	if (ioc->dbg_fwsave_once) {
3116		ioc->dbg_fwsave_once = BFA_FALSE;
3117		if (ioc->dbg_fwsave_len) {
3118			tlen = ioc->dbg_fwsave_len;
3119			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3120		}
3121	}
3122}
3123
3124/*
3125 * Firmware failure detected. Start recovery actions.
3126 */
3127static void
3128bfa_ioc_recover(struct bfa_ioc_s *ioc)
3129{
3130	bfa_ioc_stats(ioc, ioc_hbfails);
3131	ioc->stats.hb_count = ioc->hb_count;
3132	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3133}
3134
3135/*
3136 *  BFA IOC PF private functions
3137 */
3138static void
3139bfa_iocpf_timeout(void *ioc_arg)
3140{
3141	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3142
3143	bfa_trc(ioc, 0);
3144	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3145}
3146
3147static void
3148bfa_iocpf_sem_timeout(void *ioc_arg)
3149{
3150	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3151
3152	bfa_ioc_hw_sem_get(ioc);
3153}
3154
3155static void
3156bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3157{
3158	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3159
3160	bfa_trc(ioc, fwstate);
3161
3162	if (fwstate == BFI_IOC_DISABLED) {
3163		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3164		return;
3165	}
3166
3167	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
3168		bfa_iocpf_timeout(ioc);
3169	else {
3170		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3171		bfa_iocpf_poll_timer_start(ioc);
3172	}
3173}
3174
3175static void
3176bfa_iocpf_poll_timeout(void *ioc_arg)
3177{
3178	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3179
3180	bfa_ioc_poll_fwinit(ioc);
3181}
3182
3183/*
3184 *  bfa timer function
3185 */
3186void
3187bfa_timer_beat(struct bfa_timer_mod_s *mod)
3188{
3189	struct list_head *qh = &mod->timer_q;
3190	struct list_head *qe, *qe_next;
3191	struct bfa_timer_s *elem;
3192	struct list_head timedout_q;
3193
3194	INIT_LIST_HEAD(&timedout_q);
3195
3196	qe = bfa_q_next(qh);
3197
3198	while (qe != qh) {
3199		qe_next = bfa_q_next(qe);
3200
3201		elem = (struct bfa_timer_s *) qe;
3202		if (elem->timeout <= BFA_TIMER_FREQ) {
3203			elem->timeout = 0;
3204			list_del(&elem->qe);
3205			list_add_tail(&elem->qe, &timedout_q);
3206		} else {
3207			elem->timeout -= BFA_TIMER_FREQ;
3208		}
3209
3210		qe = qe_next;	/* go to next elem */
3211	}
3212
3213	/*
3214	 * Pop all the timeout entries
3215	 */
3216	while (!list_empty(&timedout_q)) {
3217		bfa_q_deq(&timedout_q, &elem);
3218		elem->timercb(elem->arg);
3219	}
3220}
3221
3222/*
3223 * Should be called with lock protection
3224 */
3225void
3226bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3227		    void (*timercb) (void *), void *arg, unsigned int timeout)
3228{
3229
3230	WARN_ON(timercb == NULL);
3231	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3232
3233	timer->timeout = timeout;
3234	timer->timercb = timercb;
3235	timer->arg = arg;
3236
3237	list_add_tail(&timer->qe, &mod->timer_q);
3238}
3239
3240/*
3241 * Should be called with lock protection
3242 */
3243void
3244bfa_timer_stop(struct bfa_timer_s *timer)
3245{
3246	WARN_ON(list_empty(&timer->qe));
3247
3248	list_del(&timer->qe);
3249}
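
/*
 * Illustrative sketch, not part of the driver: arming a 2000 msec
 * one-shot timer and cancelling it before it fires.  Both calls must
 * be made under the lock protecting the timer queue; the callback runs
 * from bfa_timer_beat() context.
 */
static void __maybe_unused
bfa_timer_usage_example(struct bfa_timer_mod_s *mod,
			struct bfa_timer_s *timer,
			void (*cb)(void *), void *arg)
{
	bfa_timer_begin(mod, timer, cb, arg, 2000);	/* msecs */
	bfa_timer_stop(timer);		/* cancel before it fires */
}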
3250
3251/*
3252 *	ASIC block related
3253 */
3254static void
3255bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3256{
3257	struct bfa_ablk_cfg_inst_s *cfg_inst;
3258	int i, j;
3259	u16	be16;
3260
3261	for (i = 0; i < BFA_ABLK_MAX; i++) {
3262		cfg_inst = &cfg->inst[i];
3263		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3264			be16 = cfg_inst->pf_cfg[j].pers;
3265			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3266			be16 = cfg_inst->pf_cfg[j].num_qpairs;
3267			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3268			be16 = cfg_inst->pf_cfg[j].num_vectors;
3269			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3270			be16 = cfg_inst->pf_cfg[j].bw_min;
3271			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3272			be16 = cfg_inst->pf_cfg[j].bw_max;
3273			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3274		}
3275	}
3276}
3277
3278static void
3279bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3280{
3281	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3282	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3283	bfa_ablk_cbfn_t cbfn;
3284
3285	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3286	bfa_trc(ablk->ioc, msg->mh.msg_id);
3287
3288	switch (msg->mh.msg_id) {
3289	case BFI_ABLK_I2H_QUERY:
3290		if (rsp->status == BFA_STATUS_OK) {
3291			memcpy(ablk->cfg, ablk->dma_addr.kva,
3292				sizeof(struct bfa_ablk_cfg_s));
3293			bfa_ablk_config_swap(ablk->cfg);
3294			ablk->cfg = NULL;
3295		}
3296		break;
3297
3298	case BFI_ABLK_I2H_ADPT_CONFIG:
3299	case BFI_ABLK_I2H_PORT_CONFIG:
3300		/* update config port mode */
3301		ablk->ioc->port_mode_cfg = rsp->port_mode;
3302		break;
3303
3304	case BFI_ABLK_I2H_PF_DELETE:
3305	case BFI_ABLK_I2H_PF_UPDATE:
3306	case BFI_ABLK_I2H_OPTROM_ENABLE:
3307	case BFI_ABLK_I2H_OPTROM_DISABLE:
3308		/* No-op */
3309		break;
3310
3311	case BFI_ABLK_I2H_PF_CREATE:
3312		*(ablk->pcifn) = rsp->pcifn;
3313		ablk->pcifn = NULL;
3314		break;
3315
3316	default:
3317		WARN_ON(1);
3318	}
3319
3320	ablk->busy = BFA_FALSE;
3321	if (ablk->cbfn) {
3322		cbfn = ablk->cbfn;
3323		ablk->cbfn = NULL;
3324		cbfn(ablk->cbarg, rsp->status);
3325	}
3326}
3327
3328static void
3329bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3330{
3331	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3332
3333	bfa_trc(ablk->ioc, event);
3334
3335	switch (event) {
3336	case BFA_IOC_E_ENABLED:
3337		WARN_ON(ablk->busy != BFA_FALSE);
3338		break;
3339
3340	case BFA_IOC_E_DISABLED:
3341	case BFA_IOC_E_FAILED:
3342		/* Fail any pending requests */
3343		ablk->pcifn = NULL;
3344		if (ablk->busy) {
3345			if (ablk->cbfn)
3346				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3347			ablk->cbfn = NULL;
3348			ablk->busy = BFA_FALSE;
3349		}
3350		break;
3351
3352	default:
3353		WARN_ON(1);
3354		break;
3355	}
3356}
3357
3358u32
3359bfa_ablk_meminfo(void)
3360{
3361	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3362}
3363
3364void
3365bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3366{
3367	ablk->dma_addr.kva = dma_kva;
3368	ablk->dma_addr.pa  = dma_pa;
3369}
3370
3371void
3372bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3373{
3374	ablk->ioc = ioc;
3375
3376	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3377	bfa_q_qe_init(&ablk->ioc_notify);
3378	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3379	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3380}
3381
3382bfa_status_t
3383bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3384		bfa_ablk_cbfn_t cbfn, void *cbarg)
3385{
3386	struct bfi_ablk_h2i_query_s *m;
3387
3388	WARN_ON(!ablk_cfg);
3389
3390	if (!bfa_ioc_is_operational(ablk->ioc)) {
3391		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3392		return BFA_STATUS_IOC_FAILURE;
3393	}
3394
3395	if (ablk->busy) {
3396		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3397		return  BFA_STATUS_DEVBUSY;
3398	}
3399
3400	ablk->cfg = ablk_cfg;
3401	ablk->cbfn  = cbfn;
3402	ablk->cbarg = cbarg;
3403	ablk->busy  = BFA_TRUE;
3404
3405	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3406	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3407		    bfa_ioc_portid(ablk->ioc));
3408	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3409	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3410
3411	return BFA_STATUS_OK;
3412}
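
/*
 * Illustrative sketch, not part of the driver (callback name
 * hypothetical): the query completes asynchronously -- bfa_ablk_isr()
 * byte-swaps the DMA buffer into the caller's bfa_ablk_cfg_s before
 * this callback runs.
 */
static void __maybe_unused
bfa_ablk_query_done_example(void *cbarg, bfa_status_t status)
{
	struct bfa_ablk_s *ablk = cbarg;

	bfa_trc(ablk->ioc, status);	/* cfg is valid if status is OK */
}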
3413
3414bfa_status_t
3415bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3416		u8 port, enum bfi_pcifn_class personality,
3417		u16 bw_min, u16 bw_max,
3418		bfa_ablk_cbfn_t cbfn, void *cbarg)
3419{
3420	struct bfi_ablk_h2i_pf_req_s *m;
3421
3422	if (!bfa_ioc_is_operational(ablk->ioc)) {
3423		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3424		return BFA_STATUS_IOC_FAILURE;
3425	}
3426
3427	if (ablk->busy) {
3428		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3429		return  BFA_STATUS_DEVBUSY;
3430	}
3431
3432	ablk->pcifn = pcifn;
3433	ablk->cbfn = cbfn;
3434	ablk->cbarg = cbarg;
3435	ablk->busy  = BFA_TRUE;
3436
3437	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3438	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3439		    bfa_ioc_portid(ablk->ioc));
3440	m->pers = cpu_to_be16((u16)personality);
3441	m->bw_min = cpu_to_be16(bw_min);
3442	m->bw_max = cpu_to_be16(bw_max);
3443	m->port = port;
3444	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3445
3446	return BFA_STATUS_OK;
3447}
3448
3449bfa_status_t
3450bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3451		bfa_ablk_cbfn_t cbfn, void *cbarg)
3452{
3453	struct bfi_ablk_h2i_pf_req_s *m;
3454
3455	if (!bfa_ioc_is_operational(ablk->ioc)) {
3456		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3457		return BFA_STATUS_IOC_FAILURE;
3458	}
3459
3460	if (ablk->busy) {
3461		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3462		return  BFA_STATUS_DEVBUSY;
3463	}
3464
3465	ablk->cbfn  = cbfn;
3466	ablk->cbarg = cbarg;
3467	ablk->busy  = BFA_TRUE;
3468
3469	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3470	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3471		    bfa_ioc_portid(ablk->ioc));
3472	m->pcifn = (u8)pcifn;
3473	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3474
3475	return BFA_STATUS_OK;
3476}
3477
3478bfa_status_t
3479bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3480		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3481{
3482	struct bfi_ablk_h2i_cfg_req_s *m;
3483
3484	if (!bfa_ioc_is_operational(ablk->ioc)) {
3485		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3486		return BFA_STATUS_IOC_FAILURE;
3487	}
3488
3489	if (ablk->busy) {
3490		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3491		return  BFA_STATUS_DEVBUSY;
3492	}
3493
3494	ablk->cbfn  = cbfn;
3495	ablk->cbarg = cbarg;
3496	ablk->busy  = BFA_TRUE;
3497
3498	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3499	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3500		    bfa_ioc_portid(ablk->ioc));
3501	m->mode = (u8)mode;
3502	m->max_pf = (u8)max_pf;
3503	m->max_vf = (u8)max_vf;
3504	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3505
3506	return BFA_STATUS_OK;
3507}
3508
3509bfa_status_t
3510bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3511		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3512{
3513	struct bfi_ablk_h2i_cfg_req_s *m;
3514
3515	if (!bfa_ioc_is_operational(ablk->ioc)) {
3516		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3517		return BFA_STATUS_IOC_FAILURE;
3518	}
3519
3520	if (ablk->busy) {
3521		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3522		return  BFA_STATUS_DEVBUSY;
3523	}
3524
3525	ablk->cbfn  = cbfn;
3526	ablk->cbarg = cbarg;
3527	ablk->busy  = BFA_TRUE;
3528
3529	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3530	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3531		bfa_ioc_portid(ablk->ioc));
3532	m->port = (u8)port;
3533	m->mode = (u8)mode;
3534	m->max_pf = (u8)max_pf;
3535	m->max_vf = (u8)max_vf;
3536	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3537
3538	return BFA_STATUS_OK;
3539}
3540
3541bfa_status_t
3542bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3543		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3544{
3545	struct bfi_ablk_h2i_pf_req_s *m;
3546
3547	if (!bfa_ioc_is_operational(ablk->ioc)) {
3548		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3549		return BFA_STATUS_IOC_FAILURE;
3550	}
3551
3552	if (ablk->busy) {
3553		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3554		return  BFA_STATUS_DEVBUSY;
3555	}
3556
3557	ablk->cbfn  = cbfn;
3558	ablk->cbarg = cbarg;
3559	ablk->busy  = BFA_TRUE;
3560
3561	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3562	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3563		bfa_ioc_portid(ablk->ioc));
3564	m->pcifn = (u8)pcifn;
3565	m->bw_min = cpu_to_be16(bw_min);
3566	m->bw_max = cpu_to_be16(bw_max);
3567	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3568
3569	return BFA_STATUS_OK;
3570}
3571
3572bfa_status_t
3573bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3574{
3575	struct bfi_ablk_h2i_optrom_s *m;
3576
3577	if (!bfa_ioc_is_operational(ablk->ioc)) {
3578		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3579		return BFA_STATUS_IOC_FAILURE;
3580	}
3581
3582	if (ablk->busy) {
3583		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3584		return  BFA_STATUS_DEVBUSY;
3585	}
3586
3587	ablk->cbfn  = cbfn;
3588	ablk->cbarg = cbarg;
3589	ablk->busy  = BFA_TRUE;
3590
3591	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3592	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3593		bfa_ioc_portid(ablk->ioc));
3594	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3595
3596	return BFA_STATUS_OK;
3597}
3598
3599bfa_status_t
3600bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3601{
3602	struct bfi_ablk_h2i_optrom_s *m;
3603
3604	if (!bfa_ioc_is_operational(ablk->ioc)) {
3605		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3606		return BFA_STATUS_IOC_FAILURE;
3607	}
3608
3609	if (ablk->busy) {
3610		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3611		return  BFA_STATUS_DEVBUSY;
3612	}
3613
3614	ablk->cbfn  = cbfn;
3615	ablk->cbarg = cbarg;
3616	ablk->busy  = BFA_TRUE;
3617
3618	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3619	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3620		bfa_ioc_portid(ablk->ioc));
3621	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3622
3623	return BFA_STATUS_OK;
3624}
3625
3626/*
3627 *	SFP module specific
3628 */
3629
3630/* forward declarations */
3631static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3632static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3633static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3634				enum bfa_port_speed portspeed);
3635
3636static void
3637bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3638{
3639	bfa_trc(sfp, sfp->lock);
3640	if (sfp->cbfn)
3641		sfp->cbfn(sfp->cbarg, sfp->status);
3642	sfp->lock = 0;
3643	sfp->cbfn = NULL;
3644}
3645
3646static void
3647bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3648{
3649	bfa_trc(sfp, sfp->portspeed);
3650	if (sfp->media) {
3651		bfa_sfp_media_get(sfp);
3652		if (sfp->state_query_cbfn)
3653			sfp->state_query_cbfn(sfp->state_query_cbarg,
3654					sfp->status);
3655		sfp->media = NULL;
3656	}
3657
3658	if (sfp->portspeed) {
3659		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3660		if (sfp->state_query_cbfn)
3661			sfp->state_query_cbfn(sfp->state_query_cbarg,
3662					sfp->status);
3663		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3664	}
3665
3666	sfp->state_query_lock = 0;
3667	sfp->state_query_cbfn = NULL;
3668}
3669
3670/*
3671 *	IOC event handler.
3672 */
3673static void
3674bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3675{
3676	struct bfa_sfp_s *sfp = sfp_arg;
3677
3678	bfa_trc(sfp, event);
3679	bfa_trc(sfp, sfp->lock);
3680	bfa_trc(sfp, sfp->state_query_lock);
3681
3682	switch (event) {
3683	case BFA_IOC_E_DISABLED:
3684	case BFA_IOC_E_FAILED:
3685		if (sfp->lock) {
3686			sfp->status = BFA_STATUS_IOC_FAILURE;
3687			bfa_cb_sfp_show(sfp);
3688		}
3689
3690		if (sfp->state_query_lock) {
3691			sfp->status = BFA_STATUS_IOC_FAILURE;
3692			bfa_cb_sfp_state_query(sfp);
3693		}
3694		break;
3695
3696	default:
3697		break;
3698	}
3699}
3700
3701/*
3702 * Post SFP State Change Notification (SCN) events to AEN
3703 */
3704static void
3705bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3706{
3707	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3708	struct bfa_aen_entry_s  *aen_entry;
3709	enum bfa_port_aen_event aen_evt = 0;
3710
3711	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3712		      ((u64)rsp->event));
3713
3714	bfad_get_aen_entry(bfad, aen_entry);
3715	if (!aen_entry)
3716		return;
3717
3718	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3719	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3720	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3721
3722	switch (rsp->event) {
3723	case BFA_SFP_SCN_INSERTED:
3724		aen_evt = BFA_PORT_AEN_SFP_INSERT;
3725		break;
3726	case BFA_SFP_SCN_REMOVED:
3727		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3728		break;
3729	case BFA_SFP_SCN_FAILED:
3730		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3731		break;
3732	case BFA_SFP_SCN_UNSUPPORT:
3733		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3734		break;
3735	case BFA_SFP_SCN_POM:
3736		aen_evt = BFA_PORT_AEN_SFP_POM;
3737		aen_entry->aen_data.port.level = rsp->pomlvl;
3738		break;
3739	default:
3740		bfa_trc(sfp, rsp->event);
3741		WARN_ON(1);
3742	}
3743
3744	/* Send the AEN notification */
3745	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3746				  BFA_AEN_CAT_PORT, aen_evt);
3747}
3748
3749/*
3750 *	SFP get data send
3751 */
3752static void
3753bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3754{
3755	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3756
3757	bfa_trc(sfp, req->memtype);
3758
3759	/* build host command */
3760	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3761			bfa_ioc_portid(sfp->ioc));
3762
3763	/* send mbox cmd */
3764	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3765}
3766
3767/*
3768 *	SFP is valid, read sfp data
3769 */
3770static void
3771bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3772{
3773	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3774
3775	WARN_ON(sfp->lock != 0);
3776	bfa_trc(sfp, sfp->state);
3777
3778	sfp->lock = 1;
3779	sfp->memtype = memtype;
3780	req->memtype = memtype;
3781
3782	/* Setup SG list */
3783	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3784
3785	bfa_sfp_getdata_send(sfp);
3786}
3787
3788/*
3789 *	SFP scn handler
3790 */
3791static void
3792bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3793{
3794	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3795
3796	switch (rsp->event) {
3797	case BFA_SFP_SCN_INSERTED:
3798		sfp->state = BFA_SFP_STATE_INSERTED;
3799		sfp->data_valid = 0;
3800		bfa_sfp_scn_aen_post(sfp, rsp);
3801		break;
3802	case BFA_SFP_SCN_REMOVED:
3803		sfp->state = BFA_SFP_STATE_REMOVED;
3804		sfp->data_valid = 0;
3805		bfa_sfp_scn_aen_post(sfp, rsp);
3806		break;
3807	case BFA_SFP_SCN_FAILED:
3808		sfp->state = BFA_SFP_STATE_FAILED;
3809		sfp->data_valid = 0;
3810		bfa_sfp_scn_aen_post(sfp, rsp);
3811		break;
3812	case BFA_SFP_SCN_UNSUPPORT:
3813		sfp->state = BFA_SFP_STATE_UNSUPPORT;
3814		bfa_sfp_scn_aen_post(sfp, rsp);
3815		if (!sfp->lock)
3816			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3817		break;
3818	case BFA_SFP_SCN_POM:
3819		bfa_sfp_scn_aen_post(sfp, rsp);
3820		break;
3821	case BFA_SFP_SCN_VALID:
3822		sfp->state = BFA_SFP_STATE_VALID;
3823		if (!sfp->lock)
3824			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3825		break;
3826	default:
3827		bfa_trc(sfp, rsp->event);
3828		WARN_ON(1);
3829	}
3830}
3831
3832/*
3833 * SFP show complete
3834 */
3835static void
3836bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3837{
3838	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3839
3840	if (!sfp->lock) {
3841		/*
3842		 * receiving response after ioc failure
3843		 */
3844		bfa_trc(sfp, sfp->lock);
3845		return;
3846	}
3847
3848	bfa_trc(sfp, rsp->status);
3849	if (rsp->status == BFA_STATUS_OK) {
3850		sfp->data_valid = 1;
3851		if (sfp->state == BFA_SFP_STATE_VALID)
3852			sfp->status = BFA_STATUS_OK;
3853		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3854			sfp->status = BFA_STATUS_SFP_UNSUPP;
3855		else
3856			bfa_trc(sfp, sfp->state);
3857	} else {
3858		sfp->data_valid = 0;
3859		sfp->status = rsp->status;
3860		/* sfpshow shouldn't change sfp state */
3861	}
3862
3863	bfa_trc(sfp, sfp->memtype);
3864	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3865		bfa_trc(sfp, sfp->data_valid);
3866		if (sfp->data_valid) {
3867			u32	size = sizeof(struct sfp_mem_s);
3868			u8 *des = (u8 *)(sfp->sfpmem);
3869			memcpy(des, sfp->dbuf_kva, size);
3870		}
3871		/*
3872		 * Queue completion callback.
3873		 */
3874		bfa_cb_sfp_show(sfp);
3875	} else
3876		sfp->lock = 0;
3877
3878	bfa_trc(sfp, sfp->state_query_lock);
3879	if (sfp->state_query_lock) {
3880		sfp->state = rsp->state;
3881		/* Complete callback */
3882		bfa_cb_sfp_state_query(sfp);
3883	}
3884}
3885
3886/*
3887 *	SFP query fw sfp state
3888 */
3889static void
3890bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3891{
3892	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3893
3894	/* Should not be doing query if not in _INIT state */
3895	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3896	WARN_ON(sfp->state_query_lock != 0);
3897	bfa_trc(sfp, sfp->state);
3898
3899	sfp->state_query_lock = 1;
3900	req->memtype = 0;
3901
3902	if (!sfp->lock)
3903		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3904}
3905
3906static void
3907bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3908{
3909	enum bfa_defs_sfp_media_e *media = sfp->media;
3910
3911	*media = BFA_SFP_MEDIA_UNKNOWN;
3912
3913	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3914		*media = BFA_SFP_MEDIA_UNSUPPORT;
3915	else if (sfp->state == BFA_SFP_STATE_VALID) {
3916		union sfp_xcvr_e10g_code_u e10g;
3917		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3918		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3919				(sfpmem->srlid_base.xcvr[5] >> 1);
3920
3921		e10g.b = sfpmem->srlid_base.xcvr[0];
3922		bfa_trc(sfp, e10g.b);
3923		bfa_trc(sfp, xmtr_tech);
3924		/* check fc transmitter tech */
3925		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3926		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3927		    (xmtr_tech & SFP_XMTR_TECH_CA))
3928			*media = BFA_SFP_MEDIA_CU;
3929		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3930			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3931			*media = BFA_SFP_MEDIA_EL;
3932		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3933			 (xmtr_tech & SFP_XMTR_TECH_LC))
3934			*media = BFA_SFP_MEDIA_LW;
3935		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3936			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3937			 (xmtr_tech & SFP_XMTR_TECH_SA))
3938			*media = BFA_SFP_MEDIA_SW;
3939		/* Check 10G Ethernet Compliance code */
3940		else if (e10g.r.e10g_sr)
3941			*media = BFA_SFP_MEDIA_SW;
3942		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3943			*media = BFA_SFP_MEDIA_LW;
3944		else if (e10g.r.e10g_unall)
3945			*media = BFA_SFP_MEDIA_UNKNOWN;
3946		else
3947			bfa_trc(sfp, 0);
3948	} else
3949		bfa_trc(sfp, sfp->state);
3950}
3951
3952static bfa_status_t
3953bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3954{
3955	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3956	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3957	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3958	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3959
3960	if (portspeed == BFA_PORT_SPEED_10GBPS) {
3961		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3962			return BFA_STATUS_OK;
3963		else {
3964			bfa_trc(sfp, e10g.b);
3965			return BFA_STATUS_UNSUPP_SPEED;
3966		}
3967	}
3968	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3969	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3970	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3971	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3972	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3973		return BFA_STATUS_OK;
3974	else {
3975		bfa_trc(sfp, portspeed);
3976		bfa_trc(sfp, fc3.b);
3977		bfa_trc(sfp, e10g.b);
3978		return BFA_STATUS_UNSUPP_SPEED;
3979	}
3980}
3981
3982/*
3983 *	SFP hmbox handler
3984 */
3985void
3986bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3987{
3988	struct bfa_sfp_s *sfp = sfparg;
3989
3990	switch (msg->mh.msg_id) {
3991	case BFI_SFP_I2H_SHOW:
3992		bfa_sfp_show_comp(sfp, msg);
3993		break;
3994
3995	case BFI_SFP_I2H_SCN:
3996		bfa_sfp_scn(sfp, msg);
3997		break;
3998
3999	default:
4000		bfa_trc(sfp, msg->mh.msg_id);
4001		WARN_ON(1);
4002	}
4003}
4004
4005/*
4006 *	Return DMA memory needed by sfp module.
4007 */
4008u32
4009bfa_sfp_meminfo(void)
4010{
4011	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4012}
4013
4014/*
4015 *	Attach virtual and physical memory for SFP.
4016 */
4017void
4018bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4019		struct bfa_trc_mod_s *trcmod)
4020{
4021	sfp->dev = dev;
4022	sfp->ioc = ioc;
4023	sfp->trcmod = trcmod;
4024
4025	sfp->cbfn = NULL;
4026	sfp->cbarg = NULL;
4027	sfp->sfpmem = NULL;
4028	sfp->lock = 0;
4029	sfp->data_valid = 0;
4030	sfp->state = BFA_SFP_STATE_INIT;
4031	sfp->state_query_lock = 0;
4032	sfp->state_query_cbfn = NULL;
4033	sfp->state_query_cbarg = NULL;
4034	sfp->media = NULL;
4035	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4036	sfp->is_elb = BFA_FALSE;
4037
4038	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4039	bfa_q_qe_init(&sfp->ioc_notify);
4040	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4041	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4042}
4043
4044/*
4045 *	Claim Memory for SFP
4046 */
4047void
4048bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4049{
4050	sfp->dbuf_kva   = dm_kva;
4051	sfp->dbuf_pa    = dm_pa;
4052	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4053
4054	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4055	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4056}
4057
4058/*
4059 * Show SFP eeprom content
4060 *
4061 * @param[in] sfp   - bfa sfp module
4062 *
4063 * @param[out] sfpmem - sfp eeprom data
4064 *
4065 */
4066bfa_status_t
4067bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4068		bfa_cb_sfp_t cbfn, void *cbarg)
4069{
4070
4071	if (!bfa_ioc_is_operational(sfp->ioc)) {
4072		bfa_trc(sfp, 0);
4073		return BFA_STATUS_IOC_NON_OP;
4074	}
4075
4076	if (sfp->lock) {
4077		bfa_trc(sfp, 0);
4078		return BFA_STATUS_DEVBUSY;
4079	}
4080
4081	sfp->cbfn = cbfn;
4082	sfp->cbarg = cbarg;
4083	sfp->sfpmem = sfpmem;
4084
4085	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4086	return BFA_STATUS_OK;
4087}
4088
4089/*
4090 * Return SFP Media type
4091 *
4092 * @param[in] sfp   - bfa sfp module
4093 *
4094 * @param[out] media - SFP media type
4095 *
4096 */
4097bfa_status_t
4098bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4099		bfa_cb_sfp_t cbfn, void *cbarg)
4100{
4101	if (!bfa_ioc_is_operational(sfp->ioc)) {
4102		bfa_trc(sfp, 0);
4103		return BFA_STATUS_IOC_NON_OP;
4104	}
4105
4106	sfp->media = media;
4107	if (sfp->state == BFA_SFP_STATE_INIT) {
4108		if (sfp->state_query_lock) {
4109			bfa_trc(sfp, 0);
4110			return BFA_STATUS_DEVBUSY;
4111		} else {
4112			sfp->state_query_cbfn = cbfn;
4113			sfp->state_query_cbarg = cbarg;
4114			bfa_sfp_state_query(sfp);
4115			return BFA_STATUS_SFP_NOT_READY;
4116		}
4117	}
4118
4119	bfa_sfp_media_get(sfp);
4120	return BFA_STATUS_OK;
4121}
4122
4123/*
4124 * Check if the user-set port speed is allowed by the SFP
4125 *
4126 * @param[in] sfp   - bfa sfp module
4127 * @param[in] portspeed - port speed from user
4128 *
4129 */
4130bfa_status_t
4131bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4132		bfa_cb_sfp_t cbfn, void *cbarg)
4133{
4134	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4135
4136	if (!bfa_ioc_is_operational(sfp->ioc))
4137		return BFA_STATUS_IOC_NON_OP;
4138
4139	/* For Mezz cards, all speeds are allowed */
4140	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4141		return BFA_STATUS_OK;
4142
4143	/* Check SFP state */
4144	sfp->portspeed = portspeed;
4145	if (sfp->state == BFA_SFP_STATE_INIT) {
4146		if (sfp->state_query_lock) {
4147			bfa_trc(sfp, 0);
4148			return BFA_STATUS_DEVBUSY;
4149		} else {
4150			sfp->state_query_cbfn = cbfn;
4151			sfp->state_query_cbarg = cbarg;
4152			bfa_sfp_state_query(sfp);
4153			return BFA_STATUS_SFP_NOT_READY;
4154		}
4155	}
4156
4157	if (sfp->state == BFA_SFP_STATE_REMOVED ||
4158	    sfp->state == BFA_SFP_STATE_FAILED) {
4159		bfa_trc(sfp, sfp->state);
4160		return BFA_STATUS_NO_SFP_DEV;
4161	}
4162
4163	if (sfp->state == BFA_SFP_STATE_INSERTED) {
4164		bfa_trc(sfp, sfp->state);
4165		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
4166	}
4167
4168	/* For eloopback, all speeds are allowed */
4169	if (sfp->is_elb)
4170		return BFA_STATUS_OK;
4171
4172	return bfa_sfp_speed_valid(sfp, portspeed);
4173}
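
/*
 * Illustrative sketch, not part of the driver (names hypothetical):
 * validating a user requested speed of 8 Gbps.  BFA_STATUS_SFP_NOT_READY
 * means the SFP state is still being queried and the verdict arrives
 * later through the callback; other statuses are returned synchronously.
 */
static void __maybe_unused
bfa_sfp_speed_done_example(void *cbarg, bfa_status_t status)
{
	struct bfa_sfp_s *sfp = cbarg;

	bfa_trc(sfp, status);	/* OK or UNSUPP_SPEED */
}

static bfa_status_t __maybe_unused
bfa_sfp_speed_usage_example(struct bfa_sfp_s *sfp)
{
	return bfa_sfp_speed(sfp, BFA_PORT_SPEED_8GBPS,
			     bfa_sfp_speed_done_example, sfp);
}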
4174
4175/*
4176 *	Flash module specific
4177 */
4178
4179/*
4180 * The FLASH DMA buffer must be big enough to hold both the MFG block
4181 * and the ASIC block (64KB) at the same time, and must be 2KB aligned
4182 * so that no write segment crosses a sector boundary.
4183 */
4184#define BFA_FLASH_SEG_SZ	2048
4185#define BFA_FLASH_DMA_BUF_SZ	\
4186	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
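
/*
 * Illustrative compile-time check, not part of the driver: the rounded
 * size is by construction a whole number of 2k segments, so a write
 * segment never straddles a flash sector.
 */
static inline void __maybe_unused
bfa_flash_bufsz_check_example(void)
{
	BUILD_BUG_ON(BFA_FLASH_DMA_BUF_SZ % BFA_FLASH_SEG_SZ != 0);
}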
4187
4188static void
4189bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4190			int inst, int type)
4191{
4192	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4193	struct bfa_aen_entry_s  *aen_entry;
4194
4195	bfad_get_aen_entry(bfad, aen_entry);
4196	if (!aen_entry)
4197		return;
4198
4199	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4200	aen_entry->aen_data.audit.partition_inst = inst;
4201	aen_entry->aen_data.audit.partition_type = type;
4202
4203	/* Send the AEN notification */
4204	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4205				  BFA_AEN_CAT_AUDIT, event);
4206}
4207
4208static void
4209bfa_flash_cb(struct bfa_flash_s *flash)
4210{
4211	flash->op_busy = 0;
4212	if (flash->cbfn)
4213		flash->cbfn(flash->cbarg, flash->status);
4214}
4215
4216static void
4217bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4218{
4219	struct bfa_flash_s	*flash = cbarg;
4220
4221	bfa_trc(flash, event);
4222	switch (event) {
4223	case BFA_IOC_E_DISABLED:
4224	case BFA_IOC_E_FAILED:
4225		if (flash->op_busy) {
4226			flash->status = BFA_STATUS_IOC_FAILURE;
4227			flash->cbfn(flash->cbarg, flash->status);
4228			flash->op_busy = 0;
4229		}
4230		break;
4231
4232	default:
4233		break;
4234	}
4235}
4236
4237/*
4238 * Send flash attribute query request.
4239 *
4240 * @param[in] cbarg - callback argument
4241 */
4242static void
4243bfa_flash_query_send(void *cbarg)
4244{
4245	struct bfa_flash_s *flash = cbarg;
4246	struct bfi_flash_query_req_s *msg =
4247			(struct bfi_flash_query_req_s *) flash->mb.msg;
4248
4249	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4250		bfa_ioc_portid(flash->ioc));
4251	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4252		flash->dbuf_pa);
4253	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4254}
4255
4256/*
4257 * Send flash write request.
4258 *
4259 * @param[in] flash - flash structure
4260 */
4261static void
4262bfa_flash_write_send(struct bfa_flash_s *flash)
4263{
4264	struct bfi_flash_write_req_s *msg =
4265			(struct bfi_flash_write_req_s *) flash->mb.msg;
4266	u32	len;
4267
4268	msg->type = be32_to_cpu(flash->type);
4269	msg->instance = flash->instance;
4270	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4271	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4272		flash->residue : BFA_FLASH_DMA_BUF_SZ;
4273	msg->length = be32_to_cpu(len);
4274
4275	/* indicate if it's the last msg of the whole write operation */
4276	msg->last = (len == flash->residue) ? 1 : 0;
4277
4278	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4279			bfa_ioc_portid(flash->ioc));
4280	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4281	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4282	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4283
4284	flash->residue -= len;
4285	flash->offset += len;
4286}
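
/*
 * Illustrative walk-through of the chunking above: a write of length
 * L is sent as ceil(L / BFA_FLASH_DMA_BUF_SZ) requests of at most
 * BFA_FLASH_DMA_BUF_SZ bytes each; only the request that exhausts the
 * residue carries msg->last = 1, and each BFI_FLASH_I2H_WRITE_RSP in
 * bfa_flash_intr() below kicks off the next chunk.
 */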
4287
4288/*
4289 * Send flash read request.
4290 *
4291 * @param[in] cbarg - callback argument
4292 */
4293static void
4294bfa_flash_read_send(void *cbarg)
4295{
4296	struct bfa_flash_s *flash = cbarg;
4297	struct bfi_flash_read_req_s *msg =
4298			(struct bfi_flash_read_req_s *) flash->mb.msg;
4299	u32	len;
4300
4301	msg->type = be32_to_cpu(flash->type);
4302	msg->instance = flash->instance;
4303	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4304	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4305			flash->residue : BFA_FLASH_DMA_BUF_SZ;
4306	msg->length = be32_to_cpu(len);
4307	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4308		bfa_ioc_portid(flash->ioc));
4309	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4310	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4311}
4312
4313/*
4314 * Send flash erase request.
4315 *
4316 * @param[in] cbarg - callback argument
4317 */
4318static void
4319bfa_flash_erase_send(void *cbarg)
4320{
4321	struct bfa_flash_s *flash = cbarg;
4322	struct bfi_flash_erase_req_s *msg =
4323			(struct bfi_flash_erase_req_s *) flash->mb.msg;
4324
4325	msg->type = be32_to_cpu(flash->type);
4326	msg->instance = flash->instance;
4327	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4328			bfa_ioc_portid(flash->ioc));
4329	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4330}
4331
4332/*
4333 * Process flash response messages upon receiving interrupts.
4334 *
4335 * @param[in] flasharg - flash structure
4336 * @param[in] msg - message structure
4337 */
4338static void
4339bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4340{
4341	struct bfa_flash_s *flash = flasharg;
4342	u32	status;
4343
4344	union {
4345		struct bfi_flash_query_rsp_s *query;
4346		struct bfi_flash_erase_rsp_s *erase;
4347		struct bfi_flash_write_rsp_s *write;
4348		struct bfi_flash_read_rsp_s *read;
4349		struct bfi_flash_event_s *event;
4350		struct bfi_mbmsg_s   *msg;
4351	} m;
4352
4353	m.msg = msg;
4354	bfa_trc(flash, msg->mh.msg_id);
4355
4356	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4357		/* receiving response after ioc failure */
4358		bfa_trc(flash, 0x9999);
4359		return;
4360	}
4361
4362	switch (msg->mh.msg_id) {
4363	case BFI_FLASH_I2H_QUERY_RSP:
4364		status = be32_to_cpu(m.query->status);
4365		bfa_trc(flash, status);
4366		if (status == BFA_STATUS_OK) {
4367			u32	i;
4368			struct bfa_flash_attr_s *attr, *f;
4369
4370			attr = (struct bfa_flash_attr_s *) flash->ubuf;
4371			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4372			attr->status = be32_to_cpu(f->status);
4373			attr->npart = be32_to_cpu(f->npart);
4374			bfa_trc(flash, attr->status);
4375			bfa_trc(flash, attr->npart);
4376			for (i = 0; i < attr->npart; i++) {
4377				attr->part[i].part_type =
4378					be32_to_cpu(f->part[i].part_type);
4379				attr->part[i].part_instance =
4380					be32_to_cpu(f->part[i].part_instance);
4381				attr->part[i].part_off =
4382					be32_to_cpu(f->part[i].part_off);
4383				attr->part[i].part_size =
4384					be32_to_cpu(f->part[i].part_size);
4385				attr->part[i].part_len =
4386					be32_to_cpu(f->part[i].part_len);
4387				attr->part[i].part_status =
4388					be32_to_cpu(f->part[i].part_status);
4389			}
4390		}
4391		flash->status = status;
4392		bfa_flash_cb(flash);
4393		break;
4394	case BFI_FLASH_I2H_ERASE_RSP:
4395		status = be32_to_cpu(m.erase->status);
4396		bfa_trc(flash, status);
4397		flash->status = status;
4398		bfa_flash_cb(flash);
4399		break;
4400	case BFI_FLASH_I2H_WRITE_RSP:
4401		status = be32_to_cpu(m.write->status);
4402		bfa_trc(flash, status);
4403		if (status != BFA_STATUS_OK || flash->residue == 0) {
4404			flash->status = status;
4405			bfa_flash_cb(flash);
4406		} else {
4407			bfa_trc(flash, flash->offset);
4408			bfa_flash_write_send(flash);
4409		}
4410		break;
4411	case BFI_FLASH_I2H_READ_RSP:
4412		status = be32_to_cpu(m.read->status);
4413		bfa_trc(flash, status);
4414		if (status != BFA_STATUS_OK) {
4415			flash->status = status;
4416			bfa_flash_cb(flash);
4417		} else {
4418			u32 len = be32_to_cpu(m.read->length);
4419			bfa_trc(flash, flash->offset);
4420			bfa_trc(flash, len);
4421			memcpy(flash->ubuf + flash->offset,
4422				flash->dbuf_kva, len);
4423			flash->residue -= len;
4424			flash->offset += len;
4425			if (flash->residue == 0) {
4426				flash->status = status;
4427				bfa_flash_cb(flash);
4428			} else
4429				bfa_flash_read_send(flash);
4430		}
4431		break;
4432	case BFI_FLASH_I2H_BOOT_VER_RSP:
4433		break;
4434	case BFI_FLASH_I2H_EVENT:
4435		status = be32_to_cpu(m.event->status);
4436		bfa_trc(flash, status);
4437		if (status == BFA_STATUS_BAD_FWCFG)
4438			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4439		else if (status == BFA_STATUS_INVALID_VENDOR) {
4440			u32 param;
4441			param = be32_to_cpu(m.event->param);
4442			bfa_trc(flash, param);
4443			bfa_ioc_aen_post(flash->ioc,
4444				BFA_IOC_AEN_INVALID_VENDOR);
4445		}
4446		break;
4447
4448	default:
4449		WARN_ON(1);
4450	}
4451}
4452
4453/*
4454 * Flash memory info API.
4455 *
4456 * @param[in] mincfg - minimal cfg variable
4457 */
4458u32
4459bfa_flash_meminfo(bfa_boolean_t mincfg)
4460{
4461	/* min driver doesn't need flash */
4462	if (mincfg)
4463		return 0;
4464	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4465}
4466
4467/*
4468 * Flash attach API.
4469 *
4470 * @param[in] flash - flash structure
4471 * @param[in] ioc  - ioc structure
4472 * @param[in] dev  - device structure
4473 * @param[in] trcmod - trace module
4474 * @param[in] mincfg - minimal cfg variable
4475 */
4476void
4477bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4478		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4479{
4480	flash->ioc = ioc;
4481	flash->trcmod = trcmod;
4482	flash->cbfn = NULL;
4483	flash->cbarg = NULL;
4484	flash->op_busy = 0;
4485
4486	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4487	bfa_q_qe_init(&flash->ioc_notify);
4488	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4489	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4490
4491	/* min driver doesn't need flash */
4492	if (mincfg) {
4493		flash->dbuf_kva = NULL;
4494		flash->dbuf_pa = 0;
4495	}
4496}
4497
4498/*
4499 * Claim memory for flash
4500 *
4501 * @param[in] flash - flash structure
4502 * @param[in] dm_kva - pointer to virtual memory address
4503 * @param[in] dm_pa - physical memory address
4504 * @param[in] mincfg - minimal cfg variable
4505 */
4506void
4507bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4508		bfa_boolean_t mincfg)
4509{
4510	if (mincfg)
4511		return;
4512
4513	flash->dbuf_kva = dm_kva;
4514	flash->dbuf_pa = dm_pa;
4515	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4516	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4517	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4518}
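
/*
 * Example wiring of the two calls above (a sketch under assumed
 * caller conventions, not driver code): the caller sizes the DMA area
 * with bfa_flash_meminfo() and hands the carved-out region to
 * bfa_flash_memclaim().  dma_kva/dma_pa stand in for the caller's
 * DMA allocator.
 */
static void
example_flash_mem_setup(struct bfa_flash_s *flash, u8 *dma_kva, u64 dma_pa)
{
	/* mincfg mode needs no flash DMA memory at all */
	if (bfa_flash_meminfo(BFA_FALSE) != 0)
		bfa_flash_memclaim(flash, dma_kva, dma_pa, BFA_FALSE);
}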
4519
4520/*
4521 * Get flash attribute.
4522 *
4523 * @param[in] flash - flash structure
4524 * @param[in] attr - flash attribute structure
4525 * @param[in] cbfn - callback function
4526 * @param[in] cbarg - callback argument
4527 *
4528 * Return status.
4529 */
4530bfa_status_t
4531bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4532		bfa_cb_flash_t cbfn, void *cbarg)
4533{
4534	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4535
4536	if (!bfa_ioc_is_operational(flash->ioc))
4537		return BFA_STATUS_IOC_NON_OP;
4538
4539	if (flash->op_busy) {
4540		bfa_trc(flash, flash->op_busy);
4541		return BFA_STATUS_DEVBUSY;
4542	}
4543
4544	flash->op_busy = 1;
4545	flash->cbfn = cbfn;
4546	flash->cbarg = cbarg;
4547	flash->ubuf = (u8 *) attr;
4548	bfa_flash_query_send(flash);
4549
4550	return BFA_STATUS_OK;
4551}
4552
4553/*
4554 * Erase flash partition.
4555 *
4556 * @param[in] flash - flash structure
4557 * @param[in] type - flash partition type
4558 * @param[in] instance - flash partition instance
4559 * @param[in] cbfn - callback function
4560 * @param[in] cbarg - callback argument
4561 *
4562 * Return status.
4563 */
4564bfa_status_t
4565bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4566		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4567{
4568	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4569	bfa_trc(flash, type);
4570	bfa_trc(flash, instance);
4571
4572	if (!bfa_ioc_is_operational(flash->ioc))
4573		return BFA_STATUS_IOC_NON_OP;
4574
4575	if (flash->op_busy) {
4576		bfa_trc(flash, flash->op_busy);
4577		return BFA_STATUS_DEVBUSY;
4578	}
4579
4580	flash->op_busy = 1;
4581	flash->cbfn = cbfn;
4582	flash->cbarg = cbarg;
4583	flash->type = type;
4584	flash->instance = instance;
4585
4586	bfa_flash_erase_send(flash);
4587	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4588				instance, type);
4589	return BFA_STATUS_OK;
4590}
4591
4592/*
4593 * Update flash partition.
4594 *
4595 * @param[in] flash - flash structure
4596 * @param[in] type - flash partition type
4597 * @param[in] instance - flash partition instance
4598 * @param[in] buf - update data buffer
4599 * @param[in] len - data buffer length
4600 * @param[in] offset - offset relative to the partition starting address
4601 * @param[in] cbfn - callback function
4602 * @param[in] cbarg - callback argument
4603 *
4604 * Return status.
4605 */
4606bfa_status_t
4607bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4608		u8 instance, void *buf, u32 len, u32 offset,
4609		bfa_cb_flash_t cbfn, void *cbarg)
4610{
4611	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4612	bfa_trc(flash, type);
4613	bfa_trc(flash, instance);
4614	bfa_trc(flash, len);
4615	bfa_trc(flash, offset);
4616
4617	if (!bfa_ioc_is_operational(flash->ioc))
4618		return BFA_STATUS_IOC_NON_OP;
4619
4620	/*
4621	 * 'len' must be on a word (4-byte) boundary
4622	 * 'offset' must be on a sector (16KB) boundary
4623	 */
4624	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4625		return BFA_STATUS_FLASH_BAD_LEN;
4626
4627	if (type == BFA_FLASH_PART_MFG)
4628		return BFA_STATUS_EINVAL;
4629
4630	if (flash->op_busy) {
4631		bfa_trc(flash, flash->op_busy);
4632		return BFA_STATUS_DEVBUSY;
4633	}
4634
4635	flash->op_busy = 1;
4636	flash->cbfn = cbfn;
4637	flash->cbarg = cbarg;
4638	flash->type = type;
4639	flash->instance = instance;
4640	flash->residue = len;
4641	flash->offset = 0;
4642	flash->addr_off = offset;
4643	flash->ubuf = buf;
4644
4645	bfa_flash_write_send(flash);
4646	return BFA_STATUS_OK;
4647}
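
/*
 * Illustrative pre-check for the alignment rules enforced above and
 * in bfa_flash_read_part() below (an assumed helper, not part of the
 * driver): 'len' must be word aligned, 'offset' sector aligned.
 */
static bfa_boolean_t
example_flash_args_ok(u32 len, u32 offset)
{
	return (len != 0 && !(len & 0x03) && !(offset & 0x00003FFF)) ?
		BFA_TRUE : BFA_FALSE;
}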
4648
4649/*
4650 * Read flash partition.
4651 *
4652 * @param[in] flash - flash structure
4653 * @param[in] type - flash partition type
4654 * @param[in] instance - flash partition instance
4655 * @param[in] buf - read data buffer
4656 * @param[in] len - data buffer length
4657 * @param[in] offset - offset relative to the partition starting address
4658 * @param[in] cbfn - callback function
4659 * @param[in] cbarg - callback argument
4660 *
4661 * Return status.
4662 */
4663bfa_status_t
4664bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4665		u8 instance, void *buf, u32 len, u32 offset,
4666		bfa_cb_flash_t cbfn, void *cbarg)
4667{
4668	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4669	bfa_trc(flash, type);
4670	bfa_trc(flash, instance);
4671	bfa_trc(flash, len);
4672	bfa_trc(flash, offset);
4673
4674	if (!bfa_ioc_is_operational(flash->ioc))
4675		return BFA_STATUS_IOC_NON_OP;
4676
4677	/*
4678	 * 'len' must be on a word (4-byte) boundary
4679	 * 'offset' must be on a sector (16KB) boundary
4680	 */
4681	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4682		return BFA_STATUS_FLASH_BAD_LEN;
4683
4684	if (flash->op_busy) {
4685		bfa_trc(flash, flash->op_busy);
4686		return BFA_STATUS_DEVBUSY;
4687	}
4688
4689	flash->op_busy = 1;
4690	flash->cbfn = cbfn;
4691	flash->cbarg = cbarg;
4692	flash->type = type;
4693	flash->instance = instance;
4694	flash->residue = len;
4695	flash->offset = 0;
4696	flash->addr_off = offset;
4697	flash->ubuf = buf;
4698	bfa_flash_read_send(flash);
4699
4700	return BFA_STATUS_OK;
4701}
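
/*
 * Example asynchronous read (an illustrative sketch): the call
 * returns immediately and 'buf' is fully populated before 'cbfn'
 * runs.  The partition, instance and example_* names are assumptions.
 */
static void
example_flash_read_done(void *cbarg, bfa_status_t status)
{
	struct bfa_flash_s *flash = cbarg;

	bfa_trc(flash, status);	/* on BFA_STATUS_OK the data is in buf */
}

static bfa_status_t
example_flash_read_drv_part(struct bfa_flash_s *flash, void *buf, u32 len)
{
	return bfa_flash_read_part(flash, BFA_FLASH_PART_DRV, 0, buf, len,
				   0, example_flash_read_done, flash);
}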
4702
4703/*
4704 *	DIAG module specific
4705 */
4706
4707#define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
4708#define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
4709
4710/* IOC event handler */
4711static void
4712bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4713{
4714	struct bfa_diag_s *diag = diag_arg;
4715
4716	bfa_trc(diag, event);
4717	bfa_trc(diag, diag->block);
4718	bfa_trc(diag, diag->fwping.lock);
4719	bfa_trc(diag, diag->tsensor.lock);
4720
4721	switch (event) {
4722	case BFA_IOC_E_DISABLED:
4723	case BFA_IOC_E_FAILED:
4724		if (diag->fwping.lock) {
4725			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4726			diag->fwping.cbfn(diag->fwping.cbarg,
4727					diag->fwping.status);
4728			diag->fwping.lock = 0;
4729		}
4730
4731		if (diag->tsensor.lock) {
4732			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4733			diag->tsensor.cbfn(diag->tsensor.cbarg,
4734					   diag->tsensor.status);
4735			diag->tsensor.lock = 0;
4736		}
4737
4738		if (diag->block) {
4739			if (diag->timer_active) {
4740				bfa_timer_stop(&diag->timer);
4741				diag->timer_active = 0;
4742			}
4743
4744			diag->status = BFA_STATUS_IOC_FAILURE;
4745			diag->cbfn(diag->cbarg, diag->status);
4746			diag->block = 0;
4747		}
4748		break;
4749
4750	default:
4751		break;
4752	}
4753}
4754
4755static void
4756bfa_diag_memtest_done(void *cbarg)
4757{
4758	struct bfa_diag_s *diag = cbarg;
4759	struct bfa_ioc_s  *ioc = diag->ioc;
4760	struct bfa_diag_memtest_result *res = diag->result;
4761	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
4762	u32	pgnum, i;
4763
4764	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4765	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4766
4767	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4768			 sizeof(u32)); i++) {
4769		/* read test result from smem */
4770		*((u32 *) res + i) =
4771			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4772		loff += sizeof(u32);
4773	}
4774
4775	/* Reset IOC fwstates to BFI_IOC_UNINIT */
4776	bfa_ioc_reset_fwstate(ioc);
4777
4778	res->status = swab32(res->status);
4779	bfa_trc(diag, res->status);
4780
4781	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4782		diag->status = BFA_STATUS_OK;
4783	else {
4784		diag->status = BFA_STATUS_MEMTEST_FAILED;
4785		res->addr = swab32(res->addr);
4786		res->exp = swab32(res->exp);
4787		res->act = swab32(res->act);
4788		res->err_status = swab32(res->err_status);
4789		res->err_status1 = swab32(res->err_status1);
4790		res->err_addr = swab32(res->err_addr);
4791		bfa_trc(diag, res->addr);
4792		bfa_trc(diag, res->exp);
4793		bfa_trc(diag, res->act);
4794		bfa_trc(diag, res->err_status);
4795		bfa_trc(diag, res->err_status1);
4796		bfa_trc(diag, res->err_addr);
4797	}
4798	diag->timer_active = 0;
4799	diag->cbfn(diag->cbarg, diag->status);
4800	diag->block = 0;
4801}
4802
4803/*
4804 * Firmware ping
4805 */
4806
4807/*
4808 * Perform DMA test directly
4809 */
4810static void
4811diag_fwping_send(struct bfa_diag_s *diag)
4812{
4813	struct bfi_diag_fwping_req_s *fwping_req;
4814	u32	i;
4815
4816	bfa_trc(diag, diag->fwping.dbuf_pa);
4817
4818	/* fill DMA area with pattern */
4819	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4820		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4821
4822	/* Fill mbox msg */
4823	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4824
4825	/* Setup SG list */
4826	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4827			diag->fwping.dbuf_pa);
4828	/* Set up dma count */
4829	fwping_req->count = cpu_to_be32(diag->fwping.count);
4830	/* Set up data pattern */
4831	fwping_req->data = diag->fwping.data;
4832
4833	/* build host command */
4834	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4835		bfa_ioc_portid(diag->ioc));
4836
4837	/* send mbox cmd */
4838	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4839}
4840
4841static void
4842diag_fwping_comp(struct bfa_diag_s *diag,
4843		 struct bfi_diag_fwping_rsp_s *diag_rsp)
4844{
4845	u32	rsp_data = diag_rsp->data;
4846	u8	rsp_dma_status = diag_rsp->dma_status;
4847
4848	bfa_trc(diag, rsp_data);
4849	bfa_trc(diag, rsp_dma_status);
4850
4851	if (rsp_dma_status == BFA_STATUS_OK) {
4852		u32	i, pat;
4853		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4854			diag->fwping.data;
4855		/* Check mbox data */
4856		if (diag->fwping.data != rsp_data) {
4857			bfa_trc(diag, rsp_data);
4858			diag->fwping.result->dmastatus =
4859					BFA_STATUS_DATACORRUPTED;
4860			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4861			diag->fwping.cbfn(diag->fwping.cbarg,
4862					diag->fwping.status);
4863			diag->fwping.lock = 0;
4864			return;
4865		}
4866		/* Check dma pattern */
4867		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4868			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4869				bfa_trc(diag, i);
4870				bfa_trc(diag, pat);
4871				bfa_trc(diag,
4872					*((u32 *)diag->fwping.dbuf_kva + i));
4873				diag->fwping.result->dmastatus =
4874						BFA_STATUS_DATACORRUPTED;
4875				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4876				diag->fwping.cbfn(diag->fwping.cbarg,
4877						diag->fwping.status);
4878				diag->fwping.lock = 0;
4879				return;
4880			}
4881		}
4882		diag->fwping.result->dmastatus = BFA_STATUS_OK;
4883		diag->fwping.status = BFA_STATUS_OK;
4884		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4885		diag->fwping.lock = 0;
4886	} else {
4887		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4888		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4889		diag->fwping.lock = 0;
4890	}
4891}
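
/*
 * Note on the pattern selection above (an illustrative reading of the
 * code): the check expects the firmware to invert the DMA pattern once
 * per ping, so an odd ping count leaves ~data in the buffer and an
 * even count restores data; hence the (count & 0x1) selector.
 */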
4892
4893/*
4894 * Temperature Sensor
4895 */
4896
4897static void
4898diag_tempsensor_send(struct bfa_diag_s *diag)
4899{
4900	struct bfi_diag_ts_req_s *msg;
4901
4902	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4903	bfa_trc(diag, msg->temp);
4904	/* build host command */
4905	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4906		bfa_ioc_portid(diag->ioc));
4907	/* send mbox cmd */
4908	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4909}
4910
4911static void
4912diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4913{
4914	if (!diag->tsensor.lock) {
4915		/* receiving response after ioc failure */
4916		bfa_trc(diag, diag->tsensor.lock);
4917		return;
4918	}
4919
4920	/*
4921	 * The ASIC junction tempsensor is a register read operation,
4922	 * so it always returns OK.
4923	 */
4924	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4925	diag->tsensor.temp->ts_junc = rsp->ts_junc;
4926	diag->tsensor.temp->ts_brd = rsp->ts_brd;
4927
4928	if (rsp->ts_brd) {
4929		/* tsensor.temp->status is brd_temp status */
4930		diag->tsensor.temp->status = rsp->status;
4931		if (rsp->status == BFA_STATUS_OK) {
4932			diag->tsensor.temp->brd_temp =
4933				be16_to_cpu(rsp->brd_temp);
4934		} else
4935			diag->tsensor.temp->brd_temp = 0;
4936	}
4937
4938	bfa_trc(diag, rsp->status);
4939	bfa_trc(diag, rsp->ts_junc);
4940	bfa_trc(diag, rsp->temp);
4941	bfa_trc(diag, rsp->ts_brd);
4942	bfa_trc(diag, rsp->brd_temp);
4943
4944	/* tsensor status is always good because junction temp is always available */
4945	diag->tsensor.status = BFA_STATUS_OK;
4946	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4947	diag->tsensor.lock = 0;
4948}
4949
4950/*
4951 *	LED Test command
4952 */
4953static void
4954diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4955{
4956	struct bfi_diag_ledtest_req_s  *msg;
4957
4958	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4959	/* build host command */
4960	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4961			bfa_ioc_portid(diag->ioc));
4962
4963	/*
4964	 * convert the freq from N blinks per 10 sec to the
4965	 * crossbow ontime value. We do it here because division is needed.
4966	 */
4967	if (ledtest->freq)
4968		ledtest->freq = 500 / ledtest->freq;
4969
4970	if (ledtest->freq == 0)
4971		ledtest->freq = 1;
4972
4973	bfa_trc(diag, ledtest->freq);
4974	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4975	msg->cmd = (u8) ledtest->cmd;
4976	msg->color = (u8) ledtest->color;
4977	msg->portid = bfa_ioc_portid(diag->ioc);
4978	msg->led = ledtest->led;
4979	msg->freq = cpu_to_be16(ledtest->freq);
4980
4981	/* send mbox cmd */
4982	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4983}
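
/*
 * Worked example of the conversion above: a request for 10 blinks per
 * 10 seconds becomes 500 / 10 = 50 ontime units, while anything above
 * 500 blinks clamps to the minimum ontime value of 1.
 */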
4984
4985static void
4986diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4987{
4988	bfa_trc(diag, diag->ledtest.lock);
4989	diag->ledtest.lock = BFA_FALSE;
4990	/* no bfa_cb_queue is needed because driver is not waiting */
4991}
4992
4993/*
4994 * Port beaconing
4995 */
4996static void
4997diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4998{
4999	struct bfi_diag_portbeacon_req_s *msg;
5000
5001	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
5002	/* build host command */
5003	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
5004		bfa_ioc_portid(diag->ioc));
5005	msg->beacon = beacon;
5006	msg->period = cpu_to_be32(sec);
5007	/* send mbox cmd */
5008	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
5009}
5010
5011static void
5012diag_portbeacon_comp(struct bfa_diag_s *diag)
5013{
5014	bfa_trc(diag, diag->beacon.state);
5015	diag->beacon.state = BFA_FALSE;
5016	if (diag->cbfn_beacon)
5017		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
5018}
5019
5020/*
5021 *	Diag hmbox handler
5022 */
5023static void
5024bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5025{
5026	struct bfa_diag_s *diag = diagarg;
5027
5028	switch (msg->mh.msg_id) {
5029	case BFI_DIAG_I2H_PORTBEACON:
5030		diag_portbeacon_comp(diag);
5031		break;
5032	case BFI_DIAG_I2H_FWPING:
5033		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5034		break;
5035	case BFI_DIAG_I2H_TEMPSENSOR:
5036		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5037		break;
5038	case BFI_DIAG_I2H_LEDTEST:
5039		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5040		break;
5041	default:
5042		bfa_trc(diag, msg->mh.msg_id);
5043		WARN_ON(1);
5044	}
5045}
5046
5047/*
5048 * Gen RAM Test
5049 *
5050 *   @param[in] *diag           - diag data struct
5051 *   @param[in] *memtest        - mem test params input from upper layer
5052 *   @param[in] pattern         - mem test pattern
5053 *   @param[in] *result         - mem test result
5054 *   @param[in] cbfn            - mem test callback function
5055 *   @param[in] cbarg           - callback function arg
5056 *
5057 *   @param[out]
5058 */
5059bfa_status_t
5060bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5061		u32 pattern, struct bfa_diag_memtest_result *result,
5062		bfa_cb_diag_t cbfn, void *cbarg)
5063{
5064	u32	memtest_tov;
5065
5066	bfa_trc(diag, pattern);
5067
5068	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5069		return BFA_STATUS_ADAPTER_ENABLED;
5070
5071	/* check to see if there is another destructive diag cmd running */
5072	if (diag->block) {
5073		bfa_trc(diag, diag->block);
5074		return BFA_STATUS_DEVBUSY;
5075	} else
5076		diag->block = 1;
5077
5078	diag->result = result;
5079	diag->cbfn = cbfn;
5080	diag->cbarg = cbarg;
5081
5082	/* download memtest code and take LPU0 out of reset */
5083	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5084
5085	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5086		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5087	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5088			bfa_diag_memtest_done, diag, memtest_tov);
5089	diag->timer_active = 1;
5090	return BFA_STATUS_OK;
5091}
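
/*
 * Example memtest invocation (an illustrative sketch): the adapter
 * must already be disabled, and the result lands in 'res' via 'cbfn'
 * once the test finishes or the timer fires.  The pattern value and
 * the example_* name are assumptions.
 */
static bfa_status_t
example_run_memtest(struct bfa_diag_s *diag,
		    struct bfa_diag_memtest_s *memtest,
		    struct bfa_diag_memtest_result *res,
		    bfa_cb_diag_t cbfn, void *cbarg)
{
	/* 0xa5a5a5a5 is just a sample test pattern */
	return bfa_diag_memtest(diag, memtest, 0xa5a5a5a5, res, cbfn, cbarg);
}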
5092
5093/*
5094 * DIAG firmware ping command
5095 *
5096 *   @param[in] *diag           - diag data struct
5097 *   @param[in] cnt             - dma loop count for testing PCIE
5098 *   @param[in] data            - data pattern to pass in fw
5099 *   @param[in] *result         - pt to bfa_diag_fwping_result_t data struct
5100 *   @param[in] cbfn            - callback function
5101 *   @param[in] *cbarg          - callback functioin arg
5102 *
5103 *   @param[out]
5104 */
5105bfa_status_t
5106bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5107		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5108		void *cbarg)
5109{
5110	bfa_trc(diag, cnt);
5111	bfa_trc(diag, data);
5112
5113	if (!bfa_ioc_is_operational(diag->ioc))
5114		return BFA_STATUS_IOC_NON_OP;
5115
5116	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
5117	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
5118		return BFA_STATUS_CMD_NOTSUPP;
5119
5120	/* check to see if there is another destructive diag cmd running */
5121	if (diag->block || diag->fwping.lock) {
5122		bfa_trc(diag, diag->block);
5123		bfa_trc(diag, diag->fwping.lock);
5124		return BFA_STATUS_DEVBUSY;
5125	}
5126
5127	/* Initialization */
5128	diag->fwping.lock = 1;
5129	diag->fwping.cbfn = cbfn;
5130	diag->fwping.cbarg = cbarg;
5131	diag->fwping.result = result;
5132	diag->fwping.data = data;
5133	diag->fwping.count = cnt;
5134
5135	/* Init test results */
5136	diag->fwping.result->data = 0;
5137	diag->fwping.result->status = BFA_STATUS_OK;
5138
5139	/* kick off the first ping */
5140	diag_fwping_send(diag);
5141	return BFA_STATUS_OK;
5142}
5143
5144/*
5145 * Read Temperature Sensor
5146 *
5147 *   @param[in] *diag           - diag data struct
5148 *   @param[in] *result         - pointer to bfa_diag_temp_t data struct
5149 *   @param[in] cbfn            - callback function
5150 *   @param[in] *cbarg          - callback function arg
5151 *
5152 *   @param[out]
5153 */
5154bfa_status_t
5155bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5156		struct bfa_diag_results_tempsensor_s *result,
5157		bfa_cb_diag_t cbfn, void *cbarg)
5158{
5159	/* check to see if there is a destructive diag cmd running */
5160	if (diag->block || diag->tsensor.lock) {
5161		bfa_trc(diag, diag->block);
5162		bfa_trc(diag, diag->tsensor.lock);
5163		return BFA_STATUS_DEVBUSY;
5164	}
5165
5166	if (!bfa_ioc_is_operational(diag->ioc))
5167		return BFA_STATUS_IOC_NON_OP;
5168
5169	/* Init diag mod params */
5170	diag->tsensor.lock = 1;
5171	diag->tsensor.temp = result;
5172	diag->tsensor.cbfn = cbfn;
5173	diag->tsensor.cbarg = cbarg;
5174	diag->tsensor.status = BFA_STATUS_OK;
5175
5176	/* Send msg to fw */
5177	diag_tempsensor_send(diag);
5178
5179	return BFA_STATUS_OK;
5180}
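
/*
 * Usage note for the query above (illustrative): junction temperature
 * is always returned; board temperature is only meaningful when the
 * result's ts_brd flag is set and its status is BFA_STATUS_OK.
 */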
5181
5182/*
5183 * LED Test command
5184 *
5185 *   @param[in] *diag           - diag data struct
5186 *   @param[in] *ledtest        - pointer to ledtest data structure
5187 *
5188 *   @param[out]
5189 */
5190bfa_status_t
5191bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5192{
5193	bfa_trc(diag, ledtest->cmd);
5194
5195	if (!bfa_ioc_is_operational(diag->ioc))
5196		return BFA_STATUS_IOC_NON_OP;
5197
5198	if (diag->beacon.state)
5199		return BFA_STATUS_BEACON_ON;
5200
5201	if (diag->ledtest.lock)
5202		return BFA_STATUS_LEDTEST_OP;
5203
5204	/* Send msg to fw */
5205	diag->ledtest.lock = BFA_TRUE;
5206	diag_ledtest_send(diag, ledtest);
5207
5208	return BFA_STATUS_OK;
5209}
5210
5211/*
5212 * Port beaconing command
5213 *
5214 *   @param[in] *diag           - diag data struct
5215 *   @param[in] beacon          - port beaconing 1:ON   0:OFF
5216 *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
5217 *   @param[in] sec             - beaconing duration in seconds
5218 *
5219 *   @param[out]
5220 */
5221bfa_status_t
5222bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5223		bfa_boolean_t link_e2e_beacon, uint32_t sec)
5224{
5225	bfa_trc(diag, beacon);
5226	bfa_trc(diag, link_e2e_beacon);
5227	bfa_trc(diag, sec);
5228
5229	if (!bfa_ioc_is_operational(diag->ioc))
5230		return BFA_STATUS_IOC_NON_OP;
5231
5232	if (diag->ledtest.lock)
5233		return BFA_STATUS_LEDTEST_OP;
5234
5235	if (diag->beacon.state && beacon)       /* beacon already on */
5236		return BFA_STATUS_BEACON_ON;
5237
5238	diag->beacon.state	= beacon;
5239	diag->beacon.link_e2e	= link_e2e_beacon;
5240	if (diag->cbfn_beacon)
5241		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5242
5243	/* Send msg to fw */
5244	diag_portbeacon_send(diag, beacon, sec);
5245
5246	return BFA_STATUS_OK;
5247}
5248
5249/*
5250 * Return DMA memory needed by diag module.
5251 */
5252u32
5253bfa_diag_meminfo(void)
5254{
5255	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5256}
5257
5258/*
5259 *	Attach virtual and physical memory for Diag.
5260 */
5261void
5262bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5263	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5264{
5265	diag->dev = dev;
5266	diag->ioc = ioc;
5267	diag->trcmod = trcmod;
5268
5269	diag->block = 0;
5270	diag->cbfn = NULL;
5271	diag->cbarg = NULL;
5272	diag->result = NULL;
5273	diag->cbfn_beacon = cbfn_beacon;
5274
5275	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5276	bfa_q_qe_init(&diag->ioc_notify);
5277	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5278	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5279}
5280
5281void
5282bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5283{
5284	diag->fwping.dbuf_kva = dm_kva;
5285	diag->fwping.dbuf_pa = dm_pa;
5286	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5287}
5288
5289/*
5290 *	PHY module specific
5291 */
5292#define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
5293#define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
5294
5295static void
5296bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5297{
5298	int i, m = sz >> 2;
5299
5300	for (i = 0; i < m; i++)
5301		obuf[i] = be32_to_cpu(ibuf[i]);
5302}
5303
5304static bfa_boolean_t
5305bfa_phy_present(struct bfa_phy_s *phy)
5306{
5307	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5308}
5309
5310static void
5311bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5312{
5313	struct bfa_phy_s *phy = cbarg;
5314
5315	bfa_trc(phy, event);
5316
5317	switch (event) {
5318	case BFA_IOC_E_DISABLED:
5319	case BFA_IOC_E_FAILED:
5320		if (phy->op_busy) {
5321			phy->status = BFA_STATUS_IOC_FAILURE;
5322			phy->cbfn(phy->cbarg, phy->status);
5323			phy->op_busy = 0;
5324		}
5325		break;
5326
5327	default:
5328		break;
5329	}
5330}
5331
5332/*
5333 * Send phy attribute query request.
5334 *
5335 * @param[in] cbarg - callback argument
5336 */
5337static void
5338bfa_phy_query_send(void *cbarg)
5339{
5340	struct bfa_phy_s *phy = cbarg;
5341	struct bfi_phy_query_req_s *msg =
5342			(struct bfi_phy_query_req_s *) phy->mb.msg;
5343
5344	msg->instance = phy->instance;
5345	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5346		bfa_ioc_portid(phy->ioc));
5347	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5348	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5349}
5350
5351/*
5352 * Send phy write request.
5353 *
5354 * @param[in] cbarg - callback argument
5355 */
5356static void
5357bfa_phy_write_send(void *cbarg)
5358{
5359	struct bfa_phy_s *phy = cbarg;
5360	struct bfi_phy_write_req_s *msg =
5361			(struct bfi_phy_write_req_s *) phy->mb.msg;
5362	u32	len;
5363	u16	*buf, *dbuf;
5364	int	i, sz;
5365
5366	msg->instance = phy->instance;
5367	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5368	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5369			phy->residue : BFA_PHY_DMA_BUF_SZ;
5370	msg->length = cpu_to_be32(len);
5371
5372	/* indicate if it's the last msg of the whole write operation */
5373	msg->last = (len == phy->residue) ? 1 : 0;
5374
5375	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5376		bfa_ioc_portid(phy->ioc));
5377	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5378
5379	buf = (u16 *) (phy->ubuf + phy->offset);
5380	dbuf = (u16 *)phy->dbuf_kva;
5381	sz = len >> 1;
5382	for (i = 0; i < sz; i++)
5383		buf[i] = cpu_to_be16(dbuf[i]);
5384
5385	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5386
5387	phy->residue -= len;
5388	phy->offset += len;
5389}
5390
5391/*
5392 * Send phy read request.
5393 *
5394 * @param[in] cbarg - callback argument
5395 */
5396static void
5397bfa_phy_read_send(void *cbarg)
5398{
5399	struct bfa_phy_s *phy = cbarg;
5400	struct bfi_phy_read_req_s *msg =
5401			(struct bfi_phy_read_req_s *) phy->mb.msg;
5402	u32	len;
5403
5404	msg->instance = phy->instance;
5405	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5406	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5407			phy->residue : BFA_PHY_DMA_BUF_SZ;
5408	msg->length = cpu_to_be32(len);
5409	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5410		bfa_ioc_portid(phy->ioc));
5411	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5412	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5413}
5414
5415/*
5416 * Send phy stats request.
5417 *
5418 * @param[in] cbarg - callback argument
5419 */
5420static void
5421bfa_phy_stats_send(void *cbarg)
5422{
5423	struct bfa_phy_s *phy = cbarg;
5424	struct bfi_phy_stats_req_s *msg =
5425			(struct bfi_phy_stats_req_s *) phy->mb.msg;
5426
5427	msg->instance = phy->instance;
5428	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5429		bfa_ioc_portid(phy->ioc));
5430	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5431	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5432}
5433
5434/*
5435 * PHY memory info API.
5436 *
5437 * @param[in] mincfg - minimal cfg variable
5438 */
5439u32
5440bfa_phy_meminfo(bfa_boolean_t mincfg)
5441{
5442	/* min driver doesn't need phy */
5443	if (mincfg)
5444		return 0;
5445
5446	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5447}
5448
5449/*
5450 * PHY attach API.
5451 *
5452 * @param[in] phy - phy structure
5453 * @param[in] ioc  - ioc structure
5454 * @param[in] dev  - device structure
5455 * @param[in] trcmod - trace module
5456 * @param[in] mincfg - minimal cfg variable
5457 */
5458void
5459bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5460		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5461{
5462	phy->ioc = ioc;
5463	phy->trcmod = trcmod;
5464	phy->cbfn = NULL;
5465	phy->cbarg = NULL;
5466	phy->op_busy = 0;
5467
5468	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5469	bfa_q_qe_init(&phy->ioc_notify);
5470	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5471	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5472
5473	/* min driver doesn't need phy */
5474	if (mincfg) {
5475		phy->dbuf_kva = NULL;
5476		phy->dbuf_pa = 0;
5477	}
5478}
5479
5480/*
5481 * Claim memory for phy
5482 *
5483 * @param[in] phy - phy structure
5484 * @param[in] dm_kva - pointer to virtual memory address
5485 * @param[in] dm_pa - physical memory address
5486 * @param[in] mincfg - minimal cfg variable
5487 */
5488void
5489bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5490		bfa_boolean_t mincfg)
5491{
5492	if (mincfg)
5493		return;
5494
5495	phy->dbuf_kva = dm_kva;
5496	phy->dbuf_pa = dm_pa;
5497	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5498	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5499	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5500}
5501
5502bfa_boolean_t
5503bfa_phy_busy(struct bfa_ioc_s *ioc)
5504{
5505	void __iomem	*rb;
5506
5507	rb = bfa_ioc_bar0(ioc);
5508	return readl(rb + BFA_PHY_LOCK_STATUS);
5509}
5510
5511/*
5512 * Get phy attribute.
5513 *
5514 * @param[in] phy - phy structure
5515 * @param[in] attr - phy attribute structure
5516 * @param[in] cbfn - callback function
5517 * @param[in] cbarg - callback argument
5518 *
5519 * Return status.
5520 */
5521bfa_status_t
5522bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5523		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5524{
5525	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5526	bfa_trc(phy, instance);
5527
5528	if (!bfa_phy_present(phy))
5529		return BFA_STATUS_PHY_NOT_PRESENT;
5530
5531	if (!bfa_ioc_is_operational(phy->ioc))
5532		return BFA_STATUS_IOC_NON_OP;
5533
5534	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5535		bfa_trc(phy, phy->op_busy);
5536		return BFA_STATUS_DEVBUSY;
5537	}
5538
5539	phy->op_busy = 1;
5540	phy->cbfn = cbfn;
5541	phy->cbarg = cbarg;
5542	phy->instance = instance;
5543	phy->ubuf = (uint8_t *) attr;
5544	bfa_phy_query_send(phy);
5545
5546	return BFA_STATUS_OK;
5547}
5548
5549/*
5550 * Get phy stats.
5551 *
5552 * @param[in] phy - phy structure
5553 * @param[in] instance - phy image instance
5554 * @param[in] stats - pointer to phy stats
5555 * @param[in] cbfn - callback function
5556 * @param[in] cbarg - callback argument
5557 *
5558 * Return status.
5559 */
5560bfa_status_t
5561bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5562		struct bfa_phy_stats_s *stats,
5563		bfa_cb_phy_t cbfn, void *cbarg)
5564{
5565	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5566	bfa_trc(phy, instance);
5567
5568	if (!bfa_phy_present(phy))
5569		return BFA_STATUS_PHY_NOT_PRESENT;
5570
5571	if (!bfa_ioc_is_operational(phy->ioc))
5572		return BFA_STATUS_IOC_NON_OP;
5573
5574	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5575		bfa_trc(phy, phy->op_busy);
5576		return BFA_STATUS_DEVBUSY;
5577	}
5578
5579	phy->op_busy = 1;
5580	phy->cbfn = cbfn;
5581	phy->cbarg = cbarg;
5582	phy->instance = instance;
5583	phy->ubuf = (u8 *) stats;
5584	bfa_phy_stats_send(phy);
5585
5586	return BFA_STATUS_OK;
5587}
5588
5589/*
5590 * Update phy image.
5591 *
5592 * @param[in] phy - phy structure
5593 * @param[in] instance - phy image instance
5594 * @param[in] buf - update data buffer
5595 * @param[in] len - data buffer length
5596 * @param[in] offset - offset relative to starting address
5597 * @param[in] cbfn - callback function
5598 * @param[in] cbarg - callback argument
5599 *
5600 * Return status.
5601 */
5602bfa_status_t
5603bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5604		void *buf, u32 len, u32 offset,
5605		bfa_cb_phy_t cbfn, void *cbarg)
5606{
5607	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5608	bfa_trc(phy, instance);
5609	bfa_trc(phy, len);
5610	bfa_trc(phy, offset);
5611
5612	if (!bfa_phy_present(phy))
5613		return BFA_STATUS_PHY_NOT_PRESENT;
5614
5615	if (!bfa_ioc_is_operational(phy->ioc))
5616		return BFA_STATUS_IOC_NON_OP;
5617
5618	/* 'len' must be in word (4-byte) boundary */
5619	if (!len || (len & 0x03))
5620		return BFA_STATUS_FAILED;
5621
5622	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5623		bfa_trc(phy, phy->op_busy);
5624		return BFA_STATUS_DEVBUSY;
5625	}
5626
5627	phy->op_busy = 1;
5628	phy->cbfn = cbfn;
5629	phy->cbarg = cbarg;
5630	phy->instance = instance;
5631	phy->residue = len;
5632	phy->offset = 0;
5633	phy->addr_off = offset;
5634	phy->ubuf = buf;
5635
5636	bfa_phy_write_send(phy);
5637	return BFA_STATUS_OK;
5638}
5639
5640/*
5641 * Read phy image.
5642 *
5643 * @param[in] phy - phy structure
5644 * @param[in] instance - phy image instance
5645 * @param[in] buf - read data buffer
5646 * @param[in] len - data buffer length
5647 * @param[in] offset - offset relative to starting address
5648 * @param[in] cbfn - callback function
5649 * @param[in] cbarg - callback argument
5650 *
5651 * Return status.
5652 */
5653bfa_status_t
5654bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5655		void *buf, u32 len, u32 offset,
5656		bfa_cb_phy_t cbfn, void *cbarg)
5657{
5658	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5659	bfa_trc(phy, instance);
5660	bfa_trc(phy, len);
5661	bfa_trc(phy, offset);
5662
5663	if (!bfa_phy_present(phy))
5664		return BFA_STATUS_PHY_NOT_PRESENT;
5665
5666	if (!bfa_ioc_is_operational(phy->ioc))
5667		return BFA_STATUS_IOC_NON_OP;
5668
5669	/* 'len' must be in word (4-byte) boundary */
5670	if (!len || (len & 0x03))
5671		return BFA_STATUS_FAILED;
5672
5673	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5674		bfa_trc(phy, phy->op_busy);
5675		return BFA_STATUS_DEVBUSY;
5676	}
5677
5678	phy->op_busy = 1;
5679	phy->cbfn = cbfn;
5680	phy->cbarg = cbarg;
5681	phy->instance = instance;
5682	phy->residue = len;
5683	phy->offset = 0;
5684	phy->addr_off = offset;
5685	phy->ubuf = buf;
5686	bfa_phy_read_send(phy);
5687
5688	return BFA_STATUS_OK;
5689}
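
/*
 * Example phy image read (an illustrative sketch following the same
 * async pattern as the flash APIs): instance selects the phy image.
 * Instance 0, offset 0 and the example_* names are assumptions.
 */
static void
example_phy_read_done(void *cbarg, bfa_status_t status)
{
	struct bfa_phy_s *phy = cbarg;

	bfa_trc(phy, status);
}

static bfa_status_t
example_phy_read_image(struct bfa_phy_s *phy, void *buf, u32 len)
{
	return bfa_phy_read(phy, 0, buf, len, 0,
			    example_phy_read_done, phy);
}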
5690
5691/*
5692 * Process phy response messages upon receiving interrupts.
5693 *
5694 * @param[in] phyarg - phy structure
5695 * @param[in] msg - message structure
5696 */
5697void
5698bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5699{
5700	struct bfa_phy_s *phy = phyarg;
5701	u32	status;
5702
5703	union {
5704		struct bfi_phy_query_rsp_s *query;
5705		struct bfi_phy_stats_rsp_s *stats;
5706		struct bfi_phy_write_rsp_s *write;
5707		struct bfi_phy_read_rsp_s *read;
5708		struct bfi_mbmsg_s   *msg;
5709	} m;
5710
5711	m.msg = msg;
5712	bfa_trc(phy, msg->mh.msg_id);
5713
5714	if (!phy->op_busy) {
5715		/* receiving response after ioc failure */
5716		bfa_trc(phy, 0x9999);
5717		return;
5718	}
5719
5720	switch (msg->mh.msg_id) {
5721	case BFI_PHY_I2H_QUERY_RSP:
5722		status = be32_to_cpu(m.query->status);
5723		bfa_trc(phy, status);
5724
5725		if (status == BFA_STATUS_OK) {
5726			struct bfa_phy_attr_s *attr =
5727				(struct bfa_phy_attr_s *) phy->ubuf;
5728			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5729					sizeof(struct bfa_phy_attr_s));
5730			bfa_trc(phy, attr->status);
5731			bfa_trc(phy, attr->length);
5732		}
5733
5734		phy->status = status;
5735		phy->op_busy = 0;
5736		if (phy->cbfn)
5737			phy->cbfn(phy->cbarg, phy->status);
5738		break;
5739	case BFI_PHY_I2H_STATS_RSP:
5740		status = be32_to_cpu(m.stats->status);
5741		bfa_trc(phy, status);
5742
5743		if (status == BFA_STATUS_OK) {
5744			struct bfa_phy_stats_s *stats =
5745				(struct bfa_phy_stats_s *) phy->ubuf;
5746			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5747				sizeof(struct bfa_phy_stats_s));
5748			bfa_trc(phy, stats->status);
5749		}
5750
5751		phy->status = status;
5752		phy->op_busy = 0;
5753		if (phy->cbfn)
5754			phy->cbfn(phy->cbarg, phy->status);
5755		break;
5756	case BFI_PHY_I2H_WRITE_RSP:
5757		status = be32_to_cpu(m.write->status);
5758		bfa_trc(phy, status);
5759
5760		if (status != BFA_STATUS_OK || phy->residue == 0) {
5761			phy->status = status;
5762			phy->op_busy = 0;
5763			if (phy->cbfn)
5764				phy->cbfn(phy->cbarg, phy->status);
5765		} else {
5766			bfa_trc(phy, phy->offset);
5767			bfa_phy_write_send(phy);
5768		}
5769		break;
5770	case BFI_PHY_I2H_READ_RSP:
5771		status = be32_to_cpu(m.read->status);
5772		bfa_trc(phy, status);
5773
5774		if (status != BFA_STATUS_OK) {
5775			phy->status = status;
5776			phy->op_busy = 0;
5777			if (phy->cbfn)
5778				phy->cbfn(phy->cbarg, phy->status);
5779		} else {
5780			u32 len = be32_to_cpu(m.read->length);
5781			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5782			u16 *dbuf = (u16 *)phy->dbuf_kva;
5783			int i, sz = len >> 1;
5784
5785			bfa_trc(phy, phy->offset);
5786			bfa_trc(phy, len);
5787
5788			for (i = 0; i < sz; i++)
5789				buf[i] = be16_to_cpu(dbuf[i]);
5790
5791			phy->residue -= len;
5792			phy->offset += len;
5793
5794			if (phy->residue == 0) {
5795				phy->status = status;
5796				phy->op_busy = 0;
5797				if (phy->cbfn)
5798					phy->cbfn(phy->cbarg, phy->status);
5799			} else
5800				bfa_phy_read_send(phy);
5801		}
5802		break;
5803	default:
5804		WARN_ON(1);
5805	}
5806}
5807
5808/*
5809 * DCONF state machine events
5810 */
5811enum bfa_dconf_event {
5812	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
5813	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
5814	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
5815	BFA_DCONF_SM_TIMEOUT		= 4,	/* timer expiry */
5816	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
5817	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
5818};
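
/*
 * Illustrative summary of the common event flow through the states
 * below (not exhaustive):
 *
 *   uninit --INIT--> flash_read --FLASH_COMP--> ready --WR--> dirty
 *   dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *   dirty --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
 */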
5819
5820/* forward declaration of DCONF state machine */
5821static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5822				enum bfa_dconf_event event);
5823static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5824				enum bfa_dconf_event event);
5825static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5826				enum bfa_dconf_event event);
5827static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5828				enum bfa_dconf_event event);
5829static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5830				enum bfa_dconf_event event);
5831static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5832				enum bfa_dconf_event event);
5833static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5834				enum bfa_dconf_event event);
5835
5836static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5837static void bfa_dconf_timer(void *cbarg);
5838static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5839static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5840
5841/*
5842 * Beginning state of dconf module. Waiting for an event to start.
5843 */
5844static void
5845bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5846{
5847	bfa_status_t bfa_status;
5848	bfa_trc(dconf->bfa, event);
5849
5850	switch (event) {
5851	case BFA_DCONF_SM_INIT:
5852		if (dconf->min_cfg) {
5853			bfa_trc(dconf->bfa, dconf->min_cfg);
5854			bfa_fsm_send_event(&dconf->bfa->iocfc,
5855					IOCFC_E_DCONF_DONE);
5856			return;
5857		}
5858		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5859		bfa_timer_start(dconf->bfa, &dconf->timer,
5860			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5861		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5862					BFA_FLASH_PART_DRV, dconf->instance,
5863					dconf->dconf,
5864					sizeof(struct bfa_dconf_s), 0,
5865					bfa_dconf_init_cb, dconf->bfa);
5866		if (bfa_status != BFA_STATUS_OK) {
5867			bfa_timer_stop(&dconf->timer);
5868			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5869			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5870			return;
5871		}
5872		break;
5873	case BFA_DCONF_SM_EXIT:
5874		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5875		break;
5876	case BFA_DCONF_SM_IOCDISABLE:
5877	case BFA_DCONF_SM_WR:
5878	case BFA_DCONF_SM_FLASH_COMP:
5879		break;
5880	default:
5881		bfa_sm_fault(dconf->bfa, event);
5882	}
5883}
5884
5885/*
5886 * Read flash for dconf entries and make a call back to the driver once done.
5887 */
5888static void
5889bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5890			enum bfa_dconf_event event)
5891{
5892	bfa_trc(dconf->bfa, event);
5893
5894	switch (event) {
5895	case BFA_DCONF_SM_FLASH_COMP:
5896		bfa_timer_stop(&dconf->timer);
5897		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5898		break;
5899	case BFA_DCONF_SM_TIMEOUT:
5900		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5901		bfa_ioc_suspend(&dconf->bfa->ioc);
5902		break;
5903	case BFA_DCONF_SM_EXIT:
5904		bfa_timer_stop(&dconf->timer);
5905		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5906		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5907		break;
5908	case BFA_DCONF_SM_IOCDISABLE:
5909		bfa_timer_stop(&dconf->timer);
5910		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5911		break;
5912	default:
5913		bfa_sm_fault(dconf->bfa, event);
5914	}
5915}
5916
5917/*
5918 * DCONF Module is in ready state. Has completed the initialization.
5919 */
5920static void
5921bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5922{
5923	bfa_trc(dconf->bfa, event);
5924
5925	switch (event) {
5926	case BFA_DCONF_SM_WR:
5927		bfa_timer_start(dconf->bfa, &dconf->timer,
5928			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5929		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5930		break;
5931	case BFA_DCONF_SM_EXIT:
5932		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5933		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5934		break;
5935	case BFA_DCONF_SM_INIT:
5936	case BFA_DCONF_SM_IOCDISABLE:
5937		break;
5938	default:
5939		bfa_sm_fault(dconf->bfa, event);
5940	}
5941}
5942
5943/*
5944 * Entries are dirty; write them back to flash.
5945 */
5946
5947static void
5948bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5949{
5950	bfa_trc(dconf->bfa, event);
5951
5952	switch (event) {
5953	case BFA_DCONF_SM_TIMEOUT:
5954		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5955		bfa_dconf_flash_write(dconf);
5956		break;
5957	case BFA_DCONF_SM_WR:
5958		bfa_timer_stop(&dconf->timer);
5959		bfa_timer_start(dconf->bfa, &dconf->timer,
5960			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5961		break;
5962	case BFA_DCONF_SM_EXIT:
5963		bfa_timer_stop(&dconf->timer);
5964		bfa_timer_start(dconf->bfa, &dconf->timer,
5965			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5966		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5967		bfa_dconf_flash_write(dconf);
5968		break;
5969	case BFA_DCONF_SM_FLASH_COMP:
5970		break;
5971	case BFA_DCONF_SM_IOCDISABLE:
5972		bfa_timer_stop(&dconf->timer);
5973		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5974		break;
5975	default:
5976		bfa_sm_fault(dconf->bfa, event);
5977	}
5978}
5979
5980/*
5981 * Sync the dconf entries to the flash.
5982 */
5983static void
5984bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5985			enum bfa_dconf_event event)
5986{
5987	bfa_trc(dconf->bfa, event);
5988
5989	switch (event) {
5990	case BFA_DCONF_SM_IOCDISABLE:
5991	case BFA_DCONF_SM_FLASH_COMP:
5992		bfa_timer_stop(&dconf->timer);
5993		fallthrough;
5994	case BFA_DCONF_SM_TIMEOUT:
5995		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5996		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5997		break;
5998	default:
5999		bfa_sm_fault(dconf->bfa, event);
6000	}
6001}
6002
6003static void
6004bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
6005{
6006	bfa_trc(dconf->bfa, event);
6007
6008	switch (event) {
6009	case BFA_DCONF_SM_FLASH_COMP:
6010		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
6011		break;
6012	case BFA_DCONF_SM_WR:
6013		bfa_timer_start(dconf->bfa, &dconf->timer,
6014			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6015		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6016		break;
6017	case BFA_DCONF_SM_EXIT:
6018		bfa_timer_start(dconf->bfa, &dconf->timer,
6019			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6020		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
6021		break;
6022	case BFA_DCONF_SM_IOCDISABLE:
6023		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
6024		break;
6025	default:
6026		bfa_sm_fault(dconf->bfa, event);
6027	}
6028}
6029
6030static void
6031bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
6032			enum bfa_dconf_event event)
6033{
6034	bfa_trc(dconf->bfa, event);
6035
6036	switch (event) {
6037	case BFA_DCONF_SM_INIT:
6038		bfa_timer_start(dconf->bfa, &dconf->timer,
6039			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6040		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6041		break;
6042	case BFA_DCONF_SM_EXIT:
6043		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6044		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6045		break;
6046	case BFA_DCONF_SM_IOCDISABLE:
6047		break;
6048	default:
6049		bfa_sm_fault(dconf->bfa, event);
6050	}
6051}
6052
6053/*
6054 * Compute and return memory needed by DRV_CFG module.
6055 */
6056void
6057bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
6058		  struct bfa_s *bfa)
6059{
6060	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
6061
6062	if (cfg->drvcfg.min_cfg)
6063		bfa_mem_kva_setup(meminfo, dconf_kva,
6064				sizeof(struct bfa_dconf_hdr_s));
6065	else
6066		bfa_mem_kva_setup(meminfo, dconf_kva,
6067				sizeof(struct bfa_dconf_s));
6068}
6069
6070void
6071bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
6072{
6073	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6074
6075	dconf->bfad = bfad;
6076	dconf->bfa = bfa;
6077	dconf->instance = bfa->ioc.port_id;
6078	bfa_trc(bfa, dconf->instance);
6079
6080	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
6081	if (cfg->drvcfg.min_cfg) {
6082		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
6083		dconf->min_cfg = BFA_TRUE;
6084	} else {
6085		dconf->min_cfg = BFA_FALSE;
6086		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
6087	}
6088
6089	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
6090	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6091}
6092
6093static void
6094bfa_dconf_init_cb(void *arg, bfa_status_t status)
6095{
6096	struct bfa_s *bfa = arg;
6097	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6098
6099	if (status == BFA_STATUS_OK) {
6100		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
6101		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
6102			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
6103		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
6104			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
6105	}
6106	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6107	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
6108}
6109
6110void
6111bfa_dconf_modinit(struct bfa_s *bfa)
6112{
6113	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6114	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
6115}
6116
6117static void bfa_dconf_timer(void *cbarg)
6118{
6119	struct bfa_dconf_mod_s *dconf = cbarg;
6120	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
6121}
6122
6123void
6124bfa_dconf_iocdisable(struct bfa_s *bfa)
6125{
6126	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6127	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
6128}
6129
6130static bfa_status_t
6131bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
6132{
6133	bfa_status_t bfa_status;
6134	bfa_trc(dconf->bfa, 0);
6135
6136	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
6137				BFA_FLASH_PART_DRV, dconf->instance,
6138				dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
6139				bfa_dconf_cbfn, dconf);
6140	if (bfa_status != BFA_STATUS_OK)
6141		WARN_ON(bfa_status);
6142	bfa_trc(dconf->bfa, bfa_status);
6143
6144	return bfa_status;
6145}
6146
6147bfa_status_t
6148bfa_dconf_update(struct bfa_s *bfa)
6149{
6150	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6151	bfa_trc(dconf->bfa, 0);
6152	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
6153		return BFA_STATUS_FAILED;
6154
6155	if (dconf->min_cfg) {
6156		bfa_trc(dconf->bfa, dconf->min_cfg);
6157		return BFA_STATUS_FAILED;
6158	}
6159
6160	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
6161	return BFA_STATUS_OK;
6162}
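
/*
 * Example use of the update path above (an illustrative sketch): a
 * configuration change marks the in-memory dconf dirty, and the flash
 * write happens later on BFA_DCONF_SM_TIMEOUT, batching bursts of
 * updates into one write.  The example_* name is an assumption.
 */
static void
example_dconf_touch(struct bfa_s *bfa)
{
	/* fails when the IOC is down or in min-cfg mode */
	if (bfa_dconf_update(bfa) != BFA_STATUS_OK)
		bfa_trc(bfa, 0);
}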
6163
6164static void
6165bfa_dconf_cbfn(void *arg, bfa_status_t status)
6166{
6167	struct bfa_dconf_mod_s *dconf = arg;
6168	WARN_ON(status);
6169	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6170}
6171
6172void
6173bfa_dconf_modexit(struct bfa_s *bfa)
6174{
6175	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6176	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6177}
6178
6179/*
6180 * FRU specific functions
6181 */
6182
6183#define BFA_FRU_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
6184#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
6185#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6186
6187static void
6188bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
6189{
6190	struct bfa_fru_s *fru = cbarg;
6191
6192	bfa_trc(fru, event);
6193
6194	switch (event) {
6195	case BFA_IOC_E_DISABLED:
6196	case BFA_IOC_E_FAILED:
6197		if (fru->op_busy) {
6198			fru->status = BFA_STATUS_IOC_FAILURE;
6199			fru->cbfn(fru->cbarg, fru->status);
6200			fru->op_busy = 0;
6201		}
6202		break;
6203
6204	default:
6205		break;
6206	}
6207}
6208
6209/*
6210 * Send fru write request.
6211 *
6212 * @param[in] cbarg - callback argument
 * @param[in] msg_type - fru write request message type
6213 */
6214static void
6215bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6216{
6217	struct bfa_fru_s *fru = cbarg;
6218	struct bfi_fru_write_req_s *msg =
6219			(struct bfi_fru_write_req_s *) fru->mb.msg;
6220	u32 len;
6221
6222	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6223	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6224				fru->residue : BFA_FRU_DMA_BUF_SZ;
6225	msg->length = cpu_to_be32(len);
6226
6227	/*
6228	 * indicate if it's the last msg of the whole write operation
6229	 */
6230	msg->last = (len == fru->residue) ? 1 : 0;
6231
6232	msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6233	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6234	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6235
6236	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6237	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6238
6239	fru->residue -= len;
6240	fru->offset += len;
6241}
6242
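/*
 * Worked example of the chunking above: with BFA_FRU_DMA_BUF_SZ = 0x2000
 * (8192 bytes), a 20000-byte update goes out as three requests of 8192,
 * 8192 and 3616 bytes.  Only the final request (len == fru->residue) has
 * msg->last set, and only it carries the caller's trfr_cmpl flag.
 */
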
6243/*
6244 * Send fru read request.
6245 *
6246 * @param[in] cbarg - callback argument
 * @param[in] msg_type - fru read request message type
6247 */
6248static void
6249bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6250{
6251	struct bfa_fru_s *fru = cbarg;
6252	struct bfi_fru_read_req_s *msg =
6253			(struct bfi_fru_read_req_s *) fru->mb.msg;
6254	u32 len;
6255
6256	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6257	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6258				fru->residue : BFA_FRU_DMA_BUF_SZ;
6259	msg->length = cpu_to_be32(len);
6260	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6261	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6262	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6263}
6264
6265/*
6266 * FRU memory info API.
6267 *
6268 * @param[in] mincfg - minimal cfg variable
6269 */
6270u32
6271bfa_fru_meminfo(bfa_boolean_t mincfg)
6272{
6273	/* min driver doesn't need fru */
6274	if (mincfg)
6275		return 0;
6276
6277	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6278}
6279
6280/*
6281 * FRU attach API.
6282 *
6283 * @param[in] fru - fru structure
6284 * @param[in] ioc  - ioc structure
6285 * @param[in] dev  - device structure
6286 * @param[in] trcmod - trace module
6287 * @param[in] mincfg - minimal cfg variable
6288 */
6289void
6290bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6291	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6292{
6293	fru->ioc = ioc;
6294	fru->trcmod = trcmod;
6295	fru->cbfn = NULL;
6296	fru->cbarg = NULL;
6297	fru->op_busy = 0;
6298
6299	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6300	bfa_q_qe_init(&fru->ioc_notify);
6301	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6302	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6303
6304	/* min driver doesn't need fru */
6305	if (mincfg) {
6306		fru->dbuf_kva = NULL;
6307		fru->dbuf_pa = 0;
6308	}
6309}
6310
6311/*
6312 * Claim memory for fru
6313 *
6314 * @param[in] fru - fru structure
6315 * @param[in] dm_kva - pointer to virtual memory address
6316 * @param[in] dm_pa - physical memory address
6317 * @param[in] mincfg - minimal cfg variable
6318 */
6319void
6320bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6321	bfa_boolean_t mincfg)
6322{
6323	if (mincfg)
6324		return;
6325
6326	fru->dbuf_kva = dm_kva;
6327	fru->dbuf_pa = dm_pa;
6328	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
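	/*
	 * NB: dm_kva and dm_pa are passed by value, so the increments
	 * below update local copies only and are never seen by the
	 * caller.
	 */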
6329	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6330	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6331}
6332
6333/*
6334 * Update fru vpd image.
6335 *
6336 * @param[in] fru - fru structure
6337 * @param[in] buf - update data buffer
6338 * @param[in] len - data buffer length
6339 * @param[in] offset - offset relative to starting address
6340 * @param[in] cbfn - callback function
6341 * @param[in] cbarg - callback argument
6342 *
6343 * Return status.
6344 */
6345bfa_status_t
6346bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6347		  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6348{
6349	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6350	bfa_trc(fru, len);
6351	bfa_trc(fru, offset);
6352
6353	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6354		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6355		return BFA_STATUS_FRU_NOT_PRESENT;
6356
6357	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6358		return BFA_STATUS_CMD_NOTSUPP;
6359
6360	if (!bfa_ioc_is_operational(fru->ioc))
6361		return BFA_STATUS_IOC_NON_OP;
6362
6363	if (fru->op_busy) {
6364		bfa_trc(fru, fru->op_busy);
6365		return BFA_STATUS_DEVBUSY;
6366	}
6367
6368	fru->op_busy = 1;
6369
6370	fru->cbfn = cbfn;
6371	fru->cbarg = cbarg;
6372	fru->residue = len;
6373	fru->offset = 0;
6374	fru->addr_off = offset;
6375	fru->ubuf = buf;
6376	fru->trfr_cmpl = trfr_cmpl;
6377
6378	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6379
6380	return BFA_STATUS_OK;
6381}
6382
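/*
 * Illustrative caller sketch (hypothetical helper names, not part of
 * the driver): the update is asynchronous, so completion is awaited
 * through the callback, e.g. by bridging to a struct completion:
 *
 *	static void my_fru_cb(void *cbarg, bfa_status_t status)
 *	{
 *		complete((struct completion *)cbarg);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	if (bfa_fruvpd_update(fru, buf, len, 0, my_fru_cb, &done,
 *			      1) == BFA_STATUS_OK)
 *		wait_for_completion(&done);
 */
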
6383/*
6384 * Read fru vpd image.
6385 *
6386 * @param[in] fru - fru structure
6387 * @param[in] buf - read data buffer
6388 * @param[in] len - data buffer length
6389 * @param[in] offset - offset relative to starting address
6390 * @param[in] cbfn - callback function
6391 * @param[in] cbarg - callback argument
6392 *
6393 * Return status.
6394 */
6395bfa_status_t
6396bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6397		bfa_cb_fru_t cbfn, void *cbarg)
6398{
6399	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6400	bfa_trc(fru, len);
6401	bfa_trc(fru, offset);
6402
6403	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6404		return BFA_STATUS_FRU_NOT_PRESENT;
6405
6406	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6407		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6408		return BFA_STATUS_CMD_NOTSUPP;
6409
6410	if (!bfa_ioc_is_operational(fru->ioc))
6411		return BFA_STATUS_IOC_NON_OP;
6412
6413	if (fru->op_busy) {
6414		bfa_trc(fru, fru->op_busy);
6415		return BFA_STATUS_DEVBUSY;
6416	}
6417
6418	fru->op_busy = 1;
6419
6420	fru->cbfn = cbfn;
6421	fru->cbarg = cbarg;
6422	fru->residue = len;
6423	fru->offset = 0;
6424	fru->addr_off = offset;
6425	fru->ubuf = buf;
6426	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6427
6428	return BFA_STATUS_OK;
6429}
6430
6431/*
6432 * Get maximum size fru vpd image.
6433 *
6434 * @param[in] fru - fru structure
6435 * @param[out] max_size - maximum size of fru vpd data
6436 *
6437 * Return status.
6438 */
6439bfa_status_t
6440bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6441{
6442	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6443		return BFA_STATUS_FRU_NOT_PRESENT;
6444
6445	if (!bfa_ioc_is_operational(fru->ioc))
6446		return BFA_STATUS_IOC_NON_OP;
6447
6448	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6449		fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6450		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6451	else
6452		return BFA_STATUS_CMD_NOTSUPP;
6453	return BFA_STATUS_OK;
6454}
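
/*
 * Typical use of the size query (sketch; my_fru_cb/my_arg are
 * hypothetical, as above): bound a caller-supplied length before
 * starting a transfer.
 *
 *	u32 max;
 *
 *	if (bfa_fruvpd_get_max_size(fru, &max) == BFA_STATUS_OK &&
 *	    len <= max)
 *		bfa_fruvpd_read(fru, buf, len, 0, my_fru_cb, my_arg);
 */
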
6455/*
6456 * tfru write.
6457 *
6458 * @param[in] fru - fru structure
6459 * @param[in] buf - update data buffer
6460 * @param[in] len - data buffer length
6461 * @param[in] offset - offset relative to starting address
6462 * @param[in] cbfn - callback function
6463 * @param[in] cbarg - callback argument
6464 *
6465 * Return status.
6466 */
6467bfa_status_t
6468bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6469	       bfa_cb_fru_t cbfn, void *cbarg)
6470{
6471	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6472	bfa_trc(fru, len);
6473	bfa_trc(fru, offset);
6474	bfa_trc(fru, *((u8 *) buf));
6475
6476	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6477		return BFA_STATUS_FRU_NOT_PRESENT;
6478
6479	if (!bfa_ioc_is_operational(fru->ioc))
6480		return BFA_STATUS_IOC_NON_OP;
6481
6482	if (fru->op_busy) {
6483		bfa_trc(fru, fru->op_busy);
6484		return BFA_STATUS_DEVBUSY;
6485	}
6486
6487	fru->op_busy = 1;
6488
6489	fru->cbfn = cbfn;
6490	fru->cbarg = cbarg;
6491	fru->residue = len;
6492	fru->offset = 0;
6493	fru->addr_off = offset;
6494	fru->ubuf = buf;
6495
6496	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6497
6498	return BFA_STATUS_OK;
6499}
6500
6501/*
6502 * tfru read.
6503 *
6504 * @param[in] fru - fru structure
6505 * @param[in] buf - read data buffer
6506 * @param[in] len - data buffer length
6507 * @param[in] offset - offset relative to starting address
6508 * @param[in] cbfn - callback function
6509 * @param[in] cbarg - callback argument
6510 *
6511 * Return status.
6512 */
6513bfa_status_t
6514bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6515	      bfa_cb_fru_t cbfn, void *cbarg)
6516{
6517	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6518	bfa_trc(fru, len);
6519	bfa_trc(fru, offset);
6520
6521	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6522		return BFA_STATUS_FRU_NOT_PRESENT;
6523
6524	if (!bfa_ioc_is_operational(fru->ioc))
6525		return BFA_STATUS_IOC_NON_OP;
6526
6527	if (fru->op_busy) {
6528		bfa_trc(fru, fru->op_busy);
6529		return BFA_STATUS_DEVBUSY;
6530	}
6531
6532	fru->op_busy = 1;
6533
6534	fru->cbfn = cbfn;
6535	fru->cbarg = cbarg;
6536	fru->residue = len;
6537	fru->offset = 0;
6538	fru->addr_off = offset;
6539	fru->ubuf = buf;
6540	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6541
6542	return BFA_STATUS_OK;
6543}
6544
6545/*
6546 * Process fru response messages upon receiving interrupts.
 *
 * For chunked transfers, each write/read response here kicks off the
 * next chunk; the user callback fires once the residue reaches zero,
 * or immediately on the first error.
6547 *
6548 * @param[in] fruarg - fru structure
6549 * @param[in] msg - message structure
6550 */
6551void
6552bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6553{
6554	struct bfa_fru_s *fru = fruarg;
6555	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6556	u32 status;
6557
6558	bfa_trc(fru, msg->mh.msg_id);
6559
6560	if (!fru->op_busy) {
6561		/*
6562		 * receiving response after ioc failure
6563		 */
6564		bfa_trc(fru, 0x9999);
6565		return;
6566	}
6567
6568	switch (msg->mh.msg_id) {
6569	case BFI_FRUVPD_I2H_WRITE_RSP:
6570	case BFI_TFRU_I2H_WRITE_RSP:
6571		status = be32_to_cpu(rsp->status);
6572		bfa_trc(fru, status);
6573
6574		if (status != BFA_STATUS_OK || fru->residue == 0) {
6575			fru->status = status;
6576			fru->op_busy = 0;
6577			if (fru->cbfn)
6578				fru->cbfn(fru->cbarg, fru->status);
6579		} else {
6580			bfa_trc(fru, fru->offset);
6581			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6582				bfa_fru_write_send(fru,
6583					BFI_FRUVPD_H2I_WRITE_REQ);
6584			else
6585				bfa_fru_write_send(fru,
6586					BFI_TFRU_H2I_WRITE_REQ);
6587		}
6588		break;
6589	case BFI_FRUVPD_I2H_READ_RSP:
6590	case BFI_TFRU_I2H_READ_RSP:
6591		status = be32_to_cpu(rsp->status);
6592		bfa_trc(fru, status);
6593
6594		if (status != BFA_STATUS_OK) {
6595			fru->status = status;
6596			fru->op_busy = 0;
6597			if (fru->cbfn)
6598				fru->cbfn(fru->cbarg, fru->status);
6599		} else {
6600			u32 len = be32_to_cpu(rsp->length);
6601
6602			bfa_trc(fru, fru->offset);
6603			bfa_trc(fru, len);
6604
6605			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6606			fru->residue -= len;
6607			fru->offset += len;
6608
6609			if (fru->residue == 0) {
6610				fru->status = status;
6611				fru->op_busy = 0;
6612				if (fru->cbfn)
6613					fru->cbfn(fru->cbarg, fru->status);
6614			} else {
6615				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6616					bfa_fru_read_send(fru,
6617						BFI_FRUVPD_H2I_READ_REQ);
6618				else
6619					bfa_fru_read_send(fru,
6620						BFI_TFRU_H2I_READ_REQ);
6621			}
6622		}
6623		break;
6624	default:
6625		WARN_ON(1);
6626	}
6627}
6628
6629/*
6630 * register definitions
6631 */
6632#define FLI_CMD_REG			0x0001d000
6633#define FLI_RDDATA_REG			0x0001d010
6634#define FLI_ADDR_REG			0x0001d004
6635#define FLI_DEV_STATUS_REG		0x0001d014
6636
6637#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
6638#define BFA_FLASH_CHECK_MAX		10000	/* max # of status checks */
6639#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op checks */
6640#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */
6641
6642enum bfa_flash_cmd {
6643	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
6644	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
6645};
6646
6647/*
6648 * Hardware error definition
6649 */
6650enum bfa_flash_err {
6651	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
6652	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
6653	BFA_FLASH_BAD		= -3,	/*!< flash bad */
6654	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
6655	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
6656	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
6657	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
6658	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
6659	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
6660};
6661
6662/*
6663 * Flash command register data structure
6664 */
6665union bfa_flash_cmd_reg_u {
6666	struct {
6667#ifdef __BIG_ENDIAN
6668		u32	act:1;
6669		u32	rsv:1;
6670		u32	write_cnt:9;
6671		u32	read_cnt:9;
6672		u32	addr_cnt:4;
6673		u32	cmd:8;
6674#else
6675		u32	cmd:8;
6676		u32	addr_cnt:4;
6677		u32	read_cnt:9;
6678		u32	write_cnt:9;
6679		u32	rsv:1;
6680		u32	act:1;
6681#endif
6682	} r;
6683	u32	i;
6684};
6685
6686/*
6687 * Flash device status register data structure
6688 */
6689union bfa_flash_dev_status_reg_u {
6690	struct {
6691#ifdef __BIG_ENDIAN
6692		u32	rsv:21;
6693		u32	fifo_cnt:6;
6694		u32	busy:1;
6695		u32	init_status:1;
6696		u32	present:1;
6697		u32	bad:1;
6698		u32	good:1;
6699#else
6700		u32	good:1;
6701		u32	bad:1;
6702		u32	present:1;
6703		u32	init_status:1;
6704		u32	busy:1;
6705		u32	fifo_cnt:6;
6706		u32	rsv:21;
6707#endif
6708	} r;
6709	u32	i;
6710};
6711
6712/*
6713 * Flash address register data structure
6714 */
6715union bfa_flash_addr_reg_u {
6716	struct {
6717#ifdef __BIG_ENDIAN
6718		u32	addr:24;
6719		u32	dummy:8;
6720#else
6721		u32	dummy:8;
6722		u32	addr:24;
6723#endif
6724	} r;
6725	u32	i;
6726};
6727
6728/*
6729 * Flash raw private functions
6730 */
6731static void
6732bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
6733		  u8 rd_cnt, u8 ad_cnt, u8 op)
6734{
6735	union bfa_flash_cmd_reg_u cmd;
6736
6737	cmd.i = 0;
6738	cmd.r.act = 1;
6739	cmd.r.write_cnt = wr_cnt;
6740	cmd.r.read_cnt = rd_cnt;
6741	cmd.r.addr_cnt = ad_cnt;
6742	cmd.r.cmd = op;
6743	writel(cmd.i, (pci_bar + FLI_CMD_REG));
6744}
6745
6746static void
6747bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
6748{
6749	union bfa_flash_addr_reg_u addr;
6750
6751	addr.r.addr = address & 0x00ffffff;
6752	addr.r.dummy = 0;
6753	writel(addr.i, (pci_bar + FLI_ADDR_REG));
6754}
6755
6756static int
6757bfa_flash_cmd_act_check(void __iomem *pci_bar)
6758{
6759	union bfa_flash_cmd_reg_u cmd;
6760
6761	cmd.i = readl(pci_bar + FLI_CMD_REG);
6762
6763	if (cmd.r.act)
6764		return BFA_FLASH_ERR_CMD_ACT;
6765
6766	return 0;
6767}
6768
6769/*
6770 * @brief
6771 * Flush FLI data fifo.
6772 *
6773 * @param[in] pci_bar - pci bar address
6775 *
6776 * Return 0 on success, negative error number on error.
6777 */
6778static int
6779bfa_flash_fifo_flush(void __iomem *pci_bar)
6780{
6781	u32 i;
6782	union bfa_flash_dev_status_reg_u dev_status;
6783
6784	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6785
6786	if (!dev_status.r.fifo_cnt)
6787		return 0;
6788
6789	/* fifo counter in terms of words */
6790	for (i = 0; i < dev_status.r.fifo_cnt; i++)
6791		readl(pci_bar + FLI_RDDATA_REG);
6792
6793	/*
6794	 * Check the device status. It may take some time.
6795	 */
6796	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6797		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6798		if (!dev_status.r.fifo_cnt)
6799			break;
6800	}
6801
6802	if (dev_status.r.fifo_cnt)
6803		return BFA_FLASH_ERR_FIFO_CNT;
6804
6805	return 0;
6806}
6807
6808/*
6809 * @brief
6810 * Read flash status.
6811 *
6812 * @param[in] pci_bar - pci bar address
6813 *
6814 * Return the flash status byte (>= 0) on success, negative error
 * number on error.
6815 */
6816static int
6817bfa_flash_status_read(void __iomem *pci_bar)
6818{
6819	union bfa_flash_dev_status_reg_u	dev_status;
6820	int				status;
6821	u32			ret_status;
6822	int				i;
6823
6824	status = bfa_flash_fifo_flush(pci_bar);
6825	if (status < 0)
6826		return status;
6827
6828	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
6829
6830	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6831		status = bfa_flash_cmd_act_check(pci_bar);
6832		if (!status)
6833			break;
6834	}
6835
6836	if (status)
6837		return status;
6838
6839	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6840	if (!dev_status.r.fifo_cnt)
6841		return BFA_FLASH_BUSY;
6842
6843	ret_status = readl(pci_bar + FLI_RDDATA_REG);
6844	ret_status >>= 24;
6845
6846	status = bfa_flash_fifo_flush(pci_bar);
6847	if (status < 0)
6848		return status;
6849
6850	return ret_status;
6851}
6852
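/*
 * Note on the return value: the READ_STATUS reply arrives as a single
 * fifo word with the status byte in bits 31:24 (hence the >> 24 above);
 * bit 0 of that byte is the write-in-progress flag tested through
 * BFA_FLASH_WIP_MASK by bfa_flash_read_start() below.
 */
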
6853/*
6854 * @brief
6855 * Start flash read operation.
6856 *
6857 * @param[in] pci_bar - pci bar address
6858 * @param[in] offset - flash address offset
6859 * @param[in] len - read data length
6860 * @param[in] buf - read data buffer
6861 *
6862 * Return 0 on success, negative error number on error.
6863 */
6864static int
6865bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
6866			 char *buf)
6867{
6868	int status;
6869
6870	/*
6871	 * len must be a multiple of 4 and must not exceed the fifo size
6872	 */
6873	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
6874		return BFA_FLASH_ERR_LEN;
6875
6876	/*
6877	 * check status
6878	 */
6879	status = bfa_flash_status_read(pci_bar);
6880	if (status == BFA_FLASH_BUSY)
6881		status = bfa_flash_status_read(pci_bar);
6882
6883	if (status < 0)
6884		return status;
6885
6886	/*
6887	 * check if write-in-progress bit is cleared
6888	 */
6889	if (status & BFA_FLASH_WIP_MASK)
6890		return BFA_FLASH_ERR_WIP;
6891
6892	bfa_flash_set_addr(pci_bar, offset);
6893
6894	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
6895
6896	return 0;
6897}
6898
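/*
 * Together with bfa_flash_read_start() above, the two helpers below
 * complete the raw read protocol: bfa_flash_read_check() polls until
 * the FAST_READ command deasserts its active bit, and
 * bfa_flash_read_end() drains the fifo into the caller's buffer.
 * bfa_flash_raw_read() at the bottom of the file drives this sequence
 * once per chunk.
 */
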
6899/*
6900 * @brief
6901 * Check flash read operation.
6902 *
6903 * @param[in] pci_bar - pci bar address
6904 *
6905 * Return 1 if the read command is still active (busy), 0 if it has
 * completed.
6906 */
6907static u32
6908bfa_flash_read_check(void __iomem *pci_bar)
6909{
6910	if (bfa_flash_cmd_act_check(pci_bar))
6911		return 1;
6912
6913	return 0;
6914}
6915
6916/*
6917 * @brief
6918 * End flash read operation.
6919 *
6920 * @param[in] pci_bar - pci bar address
6921 * @param[in] len - read data length
6922 * @param[in] buf - read data buffer
6923 *
6924 */
6925static void
6926bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
6927{
6929	u32 i;
6930
6931	/*
6932	 * read data fifo up to 32 words
6933	 */
6934	for (i = 0; i < len; i += 4) {
6935		u32 w = readl(pci_bar + FLI_RDDATA_REG);
6936		*((u32 *) (buf + i)) = swab32(w);
6937	}
6938
6939	bfa_flash_fifo_flush(pci_bar);
6940}
6941
6955#define FLASH_BLOCKING_OP_MAX   500
6956#define FLASH_SEM_LOCK_REG	0x18820
6957
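/*
 * Flash hardware semaphore helpers: a read of FLASH_SEM_LOCK_REG
 * returns 0 when the lock was free (acquiring it in the same access)
 * and nonzero when it is already held; writing 0 releases it.
 * bfa_flash_sem_get() retries for up to FLASH_BLOCKING_OP_MAX * 10 ms
 * (about 5 seconds) before giving up with BFA_STATUS_BADFLASH.
 */
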
6958static int
6959bfa_raw_sem_get(void __iomem *bar)
6960{
6961	int	locked;
6962
6963	locked = readl((bar + FLASH_SEM_LOCK_REG));
6964	return !locked;
6966}
6967
6968static bfa_status_t
6969bfa_flash_sem_get(void __iomem *bar)
6970{
6971	u32 n = FLASH_BLOCKING_OP_MAX;
6972
6973	while (!bfa_raw_sem_get(bar)) {
6974		if (--n <= 0)
6975			return BFA_STATUS_BADFLASH;
6976		mdelay(10);
6977	}
6978	return BFA_STATUS_OK;
6979}
6980
6981static void
6982bfa_flash_sem_put(void __iomem *bar)
6983{
6984	writel(0, (bar + FLASH_SEM_LOCK_REG));
6985}
6986
/*
 * @brief
 * Perform flash raw read.
 *
 * @param[in] pci_bar - pci bar address
 * @param[in] offset - flash partition address offset
 * @param[in] buf - read data buffer
 * @param[in] len - read data length
 *
 * Return status.
 */
6987bfa_status_t
6988bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
6989		       u32 len)
6990{
6991	u32 n;
6992	int status;
6993	u32 off, l, s, residue, fifo_sz;
6994
6995	residue = len;
6996	off = 0;
6997	fifo_sz = BFA_FLASH_FIFO_SIZE;
6998	status = bfa_flash_sem_get(pci_bar);
6999	if (status != BFA_STATUS_OK)
7000		return status;
7001
7002	while (residue) {
7003		s = offset + off;
7004		n = s / fifo_sz;
7005		l = (n + 1) * fifo_sz - s;
7006		if (l > residue)
7007			l = residue;
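		/*
		 * Each pass reads only up to the next BFA_FLASH_FIFO_SIZE
		 * boundary so a transfer never straddles the fifo.  E.g.
		 * offset 100, len 300 yields chunks of 28, 128, 128 and
		 * 16 bytes.
		 */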
7008
7009		status = bfa_flash_read_start(pci_bar, offset + off, l,
7010								&buf[off]);
7011		if (status < 0) {
7012			bfa_flash_sem_put(pci_bar);
7013			return BFA_STATUS_FAILED;
7014		}
7015
7016		n = BFA_FLASH_BLOCKING_OP_MAX;
7017		while (bfa_flash_read_check(pci_bar)) {
7018			if (--n <= 0) {
7019				bfa_flash_sem_put(pci_bar);
7020				return BFA_STATUS_FAILED;
7021			}
7022		}
7023
7024		bfa_flash_read_end(pci_bar, l, &buf[off]);
7025
7026		residue -= l;
7027		off += l;
7028	}
7029	bfa_flash_sem_put(pci_bar);
7030
7031	return BFA_STATUS_OK;
7032}
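
/*
 * Usage sketch (hypothetical bar/offset values): read 256 bytes from a
 * flash partition without firmware assistance.  Offsets and lengths
 * should be 32-bit aligned, since every chunk is validated against the
 * multiple-of-4 rule in bfa_flash_read_start().
 *
 *	char buf[256];
 *
 *	if (bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, part_off, buf,
 *			       sizeof(buf)) != BFA_STATUS_OK)
 *		return BFA_STATUS_FAILED;
 */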
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
   4 * Copyright (c) 2014- QLogic Corporation.
   5 * All rights reserved
   6 * www.qlogic.com
   7 *
   8 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
   9 */
  10
  11#include "bfad_drv.h"
  12#include "bfad_im.h"
  13#include "bfa_ioc.h"
  14#include "bfi_reg.h"
  15#include "bfa_defs.h"
  16#include "bfa_defs_svc.h"
  17#include "bfi.h"
  18
  19BFA_TRC_FILE(CNA, IOC);
  20
  21/*
  22 * IOC local definitions
  23 */
  24#define BFA_IOC_TOV		3000	/* msecs */
  25#define BFA_IOC_HWSEM_TOV	500	/* msecs */
  26#define BFA_IOC_HB_TOV		500	/* msecs */
  27#define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
  28#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
  29
  30#define bfa_ioc_timer_start(__ioc)					\
  31	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
  32			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
  33#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
  34
  35#define bfa_hb_timer_start(__ioc)					\
  36	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
  37			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
  38#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
  39
  40#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
  41
  42#define bfa_ioc_state_disabled(__sm)		\
  43	(((__sm) == BFI_IOC_UNINIT) ||		\
  44	((__sm) == BFI_IOC_INITING) ||		\
  45	((__sm) == BFI_IOC_HWINIT) ||		\
  46	((__sm) == BFI_IOC_DISABLED) ||		\
  47	((__sm) == BFI_IOC_FAIL) ||		\
  48	((__sm) == BFI_IOC_CFG_DISABLED))
  49
  50/*
  51 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
  52 */
  53
  54#define bfa_ioc_firmware_lock(__ioc)			\
  55			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
  56#define bfa_ioc_firmware_unlock(__ioc)			\
  57			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
  58#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
  59#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
  60#define bfa_ioc_notify_fail(__ioc)              \
  61			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
  62#define bfa_ioc_sync_start(__ioc)               \
  63			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
  64#define bfa_ioc_sync_join(__ioc)                \
  65			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
  66#define bfa_ioc_sync_leave(__ioc)               \
  67			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
  68#define bfa_ioc_sync_ack(__ioc)                 \
  69			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
  70#define bfa_ioc_sync_complete(__ioc)            \
  71			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
  72#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
  73			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
  74#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
  75			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
  76#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
  77		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
  78#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
  79			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
  80
  81#define bfa_ioc_mbox_cmd_pending(__ioc)		\
  82			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
  83			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
  84
  85bfa_boolean_t bfa_auto_recover = BFA_TRUE;
  86
  87/*
  88 * forward declarations
  89 */
  90static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
  91static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
  92static void bfa_ioc_timeout(void *ioc);
  93static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
  94static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
  95static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
  96static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
  97static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
  98static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
  99static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
 100static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
 101static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
 102				enum bfa_ioc_event_e event);
 103static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 104static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
 105static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
 106static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 107static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
 108				struct bfi_ioc_image_hdr_s *base_fwhdr,
 109				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
 110static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
 111				struct bfa_ioc_s *ioc,
 112				struct bfi_ioc_image_hdr_s *base_fwhdr);
 113
 114/*
 115 * IOC state machine definitions/declarations
 116 */
 117enum ioc_event {
 118	IOC_E_RESET		= 1,	/*  IOC reset request		*/
 119	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
 120	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
 121	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
 122	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
 123	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
 124	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
 125	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
 126	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
 127	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
 128	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
 129	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
 130};
 131
 132bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
 133bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
 134bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
 135bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
 136bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
 137bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
 138bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
 139bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
 140bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
 141bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
 142
 143static struct bfa_sm_table_s ioc_sm_table[] = {
 144	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
 145	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
 146	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
 147	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
 148	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
 149	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
 150	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
 151	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
 152	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
 153	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
 154};
 155
 156/*
 157 * IOCPF state machine definitions/declarations
 158 */
 159
 160#define bfa_iocpf_timer_start(__ioc)					\
 161	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
 162			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
 163#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
 164
 165#define bfa_iocpf_poll_timer_start(__ioc)				\
 166	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
 167			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
 168
 169#define bfa_sem_timer_start(__ioc)					\
 170	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
 171			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
 172#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
 173
 174/*
 175 * Forward declareations for iocpf state machine
 176 */
 177static void bfa_iocpf_timeout(void *ioc_arg);
 178static void bfa_iocpf_sem_timeout(void *ioc_arg);
 179static void bfa_iocpf_poll_timeout(void *ioc_arg);
 180
 181/*
 182 * IOCPF state machine events
 183 */
 184enum iocpf_event {
 185	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
 186	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
 187	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
 188	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
 189	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
 190	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
 191	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
 192	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
 193	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
 194	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
 195	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
 196	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
 197};
 198
 199/*
 200 * IOCPF states
 201 */
 202enum bfa_iocpf_state {
 203	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
 204	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
 205	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
 206	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
 207	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
 208	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
 209	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
 210	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
 211	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from drivers */
 212};
 213
 214bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
 215bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
 216bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
 217bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
 218bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
 219bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
 220bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
 221bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
 222						enum iocpf_event);
 223bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
 224bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
 225bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
 226bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
 227bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
 228						enum iocpf_event);
 229bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
 230
 231static struct bfa_sm_table_s iocpf_sm_table[] = {
 232	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
 233	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
 234	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
 235	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
 236	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
 237	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
 238	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
 239	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
 240	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
 241	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
 242	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
 243	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
 244	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
 245	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
 246};
 247
 248/*
 249 * IOC State Machine
 250 */
 251
 252/*
 253 * Beginning state. IOC uninit state.
 254 */
 255
 256static void
 257bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
 258{
 259}
 260
 261/*
 262 * IOC is in uninit state.
 263 */
 264static void
 265bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
 266{
 267	bfa_trc(ioc, event);
 268
 269	switch (event) {
 270	case IOC_E_RESET:
 271		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
 272		break;
 273
 274	default:
 275		bfa_sm_fault(ioc, event);
 276	}
 277}
 278/*
 279 * Reset entry actions -- initialize state machine
 280 */
 281static void
 282bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
 283{
 284	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
 285}
 286
 287/*
 288 * IOC is in reset state.
 289 */
 290static void
 291bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
 292{
 293	bfa_trc(ioc, event);
 294
 295	switch (event) {
 296	case IOC_E_ENABLE:
 297		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
 298		break;
 299
 300	case IOC_E_DISABLE:
 301		bfa_ioc_disable_comp(ioc);
 302		break;
 303
 304	case IOC_E_DETACH:
 305		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 306		break;
 307
 308	default:
 309		bfa_sm_fault(ioc, event);
 310	}
 311}
 312
 313
 314static void
 315bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
 316{
 317	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
 318}
 319
 320/*
 321 * Host IOC function is being enabled, awaiting response from firmware.
 322 * Semaphore is acquired.
 323 */
 324static void
 325bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
 326{
 327	bfa_trc(ioc, event);
 328
 329	switch (event) {
 330	case IOC_E_ENABLED:
 331		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 332		break;
 333
 334	case IOC_E_PFFAILED:
 335		/* !!! fall through !!! */
 336	case IOC_E_HWERROR:
 337		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 338		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 339		if (event != IOC_E_PFFAILED)
 340			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 341		break;
 342
 343	case IOC_E_HWFAILED:
 344		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 345		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
 346		break;
 347
 348	case IOC_E_DISABLE:
 349		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 350		break;
 351
 352	case IOC_E_DETACH:
 353		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 354		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 355		break;
 356
 357	case IOC_E_ENABLE:
 358		break;
 359
 360	default:
 361		bfa_sm_fault(ioc, event);
 362	}
 363}
 364
 365
 366static void
 367bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
 368{
 369	bfa_ioc_timer_start(ioc);
 370	bfa_ioc_send_getattr(ioc);
 371}
 372
 373/*
 374 * IOC configuration in progress. Timer is active.
 375 */
 376static void
 377bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
 378{
 379	bfa_trc(ioc, event);
 380
 381	switch (event) {
 382	case IOC_E_FWRSP_GETATTR:
 383		bfa_ioc_timer_stop(ioc);
 384		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 385		break;
 386
 387	case IOC_E_PFFAILED:
 388	case IOC_E_HWERROR:
 389		bfa_ioc_timer_stop(ioc);
 390		fallthrough;
 391	case IOC_E_TIMEOUT:
 392		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 393		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 394		if (event != IOC_E_PFFAILED)
 395			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
 396		break;
 397
 398	case IOC_E_DISABLE:
 399		bfa_ioc_timer_stop(ioc);
 400		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 401		break;
 402
 403	case IOC_E_ENABLE:
 404		break;
 405
 406	default:
 407		bfa_sm_fault(ioc, event);
 408	}
 409}
 410
 411static void
 412bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
 413{
 414	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 415
 416	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
 417	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
 418	bfa_ioc_hb_monitor(ioc);
 419	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
 420	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
 421}
 422
 423static void
 424bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
 425{
 426	bfa_trc(ioc, event);
 427
 428	switch (event) {
 429	case IOC_E_ENABLE:
 430		break;
 431
 432	case IOC_E_DISABLE:
 433		bfa_hb_timer_stop(ioc);
 434		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 435		break;
 436
 437	case IOC_E_PFFAILED:
 438	case IOC_E_HWERROR:
 439		bfa_hb_timer_stop(ioc);
 440		fallthrough;
 441	case IOC_E_HBFAIL:
 442		if (ioc->iocpf.auto_recover)
 443			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
 444		else
 445			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 446
 447		bfa_ioc_fail_notify(ioc);
 448
 449		if (event != IOC_E_PFFAILED)
 450			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
 451		break;
 452
 453	default:
 454		bfa_sm_fault(ioc, event);
 455	}
 456}
 457
 458
 459static void
 460bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
 461{
 462	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 463	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
 464	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
 465	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
 466}
 467
 468/*
 469 * IOC is being disabled
 470 */
 471static void
 472bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
 473{
 474	bfa_trc(ioc, event);
 475
 476	switch (event) {
 477	case IOC_E_DISABLED:
 478		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
 479		break;
 480
 481	case IOC_E_HWERROR:
 482		/*
 483		 * No state change.  Will move to disabled state
 484		 * after iocpf sm completes failure processing and
 485		 * moves to disabled state.
 486		 */
 487		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
 488		break;
 489
 490	case IOC_E_HWFAILED:
 491		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
 492		bfa_ioc_disable_comp(ioc);
 493		break;
 494
 495	default:
 496		bfa_sm_fault(ioc, event);
 497	}
 498}
 499
 500/*
 501 * IOC disable completion entry.
 502 */
 503static void
 504bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
 505{
 506	bfa_ioc_disable_comp(ioc);
 507}
 508
 509static void
 510bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
 511{
 512	bfa_trc(ioc, event);
 513
 514	switch (event) {
 515	case IOC_E_ENABLE:
 516		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
 517		break;
 518
 519	case IOC_E_DISABLE:
 520		ioc->cbfn->disable_cbfn(ioc->bfa);
 521		break;
 522
 523	case IOC_E_DETACH:
 524		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 525		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 526		break;
 527
 528	default:
 529		bfa_sm_fault(ioc, event);
 530	}
 531}
 532
 533
 534static void
 535bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
 536{
 537	bfa_trc(ioc, 0);
 538}
 539
 540/*
 541 * Hardware initialization retry.
 542 */
 543static void
 544bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
 545{
 546	bfa_trc(ioc, event);
 547
 548	switch (event) {
 549	case IOC_E_ENABLED:
 550		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 551		break;
 552
 553	case IOC_E_PFFAILED:
 554	case IOC_E_HWERROR:
 555		/*
 556		 * Initialization retry failed.
 557		 */
 558		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 559		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 560		if (event != IOC_E_PFFAILED)
 561			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 562		break;
 563
 564	case IOC_E_HWFAILED:
 565		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 566		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
 567		break;
 568
 569	case IOC_E_ENABLE:
 570		break;
 571
 572	case IOC_E_DISABLE:
 573		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 574		break;
 575
 576	case IOC_E_DETACH:
 577		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 578		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 579		break;
 580
 581	default:
 582		bfa_sm_fault(ioc, event);
 583	}
 584}
 585
 586
 587static void
 588bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
 589{
 590	bfa_trc(ioc, 0);
 591}
 592
 593/*
 594 * IOC failure.
 595 */
 596static void
 597bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 598{
 599	bfa_trc(ioc, event);
 600
 601	switch (event) {
 602
 603	case IOC_E_ENABLE:
 604		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 605		break;
 606
 607	case IOC_E_DISABLE:
 608		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 609		break;
 610
 611	case IOC_E_DETACH:
 612		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 613		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 614		break;
 615
 616	case IOC_E_HWERROR:
 617	case IOC_E_HWFAILED:
 618		/*
 619		 * HB failure / HW error notification, ignore.
 620		 */
 621		break;
 622	default:
 623		bfa_sm_fault(ioc, event);
 624	}
 625}
 626
 627static void
 628bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
 629{
 630	bfa_trc(ioc, 0);
 631}
 632
 633static void
 634bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
 635{
 636	bfa_trc(ioc, event);
 637
 638	switch (event) {
 639	case IOC_E_ENABLE:
 640		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 641		break;
 642
 643	case IOC_E_DISABLE:
 644		ioc->cbfn->disable_cbfn(ioc->bfa);
 645		break;
 646
 647	case IOC_E_DETACH:
 648		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 649		break;
 650
 651	case IOC_E_HWERROR:
 652		/* Ignore - already in hwfail state */
 653		break;
 654
 655	default:
 656		bfa_sm_fault(ioc, event);
 657	}
 658}
 659
 660/*
 661 * IOCPF State Machine
 662 */
 663
 664/*
 665 * Reset entry actions -- initialize state machine
 666 */
 667static void
 668bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
 669{
 670	iocpf->fw_mismatch_notified = BFA_FALSE;
 671	iocpf->auto_recover = bfa_auto_recover;
 672}
 673
 674/*
 675 * Beginning state. IOC is in reset state.
 676 */
 677static void
 678bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 679{
 680	struct bfa_ioc_s *ioc = iocpf->ioc;
 681
 682	bfa_trc(ioc, event);
 683
 684	switch (event) {
 685	case IOCPF_E_ENABLE:
 686		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
 687		break;
 688
 689	case IOCPF_E_STOP:
 690		break;
 691
 692	default:
 693		bfa_sm_fault(ioc, event);
 694	}
 695}
 696
 697/*
 698 * Semaphore should be acquired for version check.
 699 */
 700static void
 701bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 702{
 703	struct bfi_ioc_image_hdr_s	fwhdr;
 704	u32	r32, fwstate, pgnum, loff = 0;
 705	int	i;
 706
 707	/*
 708	 * Spin on init semaphore to serialize.
 709	 */
 710	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 711	while (r32 & 0x1) {
 712		udelay(20);
 713		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 714	}
 715
 716	/* h/w sem init */
 717	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
 718	if (fwstate == BFI_IOC_UNINIT) {
 719		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 720		goto sem_get;
 721	}
 722
 723	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
 724
 725	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
 726		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 727		goto sem_get;
 728	}
 729
 730	/*
 731	 * Clear fwver hdr
 732	 */
 733	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
 734	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
 735
 736	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
 737		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
 738		loff += sizeof(u32);
 739	}
 740
 741	bfa_trc(iocpf->ioc, fwstate);
 742	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
 743	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
 744	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
 745
 746	/*
 747	 * Unlock the hw semaphore. Should be here only once per boot.
 748	 */
 749	bfa_ioc_ownership_reset(iocpf->ioc);
 750
 751	/*
 752	 * unlock init semaphore.
 753	 */
 754	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 755
 756sem_get:
 757	bfa_ioc_hw_sem_get(iocpf->ioc);
 758}
 759
 760/*
 761 * Awaiting h/w semaphore to continue with version check.
 762 */
 763static void
 764bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 765{
 766	struct bfa_ioc_s *ioc = iocpf->ioc;
 767
 768	bfa_trc(ioc, event);
 769
 770	switch (event) {
 771	case IOCPF_E_SEMLOCKED:
 772		if (bfa_ioc_firmware_lock(ioc)) {
 773			if (bfa_ioc_sync_start(ioc)) {
 774				bfa_ioc_sync_join(ioc);
 775				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
 776			} else {
 777				bfa_ioc_firmware_unlock(ioc);
 778				writel(1, ioc->ioc_regs.ioc_sem_reg);
 779				bfa_sem_timer_start(ioc);
 780			}
 781		} else {
 782			writel(1, ioc->ioc_regs.ioc_sem_reg);
 783			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
 784		}
 785		break;
 786
 787	case IOCPF_E_SEM_ERROR:
 788		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 789		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
 790		break;
 791
 792	case IOCPF_E_DISABLE:
 793		bfa_sem_timer_stop(ioc);
 794		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 795		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
 796		break;
 797
 798	case IOCPF_E_STOP:
 799		bfa_sem_timer_stop(ioc);
 800		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 801		break;
 802
 803	default:
 804		bfa_sm_fault(ioc, event);
 805	}
 806}
 807
 808/*
 809 * Notify enable completion callback.
 810 */
 811static void
 812bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
 813{
 814	/*
 815	 * Call only the first time sm enters fwmismatch state.
 816	 */
 817	if (iocpf->fw_mismatch_notified == BFA_FALSE)
 818		bfa_ioc_pf_fwmismatch(iocpf->ioc);
 819
 820	iocpf->fw_mismatch_notified = BFA_TRUE;
 821	bfa_iocpf_timer_start(iocpf->ioc);
 822}
 823
 824/*
 825 * Awaiting firmware version match.
 826 */
 827static void
 828bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 829{
 830	struct bfa_ioc_s *ioc = iocpf->ioc;
 831
 832	bfa_trc(ioc, event);
 833
 834	switch (event) {
 835	case IOCPF_E_TIMEOUT:
 836		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
 837		break;
 838
 839	case IOCPF_E_DISABLE:
 840		bfa_iocpf_timer_stop(ioc);
 841		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 842		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
 843		break;
 844
 845	case IOCPF_E_STOP:
 846		bfa_iocpf_timer_stop(ioc);
 847		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 848		break;
 849
 850	default:
 851		bfa_sm_fault(ioc, event);
 852	}
 853}
 854
 855/*
 856 * Request for semaphore.
 857 */
 858static void
 859bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
 860{
 861	bfa_ioc_hw_sem_get(iocpf->ioc);
 862}
 863
 864/*
 865 * Awaiting semaphore for h/w initialzation.
 866 */
 867static void
 868bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 869{
 870	struct bfa_ioc_s *ioc = iocpf->ioc;
 871
 872	bfa_trc(ioc, event);
 873
 874	switch (event) {
 875	case IOCPF_E_SEMLOCKED:
 876		if (bfa_ioc_sync_complete(ioc)) {
 877			bfa_ioc_sync_join(ioc);
 878			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
 879		} else {
 880			writel(1, ioc->ioc_regs.ioc_sem_reg);
 881			bfa_sem_timer_start(ioc);
 882		}
 883		break;
 884
 885	case IOCPF_E_SEM_ERROR:
 886		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 887		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
 888		break;
 889
 890	case IOCPF_E_DISABLE:
 891		bfa_sem_timer_stop(ioc);
 892		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 893		break;
 894
 895	default:
 896		bfa_sm_fault(ioc, event);
 897	}
 898}
 899
 900static void
 901bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
 902{
 903	iocpf->poll_time = 0;
 904	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
 905}
 906
 907/*
 908 * Hardware is being initialized. Interrupts are enabled.
 909 * Holding hardware semaphore lock.
 910 */
 911static void
 912bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 913{
 914	struct bfa_ioc_s *ioc = iocpf->ioc;
 915
 916	bfa_trc(ioc, event);
 917
 918	switch (event) {
 919	case IOCPF_E_FWREADY:
 920		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
 921		break;
 922
 923	case IOCPF_E_TIMEOUT:
 924		writel(1, ioc->ioc_regs.ioc_sem_reg);
 925		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
 926		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 927		break;
 928
 929	case IOCPF_E_DISABLE:
 930		bfa_iocpf_timer_stop(ioc);
 931		bfa_ioc_sync_leave(ioc);
 932		writel(1, ioc->ioc_regs.ioc_sem_reg);
 933		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 934		break;
 935
 936	default:
 937		bfa_sm_fault(ioc, event);
 938	}
 939}
 940
 941static void
 942bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
 943{
 944	bfa_iocpf_timer_start(iocpf->ioc);
 945	/*
 946	 * Enable Interrupts before sending fw IOC ENABLE cmd.
 947	 */
 948	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
 949	bfa_ioc_send_enable(iocpf->ioc);
 950}
 951
 952/*
 953 * Host IOC function is being enabled, awaiting response from firmware.
 954 * Semaphore is acquired.
 955 */
 956static void
 957bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 958{
 959	struct bfa_ioc_s *ioc = iocpf->ioc;
 960
 961	bfa_trc(ioc, event);
 962
 963	switch (event) {
 964	case IOCPF_E_FWRSP_ENABLE:
 965		bfa_iocpf_timer_stop(ioc);
 966		writel(1, ioc->ioc_regs.ioc_sem_reg);
 967		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
 968		break;
 969
 970	case IOCPF_E_INITFAIL:
 971		bfa_iocpf_timer_stop(ioc);
 972		fallthrough;
 973
 974	case IOCPF_E_TIMEOUT:
 975		writel(1, ioc->ioc_regs.ioc_sem_reg);
 976		if (event == IOCPF_E_TIMEOUT)
 977			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
 978		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 979		break;
 980
 981	case IOCPF_E_DISABLE:
 982		bfa_iocpf_timer_stop(ioc);
 983		writel(1, ioc->ioc_regs.ioc_sem_reg);
 984		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
 985		break;
 986
 987	default:
 988		bfa_sm_fault(ioc, event);
 989	}
 990}
 991
 992static void
 993bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
 994{
 995	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
 996}
 997
 998static void
 999bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1000{
1001	struct bfa_ioc_s *ioc = iocpf->ioc;
1002
1003	bfa_trc(ioc, event);
1004
1005	switch (event) {
1006	case IOCPF_E_DISABLE:
1007		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
1008		break;
1009
1010	case IOCPF_E_GETATTRFAIL:
1011		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
1012		break;
1013
1014	case IOCPF_E_FAIL:
1015		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
1016		break;
1017
1018	default:
1019		bfa_sm_fault(ioc, event);
1020	}
1021}
1022
1023static void
1024bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
1025{
1026	bfa_iocpf_timer_start(iocpf->ioc);
1027	bfa_ioc_send_disable(iocpf->ioc);
1028}
1029
1030/*
1031 * IOC is being disabled
1032 */
1033static void
1034bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1035{
1036	struct bfa_ioc_s *ioc = iocpf->ioc;
1037
1038	bfa_trc(ioc, event);
1039
1040	switch (event) {
1041	case IOCPF_E_FWRSP_DISABLE:
1042		bfa_iocpf_timer_stop(ioc);
1043		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1044		break;
1045
1046	case IOCPF_E_FAIL:
1047		bfa_iocpf_timer_stop(ioc);
1048		fallthrough;
1049
1050	case IOCPF_E_TIMEOUT:
1051		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1052		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1053		break;
1054
1055	case IOCPF_E_FWRSP_ENABLE:
1056		break;
1057
1058	default:
1059		bfa_sm_fault(ioc, event);
1060	}
1061}
1062
1063static void
1064bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
1065{
1066	bfa_ioc_hw_sem_get(iocpf->ioc);
1067}
1068
1069/*
1070 * IOC hb ack request is being removed.
1071 */
1072static void
1073bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1074{
1075	struct bfa_ioc_s *ioc = iocpf->ioc;
1076
1077	bfa_trc(ioc, event);
1078
1079	switch (event) {
1080	case IOCPF_E_SEMLOCKED:
1081		bfa_ioc_sync_leave(ioc);
1082		writel(1, ioc->ioc_regs.ioc_sem_reg);
1083		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1084		break;
1085
1086	case IOCPF_E_SEM_ERROR:
1087		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1088		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1089		break;
1090
1091	case IOCPF_E_FAIL:
1092		break;
1093
1094	default:
1095		bfa_sm_fault(ioc, event);
1096	}
1097}
1098
1099/*
1100 * IOC disable completion entry.
1101 */
1102static void
1103bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
1104{
1105	bfa_ioc_mbox_flush(iocpf->ioc);
1106	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
1107}
1108
1109static void
1110bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1111{
1112	struct bfa_ioc_s *ioc = iocpf->ioc;
1113
1114	bfa_trc(ioc, event);
1115
1116	switch (event) {
1117	case IOCPF_E_ENABLE:
1118		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1119		break;
1120
1121	case IOCPF_E_STOP:
1122		bfa_ioc_firmware_unlock(ioc);
1123		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1124		break;
1125
1126	default:
1127		bfa_sm_fault(ioc, event);
1128	}
1129}
1130
1131static void
1132bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1133{
1134	bfa_ioc_debug_save_ftrc(iocpf->ioc);
1135	bfa_ioc_hw_sem_get(iocpf->ioc);
1136}
1137
1138/*
1139 * Hardware initialization failed.
1140 */
1141static void
1142bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1143{
1144	struct bfa_ioc_s *ioc = iocpf->ioc;
1145
1146	bfa_trc(ioc, event);
1147
1148	switch (event) {
1149	case IOCPF_E_SEMLOCKED:
1150		bfa_ioc_notify_fail(ioc);
1151		bfa_ioc_sync_leave(ioc);
1152		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1153		writel(1, ioc->ioc_regs.ioc_sem_reg);
1154		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1155		break;
1156
1157	case IOCPF_E_SEM_ERROR:
1158		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1159		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1160		break;
1161
1162	case IOCPF_E_DISABLE:
1163		bfa_sem_timer_stop(ioc);
1164		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1165		break;
1166
1167	case IOCPF_E_STOP:
1168		bfa_sem_timer_stop(ioc);
1169		bfa_ioc_firmware_unlock(ioc);
1170		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1171		break;
1172
1173	case IOCPF_E_FAIL:
1174		break;
1175
1176	default:
1177		bfa_sm_fault(ioc, event);
1178	}
1179}
1180
1181static void
1182bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1183{
1184	bfa_trc(iocpf->ioc, 0);
1185}
1186
1187/*
1188 * Hardware initialization failed.
1189 */
1190static void
1191bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1192{
1193	struct bfa_ioc_s *ioc = iocpf->ioc;
1194
1195	bfa_trc(ioc, event);
1196
1197	switch (event) {
1198	case IOCPF_E_DISABLE:
1199		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1200		break;
1201
1202	case IOCPF_E_STOP:
1203		bfa_ioc_firmware_unlock(ioc);
1204		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1205		break;
1206
1207	default:
1208		bfa_sm_fault(ioc, event);
1209	}
1210}
1211
1212static void
1213bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1214{
1215	/*
1216	 * Mark IOC as failed in hardware and stop firmware.
1217	 */
1218	bfa_ioc_lpu_stop(iocpf->ioc);
1219
1220	/*
1221	 * Flush any queued up mailbox requests.
1222	 */
1223	bfa_ioc_mbox_flush(iocpf->ioc);
1224
1225	bfa_ioc_hw_sem_get(iocpf->ioc);
1226}
1227
1228static void
1229bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1230{
1231	struct bfa_ioc_s *ioc = iocpf->ioc;
1232
1233	bfa_trc(ioc, event);
1234
1235	switch (event) {
1236	case IOCPF_E_SEMLOCKED:
1237		bfa_ioc_sync_ack(ioc);
1238		bfa_ioc_notify_fail(ioc);
1239		if (!iocpf->auto_recover) {
1240			bfa_ioc_sync_leave(ioc);
1241			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1242			writel(1, ioc->ioc_regs.ioc_sem_reg);
1243			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1244		} else {
1245			if (bfa_ioc_sync_complete(ioc))
1246				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1247			else {
1248				writel(1, ioc->ioc_regs.ioc_sem_reg);
1249				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1250			}
1251		}
1252		break;
1253
1254	case IOCPF_E_SEM_ERROR:
1255		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1256		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1257		break;
1258
1259	case IOCPF_E_DISABLE:
1260		bfa_sem_timer_stop(ioc);
1261		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1262		break;
1263
1264	case IOCPF_E_FAIL:
1265		break;
1266
1267	default:
1268		bfa_sm_fault(ioc, event);
1269	}
1270}
1271
1272static void
1273bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1274{
1275	bfa_trc(iocpf->ioc, 0);
1276}
1277
1278/*
1279 * IOC is in failed state.
1280 */
1281static void
1282bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1283{
1284	struct bfa_ioc_s *ioc = iocpf->ioc;
1285
1286	bfa_trc(ioc, event);
1287
1288	switch (event) {
1289	case IOCPF_E_DISABLE:
1290		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1291		break;
1292
1293	default:
1294		bfa_sm_fault(ioc, event);
1295	}
1296}
1297
1298/*
1299 *  BFA IOC private functions
1300 */
1301
1302/*
1303 * Notify common modules registered for notification.
1304 */
1305static void
1306bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
1307{
1308	struct bfa_ioc_notify_s	*notify;
1309	struct list_head	*qe;
1310
1311	list_for_each(qe, &ioc->notify_q) {
1312		notify = (struct bfa_ioc_notify_s *)qe;
1313		notify->cbfn(notify->cbarg, event);
1314	}
1315}
1316
1317static void
1318bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1319{
1320	ioc->cbfn->disable_cbfn(ioc->bfa);
1321	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1322}
1323
1324bfa_boolean_t
1325bfa_ioc_sem_get(void __iomem *sem_reg)
1326{
1327	u32 r32;
1328	int cnt = 0;
1329#define BFA_SEM_SPINCNT	3000	/* ~6 ms max spin: 3000 * udelay(2) */
1330
1331	r32 = readl(sem_reg);
1332
1333	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1334		cnt++;
1335		udelay(2);
1336		r32 = readl(sem_reg);
1337	}
1338
1339	if (!(r32 & 1))
1340		return BFA_TRUE;
1341
1342	return BFA_FALSE;
1343}
1344
1345static void
1346bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1347{
1348	u32	r32;
1349
1350	/*
1351	 * First read to the semaphore register will return 0, subsequent reads
1352	 * will return 1. Semaphore is released by writing 1 to the register
1353	 */
1354	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1355	if (r32 == ~0) {
1356		WARN_ON(r32 == ~0);
1357		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1358		return;
1359	}
1360	if (!(r32 & 1)) {
1361		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1362		return;
1363	}
1364
1365	bfa_sem_timer_start(ioc);
1366}
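
/*
 * Usage sketch (illustrative, not part of the driver): the hw semaphore
 * is a read-to-lock / write-1-to-unlock register, so a synchronous
 * caller that cannot use the FSM event flow can bracket a critical
 * section with the spinning bfa_ioc_sem_get() above:
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		... touch registers shared between PCI functions ...
 *		readl(ioc->ioc_regs.ioc_init_sem_reg);
 *		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 *
 * bfa_ioc_pll_init() and bfa_ioc_smem_read() below use this pattern.
 */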
1367
1368/*
1369 * Initialize LPU local memory (aka secondary memory / SRAM)
1370 */
1371static void
1372bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1373{
1374	u32	pss_ctl;
1375	int		i;
1376#define PSS_LMEM_INIT_TIME  10000
1377
1378	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1379	pss_ctl &= ~__PSS_LMEM_RESET;
1380	pss_ctl |= __PSS_LMEM_INIT_EN;
1381
1382	/*
1383	 * I2C workaround: 12.5 kHz clock
1384	 */
1385	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1386	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1387
1388	/*
1389	 * wait for memory initialization to be complete
1390	 */
1391	i = 0;
1392	do {
1393		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1394		i++;
1395	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1396
1397	/*
1398	 * If memory initialization is not successful, IOC timeout will catch
1399	 * such failures.
1400	 */
1401	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1402	bfa_trc(ioc, pss_ctl);
1403
1404	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1405	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1406}
1407
1408static void
1409bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1410{
1411	u32	pss_ctl;
1412
1413	/*
1414	 * Take processor out of reset.
1415	 */
1416	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1417	pss_ctl &= ~__PSS_LPU0_RESET;
1418
1419	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1420}
1421
1422static void
1423bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1424{
1425	u32	pss_ctl;
1426
1427	/*
1428	 * Put processors in reset.
1429	 */
1430	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1431	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1432
1433	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1434}
1435
1436/*
1437 * Get driver and firmware versions.
1438 */
1439void
1440bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1441{
1442	u32	pgnum;
1443	u32	loff = 0;
1444	int		i;
1445	u32	*fwsig = (u32 *) fwhdr;
1446
1447	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1448	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1449
1450	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1451	     i++) {
1452		fwsig[i] =
1453			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1454		loff += sizeof(u32);
1455	}
1456}
1457
1458/*
1459 * Returns TRUE if driver is willing to work with current smem f/w version.
1460 */
1461bfa_boolean_t
1462bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
1463		struct bfi_ioc_image_hdr_s *smem_fwhdr)
1464{
1465	struct bfi_ioc_image_hdr_s *drv_fwhdr;
1466	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;
1467
1468	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1469		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1470
1471	/*
1472	 * If smem is incompatible or old, driver should not work with it.
1473	 */
1474	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
1475	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
1476		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
1477		return BFA_FALSE;
1478	}
1479
1480	/*
1481	 * If flash has a better f/w than smem, do not work with smem.
1482	 * If smem f/w equals flash f/w, work with it (not old/incompatible).
1483	 * If flash is old/incompatible, work with smem iff smem f/w == drv f/w.
1484	 */
1485	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);
1486
1487	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
1488		return BFA_FALSE;
1489	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
1490		return BFA_TRUE;
1491	} else {
1492		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
1493			BFA_TRUE : BFA_FALSE;
1494	}
1495}
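
/*
 * Decision summary derived from the checks above ("smem vs drv" is
 * drv_smem_cmp, "flash vs smem" is smem_flash_cmp):
 *
 *	smem vs drv	flash vs smem	work with smem f/w?
 *	INCOMP / OLD	any		no
 *	SAME / BETTER	BETTER		no (flash image preferred)
 *	SAME / BETTER	SAME		yes
 *	SAME		OLD / INCOMP	yes
 *	BETTER		OLD / INCOMP	no
 */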
1496
1497/*
1498 * Return true if current running version is valid. Firmware signature and
1499 * execution context (driver/bios) must match.
1500 */
1501static bfa_boolean_t
1502bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1503{
1504	struct bfi_ioc_image_hdr_s fwhdr;
1505
1506	bfa_ioc_fwver_get(ioc, &fwhdr);
1507
1508	if (swab32(fwhdr.bootenv) != boot_env) {
1509		bfa_trc(ioc, fwhdr.bootenv);
1510		bfa_trc(ioc, boot_env);
1511		return BFA_FALSE;
1512	}
1513
1514	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1515}
1516
1517static bfa_boolean_t
1518bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
1519				struct bfi_ioc_image_hdr_s *fwhdr_2)
1520{
1521	int i;
1522
1523	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
1524		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
1525			return BFA_FALSE;
1526
1527	return BFA_TRUE;
1528}
1529
1530/*
1531 * Returns TRUE if the major, minor and maintenance versions are the same.
1532 * If patch, phase and build are also the same, the MD5 checksums must match.
1533 */
1534static bfa_boolean_t
1535bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
1536				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
1537{
1538	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
1539		return BFA_FALSE;
1540
1541	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
1542		return BFA_FALSE;
1543
1544	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
1545		return BFA_FALSE;
1546
1547	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
1548		return BFA_FALSE;
1549
1550	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
1551		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
1552		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
1553		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
1554	}
1555
1556	return BFA_TRUE;
1557}
1558
1559static bfa_boolean_t
1560bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
1561{
1562	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
1563		return BFA_FALSE;
1564
1565	return BFA_TRUE;
1566}
1567
1568static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
1569{
1570	if (fwhdr->fwver.phase == 0 &&
1571		fwhdr->fwver.build == 0)
1572		return BFA_TRUE;
1573
1574	return BFA_FALSE;
1575}
1576
1577/*
1578 * Compare fwhdr_to_cmp against base_fwhdr: incompatible, older, same, or better.
1579 */
1580static enum bfi_ioc_img_ver_cmp_e
1581bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
1582				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
1583{
1584	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
1585		return BFI_IOC_IMG_VER_INCOMP;
1586
1587	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
1588		return BFI_IOC_IMG_VER_BETTER;
1589
1590	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
1591		return BFI_IOC_IMG_VER_OLD;
1592
1593	/*
1594	 * GA takes priority over internal builds of the same patch stream.
1595	 * At this point the major, minor, maint and patch numbers are the same.
1596	 */
1597
1598	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
1599		if (fwhdr_is_ga(fwhdr_to_cmp))
1600			return BFI_IOC_IMG_VER_SAME;
1601		else
1602			return BFI_IOC_IMG_VER_OLD;
1603	} else {
1604		if (fwhdr_is_ga(fwhdr_to_cmp))
1605			return BFI_IOC_IMG_VER_BETTER;
1606	}
1607
1608	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
1609		return BFI_IOC_IMG_VER_BETTER;
1610	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
1611		return BFI_IOC_IMG_VER_OLD;
1612
1613	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
1614		return BFI_IOC_IMG_VER_BETTER;
1615	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
1616		return BFI_IOC_IMG_VER_OLD;
1617
1618	/*
1619	 * All version numbers are equal; the MD5 check was already done as
1620	 * part of the compatibility check above.
1621	 */
1622	return BFI_IOC_IMG_VER_SAME;
1623}
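
/*
 * Worked example (hypothetical versions): with base_fwhdr at
 * 3.2.25.0 phase 0 build 0 (a GA image) and fwhdr_to_cmp at
 * 3.2.25.0 phase 1 build 7 (an internal build), major/minor/maint/patch
 * are equal, the base is GA and the candidate is not, so the function
 * returns BFI_IOC_IMG_VER_OLD and the GA image is kept.
 */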
1624
1625#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */
1626
1627bfa_status_t
1628bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
1629				u32 *fwimg)
1630{
1631	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
1632			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
1633			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
1634}
1635
1636static enum bfi_ioc_img_ver_cmp_e
1637bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
1638			struct bfi_ioc_image_hdr_s *base_fwhdr)
1639{
1640	struct bfi_ioc_image_hdr_s *flash_fwhdr;
1641	bfa_status_t status;
1642	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
1643
1644	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
1645	if (status != BFA_STATUS_OK)
1646		return BFI_IOC_IMG_VER_INCOMP;
1647
1648	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
1649	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
1650		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
1651	else
1652		return BFI_IOC_IMG_VER_INCOMP;
1653}
1654
1655
1656/*
1657 * Invalidate fwver signature
1658 */
1659bfa_status_t
1660bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
1661{
1662
1663	u32	pgnum;
1664	u32	loff = 0;
1665	enum bfi_ioc_state ioc_fwstate;
1666
1667	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1668	if (!bfa_ioc_state_disabled(ioc_fwstate))
1669		return BFA_STATUS_ADAPTER_ENABLED;
1670
1671	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1672	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1673	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
1674
1675	return BFA_STATUS_OK;
1676}
1677
1678/*
1679 * Conditionally flush any pending message from firmware at start.
1680 */
1681static void
1682bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1683{
1684	u32	r32;
1685
1686	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1687	if (r32)
1688		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1689}
1690
1691static void
1692bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1693{
1694	enum bfi_ioc_state ioc_fwstate;
1695	bfa_boolean_t fwvalid;
1696	u32 boot_type;
1697	u32 boot_env;
1698
1699	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1700
1701	if (force)
1702		ioc_fwstate = BFI_IOC_UNINIT;
1703
1704	bfa_trc(ioc, ioc_fwstate);
1705
1706	boot_type = BFI_FWBOOT_TYPE_NORMAL;
1707	boot_env = BFI_FWBOOT_ENV_OS;
1708
1709	/*
1710	 * check if firmware is valid
1711	 */
1712	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1713		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1714
1715	if (!fwvalid) {
1716		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1717			bfa_ioc_poll_fwinit(ioc);
1718		return;
1719	}
1720
1721	/*
1722	 * If hardware initialization is in progress (initialized by other IOC),
1723	 * just wait for an initialization completion interrupt.
1724	 */
1725	if (ioc_fwstate == BFI_IOC_INITING) {
1726		bfa_ioc_poll_fwinit(ioc);
1727		return;
1728	}
1729
1730	/*
1731	 * If IOC function is disabled and firmware version is same,
1732	 * just re-enable IOC.
1733	 *
1734	 * If option rom, IOC must not be in operational state. With
1735	 * convergence, IOC will be in operational state when 2nd driver
1736	 * is loaded.
1737	 */
1738	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1739
1740		/*
1741		 * When using MSI-X any pending firmware ready event should
1742		 * be flushed. Otherwise MSI-X interrupts are not delivered.
1743		 */
1744		bfa_ioc_msgflush(ioc);
1745		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1746		return;
1747	}
1748
1749	/*
1750	 * Initialize the h/w for any other states.
1751	 */
1752	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1753		bfa_ioc_poll_fwinit(ioc);
1754}
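
/*
 * Summary of the decision flow above:
 *  - f/w invalid or BFI_IOC_UNINIT	-> boot and poll for f/w init
 *  - BFI_IOC_INITING			-> another function is booting; poll
 *  - BFI_IOC_DISABLED / BFI_IOC_OP	-> f/w is valid; flush any stale
 *					   mailbox event and send FWREADY
 *  - any other state			-> boot and poll for f/w init
 */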
1755
1756static void
1757bfa_ioc_timeout(void *ioc_arg)
1758{
1759	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1760
1761	bfa_trc(ioc, 0);
1762	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1763}
1764
1765void
1766bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1767{
1768	u32 *msgp = (u32 *) ioc_msg;
1769	u32 i;
1770
1771	bfa_trc(ioc, msgp[0]);
1772	bfa_trc(ioc, len);
1773
1774	WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1775
1776	/*
1777	 * first write msg to mailbox registers
1778	 */
1779	for (i = 0; i < len / sizeof(u32); i++)
1780		writel(cpu_to_le32(msgp[i]),
1781			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1782
1783	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1784		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1785
1786	/*
1787	 * write 1 to mailbox CMD to trigger LPU event
1788	 */
1789	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1790	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1791}
1792
1793static void
1794bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1795{
1796	struct bfi_ioc_ctrl_req_s enable_req;
1797
1798	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1799		    bfa_ioc_portid(ioc));
1800	enable_req.clscode = cpu_to_be16(ioc->clscode);
1801	/* unsigned 32-bit time_t overflow in y2106 */
1802	enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1803	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1804}
1805
1806static void
1807bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1808{
1809	struct bfi_ioc_ctrl_req_s disable_req;
1810
1811	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1812		    bfa_ioc_portid(ioc));
1813	disable_req.clscode = cpu_to_be16(ioc->clscode);
1814	/* unsigned 32-bit time_t overflow in y2106 */
1815	disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1816	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1817}
1818
1819static void
1820bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1821{
1822	struct bfi_ioc_getattr_req_s	attr_req;
1823
1824	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1825		    bfa_ioc_portid(ioc));
1826	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1827	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1828}
1829
1830static void
1831bfa_ioc_hb_check(void *cbarg)
1832{
1833	struct bfa_ioc_s  *ioc = cbarg;
1834	u32	hb_count;
1835
1836	hb_count = readl(ioc->ioc_regs.heartbeat);
1837	if (ioc->hb_count == hb_count) {
1838		bfa_ioc_recover(ioc);
1839		return;
1840	} else {
1841		ioc->hb_count = hb_count;
1842	}
1843
1844	bfa_ioc_mbox_poll(ioc);
1845	bfa_hb_timer_start(ioc);
1846}
1847
1848static void
1849bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1850{
1851	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1852	bfa_hb_timer_start(ioc);
1853}
1854
1855/*
1856 *	Initiate a full firmware download.
1857 */
1858static bfa_status_t
1859bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1860		    u32 boot_env)
1861{
1862	u32 *fwimg;
1863	u32 pgnum;
1864	u32 loff = 0;
1865	u32 chunkno = 0;
1866	u32 i;
1867	u32 asicmode;
1868	u32 fwimg_size;
1869	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
1870	bfa_status_t status;
1871
1872	if (boot_env == BFI_FWBOOT_ENV_OS &&
1873		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1874		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
1875
1876		status = bfa_ioc_flash_img_get_chnk(ioc,
1877			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
1878		if (status != BFA_STATUS_OK)
1879			return status;
1880
1881		fwimg = fwimg_buf;
1882	} else {
1883		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
1884		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1885					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1886	}
1887
1888	bfa_trc(ioc, fwimg_size);
1889
1890
1891	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1892	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1893
1894	for (i = 0; i < fwimg_size; i++) {
1895
1896		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1897			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1898
1899			if (boot_env == BFI_FWBOOT_ENV_OS &&
1900				boot_type == BFI_FWBOOT_TYPE_FLASH) {
1901				status = bfa_ioc_flash_img_get_chnk(ioc,
1902					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
1903					fwimg_buf);
1904				if (status != BFA_STATUS_OK)
1905					return status;
1906
1907				fwimg = fwimg_buf;
1908			} else {
1909				fwimg = bfa_cb_image_get_chunk(
1910					bfa_ioc_asic_gen(ioc),
1911					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1912			}
1913		}
1914
1915		/*
1916		 * write smem
1917		 */
1918		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1919			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1920
1921		loff += sizeof(u32);
1922
1923		/*
1924		 * handle page offset wrap around
1925		 */
1926		loff = PSS_SMEM_PGOFF(loff);
1927		if (loff == 0) {
1928			pgnum++;
1929			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1930		}
1931	}
1932
1933	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1934			ioc->ioc_regs.host_page_num_fn);
1935
1936	/*
1937	 * Set boot type, env and device mode at the end.
1938	 */
1939	if (boot_env == BFI_FWBOOT_ENV_OS &&
1940		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1941		boot_type = BFI_FWBOOT_TYPE_NORMAL;
1942	}
1943	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1944				ioc->port0_mode, ioc->port1_mode);
1945	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1946			swab32(asicmode));
1947	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1948			swab32(boot_type));
1949	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1950			swab32(boot_env));
1951	return BFA_STATUS_OK;
1952}
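
/*
 * Paging sketch for the loop above: smem is reached through a sliding
 * page window. Every u32 write advances loff by 4; PSS_SMEM_PGOFF()
 * masks loff back into the page, and the moment it wraps to 0 the next
 * page is selected through host_page_num_fn. With a hypothetical 32 KB
 * page, word i = 8192 would be the first word written to page pgnum + 1.
 */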
1953
1954
1955/*
1956 * Update BFA configuration from firmware configuration.
1957 */
1958static void
1959bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1960{
1961	struct bfi_ioc_attr_s	*attr = ioc->attr;
1962
1963	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1964	attr->card_type     = be32_to_cpu(attr->card_type);
1965	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1966	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
1967	attr->mfg_year	= be16_to_cpu(attr->mfg_year);
1968
1969	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1970}
1971
1972/*
1973 * Attach time initialization of mbox logic.
1974 */
1975static void
1976bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1977{
1978	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1979	int	mc;
1980
1981	INIT_LIST_HEAD(&mod->cmd_q);
1982	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1983		mod->mbhdlr[mc].cbfn = NULL;
1984		mod->mbhdlr[mc].cbarg = ioc->bfa;
1985	}
1986}
1987
1988/*
1989 * Mbox poll timer -- restarts any pending mailbox requests.
1990 */
1991static void
1992bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1993{
1994	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1995	struct bfa_mbox_cmd_s		*cmd;
1996	u32			stat;
1997
1998	/*
1999	 * If no command pending, do nothing
2000	 */
2001	if (list_empty(&mod->cmd_q))
2002		return;
2003
2004	/*
2005	 * If previous command is not yet fetched by firmware, do nothing
2006	 */
2007	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2008	if (stat)
2009		return;
2010
2011	/*
2012	 * Dequeue the next pending command and send it to firmware.
2013	 */
2014	bfa_q_deq(&mod->cmd_q, &cmd);
2015	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2016}
2017
2018/*
2019 * Cleanup any pending requests.
2020 */
2021static void
2022bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2023{
2024	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2025	struct bfa_mbox_cmd_s		*cmd;
2026
2027	while (!list_empty(&mod->cmd_q))
2028		bfa_q_deq(&mod->cmd_q, &cmd);
2029}
2030
2031/*
2032 * Read data from SMEM to host through PCI memmap
2033 *
2034 * @param[in]	ioc	IOC instance
2035 * @param[in]	tbuf	app memory to store data from smem
2036 * @param[in]	soff	smem offset
2037 * @param[in]	sz	number of bytes to read
2038 */
2039static bfa_status_t
2040bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2041{
2042	u32 pgnum, loff;
2043	__be32 r32;
2044	int i, len;
2045	u32 *buf = tbuf;
2046
2047	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2048	loff = PSS_SMEM_PGOFF(soff);
2049	bfa_trc(ioc, pgnum);
2050	bfa_trc(ioc, loff);
2051	bfa_trc(ioc, sz);
2052
2053	/*
2054	 *  Hold semaphore to serialize pll init and fwtrc.
2055	 */
2056	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2057		bfa_trc(ioc, 0);
2058		return BFA_STATUS_FAILED;
2059	}
2060
2061	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2062
2063	len = sz/sizeof(u32);
2064	bfa_trc(ioc, len);
2065	for (i = 0; i < len; i++) {
2066		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2067		buf[i] = swab32(r32);
2068		loff += sizeof(u32);
2069
2070		/*
2071		 * handle page offset wrap around
2072		 */
2073		loff = PSS_SMEM_PGOFF(loff);
2074		if (loff == 0) {
2075			pgnum++;
2076			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2077		}
2078	}
2079	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2080			ioc->ioc_regs.host_page_num_fn);
2081	/*
2082	 *  release semaphore.
2083	 */
2084	readl(ioc->ioc_regs.ioc_init_sem_reg);
2085	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2086
2087	bfa_trc(ioc, pgnum);
2088	return BFA_STATUS_OK;
2089}
2090
2091/*
2092 * Clear SMEM data from host through PCI memmap
2093 *
2094 * @param[in]	ioc	IOC instance
2095 * @param[in]	soff	smem offset
2096 * @param[in]	sz	number of bytes to clear
2097 */
2098static bfa_status_t
2099bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2100{
2101	int i, len;
2102	u32 pgnum, loff;
2103
2104	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2105	loff = PSS_SMEM_PGOFF(soff);
2106	bfa_trc(ioc, pgnum);
2107	bfa_trc(ioc, loff);
2108	bfa_trc(ioc, sz);
2109
2110	/*
2111	 *  Hold semaphore to serialize pll init and fwtrc.
2112	 */
2113	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2114		bfa_trc(ioc, 0);
2115		return BFA_STATUS_FAILED;
2116	}
2117
2118	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2119
2120	len = sz/sizeof(u32); /* len in words */
2121	bfa_trc(ioc, len);
2122	for (i = 0; i < len; i++) {
2123		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2124		loff += sizeof(u32);
2125
2126		/*
2127		 * handle page offset wrap around
2128		 */
2129		loff = PSS_SMEM_PGOFF(loff);
2130		if (loff == 0) {
2131			pgnum++;
2132			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2133		}
2134	}
2135	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2136			ioc->ioc_regs.host_page_num_fn);
2137
2138	/*
2139	 *  release semaphore.
2140	 */
2141	readl(ioc->ioc_regs.ioc_init_sem_reg);
2142	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2143	bfa_trc(ioc, pgnum);
2144	return BFA_STATUS_OK;
2145}
2146
2147static void
2148bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2149{
2150	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2151
2152	/*
2153	 * Notify driver and common modules registered for notification.
2154	 */
2155	ioc->cbfn->hbfail_cbfn(ioc->bfa);
2156	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2157
2158	bfa_ioc_debug_save_ftrc(ioc);
2159
2160	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2161		"Heart Beat of IOC has failed\n");
2162	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2163
2164}
2165
2166static void
2167bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2168{
2169	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2170	/*
2171	 * Provide enable completion callback.
2172	 */
2173	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2174	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2175		"Running firmware version is incompatible "
2176		"with the driver version\n");
2177	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2178}
2179
2180bfa_status_t
2181bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2182{
2183
2184	/*
2185	 *  Hold semaphore so that nobody can access the chip during init.
2186	 */
2187	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2188
2189	bfa_ioc_pll_init_asic(ioc);
2190
2191	ioc->pllinit = BFA_TRUE;
2192
2193	/*
2194	 * Initialize LMEM
2195	 */
2196	bfa_ioc_lmem_init(ioc);
2197
2198	/*
2199	 *  release semaphore.
2200	 */
2201	readl(ioc->ioc_regs.ioc_init_sem_reg);
2202	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2203
2204	return BFA_STATUS_OK;
2205}
2206
2207/*
2208 * Interface used by diag module to do firmware boot with memory test
2209 * as the entry vector.
2210 */
2211bfa_status_t
2212bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2213{
2214	struct bfi_ioc_image_hdr_s *drv_fwhdr;
2215	bfa_status_t status;
2216	bfa_ioc_stats(ioc, ioc_boots);
2217
2218	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2219		return BFA_STATUS_FAILED;
2220
2221	if (boot_env == BFI_FWBOOT_ENV_OS &&
2222		boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2223
2224		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2225			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2226
2227		/*
2228		 * Boot from flash iff the flash f/w is better than the
2229		 * driver f/w. Otherwise download the driver's firmware.
2230		 */
2231		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2232						BFI_IOC_IMG_VER_BETTER)
2233			boot_type = BFI_FWBOOT_TYPE_FLASH;
2234	}
2235
2236	/*
2237	 * Initialize IOC state of all functions on a chip reset.
2238	 */
2239	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2240		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2241		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2242	} else {
2243		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2244		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2245	}
2246
2247	bfa_ioc_msgflush(ioc);
2248	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2249	if (status == BFA_STATUS_OK)
2250		bfa_ioc_lpu_start(ioc);
2251	else {
2252		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2253		bfa_iocpf_timeout(ioc);
2254	}
2255	return status;
2256}
2257
2258/*
2259 * Enable/disable IOC failure auto recovery.
2260 */
2261void
2262bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2263{
2264	bfa_auto_recover = auto_recover;
2265}
2266
2267
2268
2269bfa_boolean_t
2270bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2271{
2272	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2273}
2274
2275bfa_boolean_t
2276bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2277{
2278	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2279
2280	return ((r32 != BFI_IOC_UNINIT) &&
2281		(r32 != BFI_IOC_INITING) &&
2282		(r32 != BFI_IOC_MEMTEST));
2283}
2284
2285bfa_boolean_t
2286bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2287{
2288	__be32	*msgp = mbmsg;
2289	u32	r32;
2290	int		i;
2291
2292	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2293	if ((r32 & 1) == 0)
2294		return BFA_FALSE;
2295
2296	/*
2297	 * read the MBOX msg
2298	 */
2299	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2300	     i++) {
2301		r32 = readl(ioc->ioc_regs.lpu_mbox +
2302				   i * sizeof(u32));
2303		msgp[i] = cpu_to_be32(r32);
2304	}
2305
2306	/*
2307	 * turn off mailbox interrupt by clearing mailbox status
2308	 */
2309	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2310	readl(ioc->ioc_regs.lpu_mbox_cmd);
2311
2312	return BFA_TRUE;
2313}
2314
2315void
2316bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2317{
2318	union bfi_ioc_i2h_msg_u	*msg;
2319	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2320
2321	msg = (union bfi_ioc_i2h_msg_u *) m;
2322
2323	bfa_ioc_stats(ioc, ioc_isrs);
2324
2325	switch (msg->mh.msg_id) {
2326	case BFI_IOC_I2H_HBEAT:
2327		break;
2328
2329	case BFI_IOC_I2H_ENABLE_REPLY:
2330		ioc->port_mode = ioc->port_mode_cfg =
2331				(enum bfa_mode_s)msg->fw_event.port_mode;
2332		ioc->ad_cap_bm = msg->fw_event.cap_bm;
2333		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2334		break;
2335
2336	case BFI_IOC_I2H_DISABLE_REPLY:
2337		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2338		break;
2339
2340	case BFI_IOC_I2H_GETATTR_REPLY:
2341		bfa_ioc_getattr_reply(ioc);
2342		break;
2343
2344	default:
2345		bfa_trc(ioc, msg->mh.msg_id);
2346		WARN_ON(1);
2347	}
2348}
2349
2350/*
2351 * IOC attach time initialization and setup.
2352 *
2353 * @param[in]	ioc	memory for IOC
2354 * @param[in]	bfa	driver instance structure
2355 */
2356void
2357bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2358	       struct bfa_timer_mod_s *timer_mod)
2359{
2360	ioc->bfa	= bfa;
2361	ioc->cbfn	= cbfn;
2362	ioc->timer_mod	= timer_mod;
2363	ioc->fcmode	= BFA_FALSE;
2364	ioc->pllinit	= BFA_FALSE;
2365	ioc->dbg_fwsave_once = BFA_TRUE;
2366	ioc->iocpf.ioc	= ioc;
2367
2368	bfa_ioc_mbox_attach(ioc);
2369	INIT_LIST_HEAD(&ioc->notify_q);
2370
2371	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2372	bfa_fsm_send_event(ioc, IOC_E_RESET);
2373}
2374
2375/*
2376 * Driver detach time IOC cleanup.
2377 */
2378void
2379bfa_ioc_detach(struct bfa_ioc_s *ioc)
2380{
2381	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2382	INIT_LIST_HEAD(&ioc->notify_q);
2383}
2384
2385/*
2386 * Setup IOC PCI properties.
2387 *
2388 * @param[in]	pcidev	PCI device information for this IOC
2389 */
2390void
2391bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2392		enum bfi_pcifn_class clscode)
2393{
2394	ioc->clscode	= clscode;
2395	ioc->pcidev	= *pcidev;
2396
2397	/*
2398	 * Initialize IOC and device personality
2399	 */
2400	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2401	ioc->asic_mode  = BFI_ASIC_MODE_FC;
2402
2403	switch (pcidev->device_id) {
2404	case BFA_PCI_DEVICE_ID_FC_8G1P:
2405	case BFA_PCI_DEVICE_ID_FC_8G2P:
2406		ioc->asic_gen = BFI_ASIC_GEN_CB;
2407		ioc->fcmode = BFA_TRUE;
2408		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2409		ioc->ad_cap_bm = BFA_CM_HBA;
2410		break;
2411
2412	case BFA_PCI_DEVICE_ID_CT:
2413		ioc->asic_gen = BFI_ASIC_GEN_CT;
2414		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2415		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2416		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2417		ioc->ad_cap_bm = BFA_CM_CNA;
2418		break;
2419
2420	case BFA_PCI_DEVICE_ID_CT_FC:
2421		ioc->asic_gen = BFI_ASIC_GEN_CT;
2422		ioc->fcmode = BFA_TRUE;
2423		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2424		ioc->ad_cap_bm = BFA_CM_HBA;
2425		break;
2426
2427	case BFA_PCI_DEVICE_ID_CT2:
2428	case BFA_PCI_DEVICE_ID_CT2_QUAD:
2429		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2430		if (clscode == BFI_PCIFN_CLASS_FC &&
2431		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2432			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2433			ioc->fcmode = BFA_TRUE;
2434			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2435			ioc->ad_cap_bm = BFA_CM_HBA;
2436		} else {
2437			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2438			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2439			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2440				ioc->port_mode =
2441				ioc->port_mode_cfg = BFA_MODE_CNA;
2442				ioc->ad_cap_bm = BFA_CM_CNA;
2443			} else {
2444				ioc->port_mode =
2445				ioc->port_mode_cfg = BFA_MODE_NIC;
2446				ioc->ad_cap_bm = BFA_CM_NIC;
2447			}
2448		}
2449		break;
2450
2451	default:
2452		WARN_ON(1);
2453	}
2454
2455	/*
2456	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2457	 */
2458	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2459		bfa_ioc_set_cb_hwif(ioc);
2460	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2461		bfa_ioc_set_ct_hwif(ioc);
2462	else {
2463		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2464		bfa_ioc_set_ct2_hwif(ioc);
2465		bfa_ioc_ct2_poweron(ioc);
2466	}
2467
2468	bfa_ioc_map_port(ioc);
2469	bfa_ioc_reg_init(ioc);
2470}
2471
2472/*
2473 * Initialize IOC dma memory
2474 *
2475 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2476 * @param[in]	dm_pa	physical address of IOC dma memory
2477 */
2478void
2479bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2480{
2481	/*
2482	 * dma memory for firmware attribute
2483	 */
2484	ioc->attr_dma.kva = dm_kva;
2485	ioc->attr_dma.pa = dm_pa;
2486	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2487}
2488
2489void
2490bfa_ioc_enable(struct bfa_ioc_s *ioc)
2491{
2492	bfa_ioc_stats(ioc, ioc_enables);
2493	ioc->dbg_fwsave_once = BFA_TRUE;
2494
2495	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2496}
2497
2498void
2499bfa_ioc_disable(struct bfa_ioc_s *ioc)
2500{
2501	bfa_ioc_stats(ioc, ioc_disables);
2502	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2503}
2504
2505void
2506bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2507{
2508	ioc->dbg_fwsave_once = BFA_TRUE;
2509	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2510}
2511
2512/*
2513 * Initialize memory for saving firmware trace. Driver must initialize
2514 * trace memory before calling bfa_ioc_enable().
2515 */
2516void
2517bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2518{
2519	ioc->dbg_fwsave	    = dbg_fwsave;
2520	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2521}
2522
2523/*
2524 * Register mailbox message handler functions
2525 *
2526 * @param[in]	ioc		IOC instance
2527 * @param[in]	mcfuncs		message class handler functions
2528 */
2529void
2530bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2531{
2532	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2533	int				mc;
2534
2535	for (mc = 0; mc < BFI_MC_MAX; mc++)
2536		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2537}
2538
2539/*
2540 * Register mailbox message handler function, to be called by common modules
2541 */
2542void
2543bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2544		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2545{
2546	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2547
2548	mod->mbhdlr[mc].cbfn	= cbfn;
2549	mod->mbhdlr[mc].cbarg	= cbarg;
2550}
2551
2552/*
2553 * Queue a mailbox command request to firmware. If the mailbox is busy,
2554 * the command is queued for later delivery; the caller must serialize.
2555 *
2556 * @param[in]	ioc	IOC instance
2557 * @param[in]	cmd	Mailbox command
2558 */
2559void
2560bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2561{
2562	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2563	u32			stat;
2564
2565	/*
2566	 * If a previous command is pending, queue new command
2567	 */
2568	if (!list_empty(&mod->cmd_q)) {
2569		list_add_tail(&cmd->qe, &mod->cmd_q);
2570		return;
2571	}
2572
2573	/*
2574	 * If mailbox is busy, queue command for poll timer
2575	 */
2576	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2577	if (stat) {
2578		list_add_tail(&cmd->qe, &mod->cmd_q);
2579		return;
2580	}
2581
2582	/*
2583	 * mailbox is free -- queue command to firmware
2584	 */
2585	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2586}
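
/*
 * Usage sketch: a caller builds its request inside cmd->msg and queues
 * it, as bfa_ioc_send_fwsync() does later in this file:
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 *
 * If the command cannot go out immediately it is linked through cmd->qe,
 * so a queued cmd must stay valid until bfa_ioc_mbox_poll() sends it.
 */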
2587
2588/*
2589 * Handle mailbox interrupts
2590 */
2591void
2592bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2593{
2594	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2595	struct bfi_mbmsg_s		m;
2596	int				mc;
2597
2598	if (bfa_ioc_msgget(ioc, &m)) {
2599		/*
2600		 * Treat IOC message class as special.
2601		 */
2602		mc = m.mh.msg_class;
2603		if (mc == BFI_MC_IOC) {
2604			bfa_ioc_isr(ioc, &m);
2605			return;
2606		}
2607
2608		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2609			return;
2610
2611		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2612	}
2613
2614	bfa_ioc_lpu_read_stat(ioc);
2615
2616	/*
2617	 * Try to send pending mailbox commands
2618	 */
2619	bfa_ioc_mbox_poll(ioc);
2620}
2621
2622void
2623bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2624{
2625	bfa_ioc_stats(ioc, ioc_hbfails);
2626	ioc->stats.hb_count = ioc->hb_count;
2627	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2628}
2629
2630/*
2631 * return true if IOC is disabled
2632 */
2633bfa_boolean_t
2634bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2635{
2636	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2637		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2638}
2639
2640/*
2641 * return true if IOC firmware is different.
2642 */
2643bfa_boolean_t
2644bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2645{
2646	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2647		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2648		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2649}
2650
2651/*
2652 * Check if adapter is disabled -- both IOCs should be in a disabled
2653 * state.
2654 */
2655bfa_boolean_t
2656bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2657{
2658	u32	ioc_state;
2659
2660	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2661		return BFA_FALSE;
2662
2663	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2664	if (!bfa_ioc_state_disabled(ioc_state))
2665		return BFA_FALSE;
2666
2667	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2668		ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2669		if (!bfa_ioc_state_disabled(ioc_state))
2670			return BFA_FALSE;
2671	}
2672
2673	return BFA_TRUE;
2674}
2675
2676/*
2677 * Reset IOC fwstate registers.
2678 */
2679void
2680bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2681{
2682	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2683	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2684}
2685
2686#define BFA_MFG_NAME "QLogic"
2687void
2688bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2689			 struct bfa_adapter_attr_s *ad_attr)
2690{
2691	struct bfi_ioc_attr_s	*ioc_attr;
2692
2693	ioc_attr = ioc->attr;
2694
2695	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2696	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2697	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2698	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2699	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2700		      sizeof(struct bfa_mfg_vpd_s));
2701
2702	ad_attr->nports = bfa_ioc_get_nports(ioc);
2703	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2704
2705	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2706	/* For now, the model description uses the same model string */
2707	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2708
2709	ad_attr->card_type = ioc_attr->card_type;
2710	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2711
2712	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2713		ad_attr->prototype = 1;
2714	else
2715		ad_attr->prototype = 0;
2716
2717	ad_attr->pwwn = ioc->attr->pwwn;
2718	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2719
2720	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2721	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2722	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2723	ad_attr->asic_rev = ioc_attr->asic_rev;
2724
2725	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2726
2727	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2728	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2729				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2730	ad_attr->mfg_day = ioc_attr->mfg_day;
2731	ad_attr->mfg_month = ioc_attr->mfg_month;
2732	ad_attr->mfg_year = ioc_attr->mfg_year;
2733	memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2734}
2735
2736enum bfa_ioc_type_e
2737bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2738{
2739	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2740		return BFA_IOC_TYPE_LL;
2741
2742	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2743
2744	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2745		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2746}
2747
2748void
2749bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2750{
2751	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2752	memcpy((void *)serial_num,
2753			(void *)ioc->attr->brcd_serialnum,
2754			BFA_ADAPTER_SERIAL_NUM_LEN);
2755}
2756
2757void
2758bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2759{
2760	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2761	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2762}
2763
2764void
2765bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2766{
2767	WARN_ON(!chip_rev);
2768
2769	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2770
2771	chip_rev[0] = 'R';
2772	chip_rev[1] = 'e';
2773	chip_rev[2] = 'v';
2774	chip_rev[3] = '-';
2775	chip_rev[4] = ioc->attr->asic_rev;
2776	chip_rev[5] = '\0';
2777}
2778
2779void
2780bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2781{
2782	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2783	memcpy(optrom_ver, ioc->attr->optrom_version,
2784		      BFA_VERSION_LEN);
2785}
2786
2787void
2788bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2789{
2790	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2791	strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2792}
2793
2794void
2795bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2796{
2797	struct bfi_ioc_attr_s	*ioc_attr;
2798	u8 nports = bfa_ioc_get_nports(ioc);
2799
2800	WARN_ON(!model);
2801	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2802
2803	ioc_attr = ioc->attr;
2804
2805	if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2806		(!bfa_mfg_is_mezz(ioc_attr->card_type)))
2807		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2808			BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2809	else
2810		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2811			BFA_MFG_NAME, ioc_attr->card_type);
2812}
2813
2814enum bfa_ioc_state
2815bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2816{
2817	enum bfa_iocpf_state iocpf_st;
2818	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2819
2820	if (ioc_st == BFA_IOC_ENABLING ||
2821		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2822
2823		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2824
2825		switch (iocpf_st) {
2826		case BFA_IOCPF_SEMWAIT:
2827			ioc_st = BFA_IOC_SEMWAIT;
2828			break;
2829
2830		case BFA_IOCPF_HWINIT:
2831			ioc_st = BFA_IOC_HWINIT;
2832			break;
2833
2834		case BFA_IOCPF_FWMISMATCH:
2835			ioc_st = BFA_IOC_FWMISMATCH;
2836			break;
2837
2838		case BFA_IOCPF_FAIL:
2839			ioc_st = BFA_IOC_FAIL;
2840			break;
2841
2842		case BFA_IOCPF_INITFAIL:
2843			ioc_st = BFA_IOC_INITFAIL;
2844			break;
2845
2846		default:
2847			break;
2848		}
2849	}
2850
2851	return ioc_st;
2852}
2853
2854void
2855bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2856{
2857	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2858
2859	ioc_attr->state = bfa_ioc_get_state(ioc);
2860	ioc_attr->port_id = bfa_ioc_portid(ioc);
2861	ioc_attr->port_mode = ioc->port_mode;
2862	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2863	ioc_attr->cap_bm = ioc->ad_cap_bm;
2864
2865	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2866
2867	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2868
2869	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2870	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2871	ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2872	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2873}
2874
2875mac_t
2876bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2877{
2878	/*
2879	 * Check the IOC type and return the appropriate MAC
2880	 */
2881	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2882		return ioc->attr->fcoe_mac;
2883	else
2884		return ioc->attr->mac;
2885}
2886
2887mac_t
2888bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2889{
2890	mac_t	m;
2891
2892	m = ioc->attr->mfg_mac;
2893	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2894		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2895	else
2896		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2897			bfa_ioc_pcifn(ioc));
2898
2899	return m;
2900}
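
/*
 * Example (hypothetical values): on an old wwn/mac model card with
 * mfg_mac 00:05:1e:00:00:10 and pcifn 2, only the last byte is bumped,
 * yielding 00:05:1e:00:00:12 for this function.
 */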
2901
2902/*
2903 * Send AEN notification
2904 */
2905void
2906bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2907{
2908	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2909	struct bfa_aen_entry_s	*aen_entry;
2910	enum bfa_ioc_type_e ioc_type;
2911
2912	bfad_get_aen_entry(bfad, aen_entry);
2913	if (!aen_entry)
2914		return;
2915
2916	ioc_type = bfa_ioc_get_type(ioc);
2917	switch (ioc_type) {
2918	case BFA_IOC_TYPE_FC:
2919		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2920		break;
2921	case BFA_IOC_TYPE_FCoE:
2922		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2923		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2924		break;
2925	case BFA_IOC_TYPE_LL:
2926		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2927		break;
2928	default:
2929		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2930		break;
2931	}
2932
2933	/* Send the AEN notification */
2934	aen_entry->aen_data.ioc.ioc_type = ioc_type;
2935	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2936				  BFA_AEN_CAT_IOC, event);
2937}
2938
2939/*
2940 * Retrieve saved firmware trace from a prior IOC failure.
2941 */
2942bfa_status_t
2943bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2944{
2945	int	tlen;
2946
2947	if (ioc->dbg_fwsave_len == 0)
2948		return BFA_STATUS_ENOFSAVE;
2949
2950	tlen = *trclen;
2951	if (tlen > ioc->dbg_fwsave_len)
2952		tlen = ioc->dbg_fwsave_len;
2953
2954	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2955	*trclen = tlen;
2956	return BFA_STATUS_OK;
2957}
2958
2959
2960/*
2961 * Retrieve saved firmware trace from a prior IOC failure.
2962 */
2963bfa_status_t
2964bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2965{
2966	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2967	int tlen;
2968	bfa_status_t status;
2969
2970	bfa_trc(ioc, *trclen);
2971
2972	tlen = *trclen;
2973	if (tlen > BFA_DBG_FWTRC_LEN)
2974		tlen = BFA_DBG_FWTRC_LEN;
2975
2976	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2977	*trclen = tlen;
2978	return status;
2979}
2980
2981static void
2982bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2983{
2984	struct bfa_mbox_cmd_s cmd;
2985	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2986
2987	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2988		    bfa_ioc_portid(ioc));
2989	req->clscode = cpu_to_be16(ioc->clscode);
2990	bfa_ioc_mbox_queue(ioc, &cmd);
2991}
2992
2993static void
2994bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2995{
2996	u32 fwsync_iter = 1000;
2997
2998	bfa_ioc_send_fwsync(ioc);
2999
3000	/*
3001	 * After sending a fw sync mbox command wait for it to
3002	 * take effect.  We will not wait for a response because
3003	 *    1. fw_sync mbox cmd doesn't have a response.
3004	 *    2. Even if we implement that,  interrupts might not
3005	 *	 be enabled when we call this function.
3006	 * So, just keep checking if any mbox cmd is pending, and
3007	 * after waiting for a reasonable amount of time, go ahead.
3008	 * It is possible that fw has crashed and the mbox command
3009	 * is never acknowledged.
3010	 */
3011	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3012		fwsync_iter--;
3013}
3014
3015/*
3016 * Dump firmware smem
3017 */
3018bfa_status_t
3019bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3020				u32 *offset, int *buflen)
3021{
3022	u32 loff;
3023	int dlen;
3024	bfa_status_t status;
3025	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3026
3027	if (*offset >= smem_len) {
3028		*offset = *buflen = 0;
3029		return BFA_STATUS_EINVAL;
3030	}
3031
3032	loff = *offset;
3033	dlen = *buflen;
3034
3035	/*
3036	 * First smem read, sync smem before proceeding
3037	 * No need to sync before reading every chunk.
3038	 */
3039	if (loff == 0)
3040		bfa_ioc_fwsync(ioc);
3041
3042	if ((loff + dlen) >= smem_len)
3043		dlen = smem_len - loff;
3044
3045	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3046
3047	if (status != BFA_STATUS_OK) {
3048		*offset = *buflen = 0;
3049		return status;
3050	}
3051
3052	*offset += dlen;
3053
3054	if (*offset >= smem_len)
3055		*offset = 0;
3056
3057	*buflen = dlen;
3058
3059	return status;
3060}
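
/*
 * Usage sketch (hypothetical caller): dump all of smem in chunks by
 * letting the routine advance the offset cursor, which it resets to 0
 * once the end of smem is reached:
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = sizeof(chunk_buf);
 *		if (bfa_ioc_debug_fwcore(ioc, chunk_buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		... consume len bytes from chunk_buf ...
 *	} while (off != 0);
 */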
3061
3062/*
3063 * Firmware statistics
3064 */
3065bfa_status_t
3066bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3067{
3068	u32 loff = BFI_IOC_FWSTATS_OFF + \
3069		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3070	int tlen;
3071	bfa_status_t status;
3072
3073	if (ioc->stats_busy) {
3074		bfa_trc(ioc, ioc->stats_busy);
3075		return BFA_STATUS_DEVBUSY;
3076	}
3077	ioc->stats_busy = BFA_TRUE;
3078
3079	tlen = sizeof(struct bfa_fw_stats_s);
3080	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3081
3082	ioc->stats_busy = BFA_FALSE;
3083	return status;
3084}
3085
3086bfa_status_t
3087bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3088{
3089	u32 loff = BFI_IOC_FWSTATS_OFF + \
3090		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3091	int tlen;
3092	bfa_status_t status;
3093
3094	if (ioc->stats_busy) {
3095		bfa_trc(ioc, ioc->stats_busy);
3096		return BFA_STATUS_DEVBUSY;
3097	}
3098	ioc->stats_busy = BFA_TRUE;
3099
3100	tlen = sizeof(struct bfa_fw_stats_s);
3101	status = bfa_ioc_smem_clr(ioc, loff, tlen);
3102
3103	ioc->stats_busy = BFA_FALSE;
3104	return status;
3105}
3106
3107/*
3108 * Save firmware trace if configured.
3109 */
3110void
3111bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3112{
3113	int		tlen;
3114
3115	if (ioc->dbg_fwsave_once) {
3116		ioc->dbg_fwsave_once = BFA_FALSE;
3117		if (ioc->dbg_fwsave_len) {
3118			tlen = ioc->dbg_fwsave_len;
3119			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3120		}
3121	}
3122}
3123
3124/*
3125 * Firmware failure detected. Start recovery actions.
3126 */
3127static void
3128bfa_ioc_recover(struct bfa_ioc_s *ioc)
3129{
3130	bfa_ioc_stats(ioc, ioc_hbfails);
3131	ioc->stats.hb_count = ioc->hb_count;
3132	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3133}
3134
3135/*
3136 *  BFA IOC PF private functions
3137 */
3138static void
3139bfa_iocpf_timeout(void *ioc_arg)
3140{
3141	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3142
3143	bfa_trc(ioc, 0);
3144	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3145}
3146
3147static void
3148bfa_iocpf_sem_timeout(void *ioc_arg)
3149{
3150	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3151
3152	bfa_ioc_hw_sem_get(ioc);
3153}
3154
3155static void
3156bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3157{
3158	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3159
3160	bfa_trc(ioc, fwstate);
3161
3162	if (fwstate == BFI_IOC_DISABLED) {
3163		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3164		return;
3165	}
3166
3167	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
3168		bfa_iocpf_timeout(ioc);
3169	else {
3170		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3171		bfa_iocpf_poll_timer_start(ioc);
3172	}
3173}
3174
3175static void
3176bfa_iocpf_poll_timeout(void *ioc_arg)
3177{
3178	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3179
3180	bfa_ioc_poll_fwinit(ioc);
3181}
3182
3183/*
3184 *  bfa timer function
3185 */
3186void
3187bfa_timer_beat(struct bfa_timer_mod_s *mod)
3188{
3189	struct list_head *qh = &mod->timer_q;
3190	struct list_head *qe, *qe_next;
3191	struct bfa_timer_s *elem;
3192	struct list_head timedout_q;
3193
3194	INIT_LIST_HEAD(&timedout_q);
3195
3196	qe = bfa_q_next(qh);
3197
3198	while (qe != qh) {
3199		qe_next = bfa_q_next(qe);
3200
3201		elem = (struct bfa_timer_s *) qe;
3202		if (elem->timeout <= BFA_TIMER_FREQ) {
3203			elem->timeout = 0;
3204			list_del(&elem->qe);
3205			list_add_tail(&elem->qe, &timedout_q);
3206		} else {
3207			elem->timeout -= BFA_TIMER_FREQ;
3208		}
3209
3210		qe = qe_next;	/* go to next elem */
3211	}
3212
3213	/*
3214	 * Pop all the timeout entries
3215	 */
3216	while (!list_empty(&timedout_q)) {
3217		bfa_q_deq(&timedout_q, &elem);
3218		elem->timercb(elem->arg);
3219	}
3220}
3221
3222/*
3223 * Should be called with lock protection
3224 */
3225void
3226bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3227		    void (*timercb) (void *), void *arg, unsigned int timeout)
3228{
3229
3230	WARN_ON(timercb == NULL);
3231	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3232
3233	timer->timeout = timeout;
3234	timer->timercb = timercb;
3235	timer->arg = arg;
3236
3237	list_add_tail(&timer->qe, &mod->timer_q);
3238}
3239
3240/*
3241 * Should be called with lock protection
3242 */
3243void
3244bfa_timer_stop(struct bfa_timer_s *timer)
3245{
3246	WARN_ON(list_empty(&timer->qe));
3247
3248	list_del(&timer->qe);
3249}
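
/*
 * Usage sketch (my_timer/my_cb/my_arg are placeholders): a one-shot
 * timeout is armed with bfa_timer_begin() and driven by a periodic
 * caller of bfa_timer_beat(), which decrements each armed timer by
 * BFA_TIMER_FREQ per call and fires it once the remaining time drops
 * to one period or less:
 *
 *	bfa_timer_begin(mod, &my_timer, my_cb, my_arg, 3000);
 *	...
 *	bfa_timer_stop(&my_timer);	(cancel before it fires)
 */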
3250
3251/*
3252 *	ASIC block related
3253 */
3254static void
3255bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3256{
3257	struct bfa_ablk_cfg_inst_s *cfg_inst;
3258	int i, j;
3259	u16	be16;
3260
3261	for (i = 0; i < BFA_ABLK_MAX; i++) {
3262		cfg_inst = &cfg->inst[i];
3263		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3264			be16 = cfg_inst->pf_cfg[j].pers;
3265			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3266			be16 = cfg_inst->pf_cfg[j].num_qpairs;
3267			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3268			be16 = cfg_inst->pf_cfg[j].num_vectors;
3269			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3270			be16 = cfg_inst->pf_cfg[j].bw_min;
3271			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3272			be16 = cfg_inst->pf_cfg[j].bw_max;
3273			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3274		}
3275	}
3276}
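
/*
 * Endianness example: a pf_cfg[j].num_qpairs arriving from f/w as
 * big-endian 0x0100 reads back as 256 on a little-endian host only
 * after the be16_to_cpu() conversion above.
 */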
3277
3278static void
3279bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3280{
3281	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3282	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3283	bfa_ablk_cbfn_t cbfn;
3284
3285	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3286	bfa_trc(ablk->ioc, msg->mh.msg_id);
3287
3288	switch (msg->mh.msg_id) {
3289	case BFI_ABLK_I2H_QUERY:
3290		if (rsp->status == BFA_STATUS_OK) {
3291			memcpy(ablk->cfg, ablk->dma_addr.kva,
3292				sizeof(struct bfa_ablk_cfg_s));
3293			bfa_ablk_config_swap(ablk->cfg);
3294			ablk->cfg = NULL;
3295		}
3296		break;
3297
3298	case BFI_ABLK_I2H_ADPT_CONFIG:
3299	case BFI_ABLK_I2H_PORT_CONFIG:
3300		/* update config port mode */
3301		ablk->ioc->port_mode_cfg = rsp->port_mode;
3302		break;
3303
3304	case BFI_ABLK_I2H_PF_DELETE:
3305	case BFI_ABLK_I2H_PF_UPDATE:
3306	case BFI_ABLK_I2H_OPTROM_ENABLE:
3307	case BFI_ABLK_I2H_OPTROM_DISABLE:
3308		/* No-op */
3309		break;
3310
3311	case BFI_ABLK_I2H_PF_CREATE:
3312		*(ablk->pcifn) = rsp->pcifn;
3313		ablk->pcifn = NULL;
3314		break;
3315
3316	default:
3317		WARN_ON(1);
3318	}
3319
3320	ablk->busy = BFA_FALSE;
3321	if (ablk->cbfn) {
3322		cbfn = ablk->cbfn;
3323		ablk->cbfn = NULL;
3324		cbfn(ablk->cbarg, rsp->status);
3325	}
3326}
3327
3328static void
3329bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3330{
3331	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3332
3333	bfa_trc(ablk->ioc, event);
3334
3335	switch (event) {
3336	case BFA_IOC_E_ENABLED:
3337		WARN_ON(ablk->busy != BFA_FALSE);
3338		break;
3339
3340	case BFA_IOC_E_DISABLED:
3341	case BFA_IOC_E_FAILED:
3342		/* Fail any pending requests */
3343		ablk->pcifn = NULL;
3344		if (ablk->busy) {
3345			if (ablk->cbfn)
3346				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3347			ablk->cbfn = NULL;
3348			ablk->busy = BFA_FALSE;
3349		}
3350		break;
3351
3352	default:
3353		WARN_ON(1);
3354		break;
3355	}
3356}
3357
3358u32
3359bfa_ablk_meminfo(void)
3360{
3361	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3362}
3363
3364void
3365bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3366{
3367	ablk->dma_addr.kva = dma_kva;
3368	ablk->dma_addr.pa  = dma_pa;
3369}
3370
3371void
3372bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3373{
3374	ablk->ioc = ioc;
3375
3376	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3377	bfa_q_qe_init(&ablk->ioc_notify);
3378	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3379	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3380}
3381
3382bfa_status_t
3383bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3384		bfa_ablk_cbfn_t cbfn, void *cbarg)
3385{
3386	struct bfi_ablk_h2i_query_s *m;
3387
3388	WARN_ON(!ablk_cfg);
3389
3390	if (!bfa_ioc_is_operational(ablk->ioc)) {
3391		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3392		return BFA_STATUS_IOC_FAILURE;
3393	}
3394
3395	if (ablk->busy) {
3396		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3397		return  BFA_STATUS_DEVBUSY;
3398	}
3399
3400	ablk->cfg = ablk_cfg;
3401	ablk->cbfn  = cbfn;
3402	ablk->cbarg = cbarg;
3403	ablk->busy  = BFA_TRUE;
3404
3405	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3406	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3407		    bfa_ioc_portid(ablk->ioc));
3408	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3409	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3410
3411	return BFA_STATUS_OK;
3412}
3413
3414bfa_status_t
3415bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3416		u8 port, enum bfi_pcifn_class personality,
3417		u16 bw_min, u16 bw_max,
3418		bfa_ablk_cbfn_t cbfn, void *cbarg)
3419{
3420	struct bfi_ablk_h2i_pf_req_s *m;
3421
3422	if (!bfa_ioc_is_operational(ablk->ioc)) {
3423		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3424		return BFA_STATUS_IOC_FAILURE;
3425	}
3426
3427	if (ablk->busy) {
3428		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3429		return  BFA_STATUS_DEVBUSY;
3430	}
3431
3432	ablk->pcifn = pcifn;
3433	ablk->cbfn = cbfn;
3434	ablk->cbarg = cbarg;
3435	ablk->busy  = BFA_TRUE;
3436
3437	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3438	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3439		    bfa_ioc_portid(ablk->ioc));
3440	m->pers = cpu_to_be16((u16)personality);
3441	m->bw_min = cpu_to_be16(bw_min);
3442	m->bw_max = cpu_to_be16(bw_max);
3443	m->port = port;
3444	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3445
3446	return BFA_STATUS_OK;
3447}
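
/*
 * Editor's usage sketch -- not part of the original source.  Creating an
 * Ethernet-personality PF on port 0 with a min/max bandwidth pair (units
 * as defined by the firmware interface).  The firmware writes the new
 * PCI function number through the pcifn out-pointer before the
 * completion callback runs (see BFI_ABLK_I2H_PF_CREATE handling above).
 * The "ex_" names are hypothetical.
 */
static u16 ex_new_pcifn;

static bfa_status_t __maybe_unused
ex_create_eth_pf(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t done, void *arg)
{
	return bfa_ablk_pf_create(ablk, &ex_new_pcifn, 0,
				  BFI_PCIFN_CLASS_ETH, 2000, 8000, done, arg);
}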
3448
3449bfa_status_t
3450bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3451		bfa_ablk_cbfn_t cbfn, void *cbarg)
3452{
3453	struct bfi_ablk_h2i_pf_req_s *m;
3454
3455	if (!bfa_ioc_is_operational(ablk->ioc)) {
3456		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3457		return BFA_STATUS_IOC_FAILURE;
3458	}
3459
3460	if (ablk->busy) {
3461		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3462		return  BFA_STATUS_DEVBUSY;
3463	}
3464
3465	ablk->cbfn  = cbfn;
3466	ablk->cbarg = cbarg;
3467	ablk->busy  = BFA_TRUE;
3468
3469	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3470	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3471		    bfa_ioc_portid(ablk->ioc));
3472	m->pcifn = (u8)pcifn;
3473	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3474
3475	return BFA_STATUS_OK;
3476}
3477
3478bfa_status_t
3479bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3480		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3481{
3482	struct bfi_ablk_h2i_cfg_req_s *m;
3483
3484	if (!bfa_ioc_is_operational(ablk->ioc)) {
3485		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3486		return BFA_STATUS_IOC_FAILURE;
3487	}
3488
3489	if (ablk->busy) {
3490		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3491		return  BFA_STATUS_DEVBUSY;
3492	}
3493
3494	ablk->cbfn  = cbfn;
3495	ablk->cbarg = cbarg;
3496	ablk->busy  = BFA_TRUE;
3497
3498	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3499	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3500		    bfa_ioc_portid(ablk->ioc));
3501	m->mode = (u8)mode;
3502	m->max_pf = (u8)max_pf;
3503	m->max_vf = (u8)max_vf;
3504	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3505
3506	return BFA_STATUS_OK;
3507}
3508
3509bfa_status_t
3510bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3511		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3512{
3513	struct bfi_ablk_h2i_cfg_req_s *m;
3514
3515	if (!bfa_ioc_is_operational(ablk->ioc)) {
3516		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3517		return BFA_STATUS_IOC_FAILURE;
3518	}
3519
3520	if (ablk->busy) {
3521		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3522		return  BFA_STATUS_DEVBUSY;
3523	}
3524
3525	ablk->cbfn  = cbfn;
3526	ablk->cbarg = cbarg;
3527	ablk->busy  = BFA_TRUE;
3528
3529	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3530	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3531		bfa_ioc_portid(ablk->ioc));
3532	m->port = (u8)port;
3533	m->mode = (u8)mode;
3534	m->max_pf = (u8)max_pf;
3535	m->max_vf = (u8)max_vf;
3536	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3537
3538	return BFA_STATUS_OK;
3539}
3540
3541bfa_status_t
3542bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3543		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3544{
3545	struct bfi_ablk_h2i_pf_req_s *m;
3546
3547	if (!bfa_ioc_is_operational(ablk->ioc)) {
3548		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3549		return BFA_STATUS_IOC_FAILURE;
3550	}
3551
3552	if (ablk->busy) {
3553		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3554		return  BFA_STATUS_DEVBUSY;
3555	}
3556
3557	ablk->cbfn  = cbfn;
3558	ablk->cbarg = cbarg;
3559	ablk->busy  = BFA_TRUE;
3560
3561	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3562	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3563		bfa_ioc_portid(ablk->ioc));
3564	m->pcifn = (u8)pcifn;
3565	m->bw_min = cpu_to_be16(bw_min);
3566	m->bw_max = cpu_to_be16(bw_max);
3567	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3568
3569	return BFA_STATUS_OK;
3570}
3571
3572bfa_status_t
3573bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3574{
3575	struct bfi_ablk_h2i_optrom_s *m;
3576
3577	if (!bfa_ioc_is_operational(ablk->ioc)) {
3578		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3579		return BFA_STATUS_IOC_FAILURE;
3580	}
3581
3582	if (ablk->busy) {
3583		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3584		return  BFA_STATUS_DEVBUSY;
3585	}
3586
3587	ablk->cbfn  = cbfn;
3588	ablk->cbarg = cbarg;
3589	ablk->busy  = BFA_TRUE;
3590
3591	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3592	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3593		bfa_ioc_portid(ablk->ioc));
3594	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3595
3596	return BFA_STATUS_OK;
3597}
3598
3599bfa_status_t
3600bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3601{
3602	struct bfi_ablk_h2i_optrom_s *m;
3603
3604	if (!bfa_ioc_is_operational(ablk->ioc)) {
3605		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3606		return BFA_STATUS_IOC_FAILURE;
3607	}
3608
3609	if (ablk->busy) {
3610		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3611		return  BFA_STATUS_DEVBUSY;
3612	}
3613
3614	ablk->cbfn  = cbfn;
3615	ablk->cbarg = cbarg;
3616	ablk->busy  = BFA_TRUE;
3617
3618	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3619	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3620		bfa_ioc_portid(ablk->ioc));
3621	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3622
3623	return BFA_STATUS_OK;
3624}
3625
3626/*
3627 *	SFP module specific
3628 */
3629
3630/* forward declarations */
3631static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3632static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3633static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3634				enum bfa_port_speed portspeed);
3635
3636static void
3637bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3638{
3639	bfa_trc(sfp, sfp->lock);
3640	if (sfp->cbfn)
3641		sfp->cbfn(sfp->cbarg, sfp->status);
3642	sfp->lock = 0;
3643	sfp->cbfn = NULL;
3644}
3645
3646static void
3647bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3648{
3649	bfa_trc(sfp, sfp->portspeed);
3650	if (sfp->media) {
3651		bfa_sfp_media_get(sfp);
3652		if (sfp->state_query_cbfn)
3653			sfp->state_query_cbfn(sfp->state_query_cbarg,
3654					sfp->status);
3655		sfp->media = NULL;
3656	}
3657
3658	if (sfp->portspeed) {
3659		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3660		if (sfp->state_query_cbfn)
3661			sfp->state_query_cbfn(sfp->state_query_cbarg,
3662					sfp->status);
3663		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3664	}
3665
3666	sfp->state_query_lock = 0;
3667	sfp->state_query_cbfn = NULL;
3668}
3669
3670/*
3671 *	IOC event handler.
3672 */
3673static void
3674bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3675{
3676	struct bfa_sfp_s *sfp = sfp_arg;
3677
3678	bfa_trc(sfp, event);
3679	bfa_trc(sfp, sfp->lock);
3680	bfa_trc(sfp, sfp->state_query_lock);
3681
3682	switch (event) {
3683	case BFA_IOC_E_DISABLED:
3684	case BFA_IOC_E_FAILED:
3685		if (sfp->lock) {
3686			sfp->status = BFA_STATUS_IOC_FAILURE;
3687			bfa_cb_sfp_show(sfp);
3688		}
3689
3690		if (sfp->state_query_lock) {
3691			sfp->status = BFA_STATUS_IOC_FAILURE;
3692			bfa_cb_sfp_state_query(sfp);
3693		}
3694		break;
3695
3696	default:
3697		break;
3698	}
3699}
3700
3701/*
3702 * Post an SFP State Change Notification (SCN) to the AEN
3703 */
3704static void
3705bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3706{
3707	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3708	struct bfa_aen_entry_s  *aen_entry;
3709	enum bfa_port_aen_event aen_evt = 0;
3710
3711	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3712		      ((u64)rsp->event));
3713
3714	bfad_get_aen_entry(bfad, aen_entry);
3715	if (!aen_entry)
3716		return;
3717
3718	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3719	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3720	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3721
3722	switch (rsp->event) {
3723	case BFA_SFP_SCN_INSERTED:
3724		aen_evt = BFA_PORT_AEN_SFP_INSERT;
3725		break;
3726	case BFA_SFP_SCN_REMOVED:
3727		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3728		break;
3729	case BFA_SFP_SCN_FAILED:
3730		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3731		break;
3732	case BFA_SFP_SCN_UNSUPPORT:
3733		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3734		break;
3735	case BFA_SFP_SCN_POM:
3736		aen_evt = BFA_PORT_AEN_SFP_POM;
3737		aen_entry->aen_data.port.level = rsp->pomlvl;
3738		break;
3739	default:
3740		bfa_trc(sfp, rsp->event);
3741		WARN_ON(1);
3742	}
3743
3744	/* Send the AEN notification */
3745	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3746				  BFA_AEN_CAT_PORT, aen_evt);
3747}
3748
3749/*
3750 *	SFP get data send
3751 */
3752static void
3753bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3754{
3755	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3756
3757	bfa_trc(sfp, req->memtype);
3758
3759	/* build host command */
3760	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3761			bfa_ioc_portid(sfp->ioc));
3762
3763	/* send mbox cmd */
3764	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3765}
3766
3767/*
3768 *	SFP is valid, read sfp data
3769 */
3770static void
3771bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3772{
3773	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3774
3775	WARN_ON(sfp->lock != 0);
3776	bfa_trc(sfp, sfp->state);
3777
3778	sfp->lock = 1;
3779	sfp->memtype = memtype;
3780	req->memtype = memtype;
3781
3782	/* Setup SG list */
3783	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3784
3785	bfa_sfp_getdata_send(sfp);
3786}
3787
3788/*
3789 *	SFP scn handler
3790 */
3791static void
3792bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3793{
3794	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3795
3796	switch (rsp->event) {
3797	case BFA_SFP_SCN_INSERTED:
3798		sfp->state = BFA_SFP_STATE_INSERTED;
3799		sfp->data_valid = 0;
3800		bfa_sfp_scn_aen_post(sfp, rsp);
3801		break;
3802	case BFA_SFP_SCN_REMOVED:
3803		sfp->state = BFA_SFP_STATE_REMOVED;
3804		sfp->data_valid = 0;
3805		bfa_sfp_scn_aen_post(sfp, rsp);
3806		break;
3807	case BFA_SFP_SCN_FAILED:
3808		sfp->state = BFA_SFP_STATE_FAILED;
3809		sfp->data_valid = 0;
3810		bfa_sfp_scn_aen_post(sfp, rsp);
3811		break;
3812	case BFA_SFP_SCN_UNSUPPORT:
3813		sfp->state = BFA_SFP_STATE_UNSUPPORT;
3814		bfa_sfp_scn_aen_post(sfp, rsp);
3815		if (!sfp->lock)
3816			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3817		break;
3818	case BFA_SFP_SCN_POM:
3819		bfa_sfp_scn_aen_post(sfp, rsp);
3820		break;
3821	case BFA_SFP_SCN_VALID:
3822		sfp->state = BFA_SFP_STATE_VALID;
3823		if (!sfp->lock)
3824			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3825		break;
3826	default:
3827		bfa_trc(sfp, rsp->event);
3828		WARN_ON(1);
3829	}
3830}
3831
3832/*
3833 * SFP show complete
3834 */
3835static void
3836bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3837{
3838	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3839
3840	if (!sfp->lock) {
3841		/*
3842		 * receiving response after ioc failure
3843		 */
3844		bfa_trc(sfp, sfp->lock);
3845		return;
3846	}
3847
3848	bfa_trc(sfp, rsp->status);
3849	if (rsp->status == BFA_STATUS_OK) {
3850		sfp->data_valid = 1;
3851		if (sfp->state == BFA_SFP_STATE_VALID)
3852			sfp->status = BFA_STATUS_OK;
3853		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3854			sfp->status = BFA_STATUS_SFP_UNSUPP;
3855		else
3856			bfa_trc(sfp, sfp->state);
3857	} else {
3858		sfp->data_valid = 0;
3859		sfp->status = rsp->status;
3860		/* sfpshow shouldn't change sfp state */
3861	}
3862
3863	bfa_trc(sfp, sfp->memtype);
3864	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3865		bfa_trc(sfp, sfp->data_valid);
3866		if (sfp->data_valid) {
3867			u32	size = sizeof(struct sfp_mem_s);
3868			u8 *des = (u8 *)(sfp->sfpmem);
3869			memcpy(des, sfp->dbuf_kva, size);
3870		}
3871		/*
3872		 * Queue completion callback.
3873		 */
3874		bfa_cb_sfp_show(sfp);
3875	} else
3876		sfp->lock = 0;
3877
3878	bfa_trc(sfp, sfp->state_query_lock);
3879	if (sfp->state_query_lock) {
3880		sfp->state = rsp->state;
3881		/* Complete callback */
3882		bfa_cb_sfp_state_query(sfp);
3883	}
3884}
3885
3886/*
3887 *	SFP query fw sfp state
3888 */
3889static void
3890bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3891{
3892	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3893
3894	/* Should not be doing query if not in _INIT state */
3895	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3896	WARN_ON(sfp->state_query_lock != 0);
3897	bfa_trc(sfp, sfp->state);
3898
3899	sfp->state_query_lock = 1;
3900	req->memtype = 0;
3901
3902	if (!sfp->lock)
3903		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3904}
3905
3906static void
3907bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3908{
3909	enum bfa_defs_sfp_media_e *media = sfp->media;
3910
3911	*media = BFA_SFP_MEDIA_UNKNOWN;
3912
3913	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3914		*media = BFA_SFP_MEDIA_UNSUPPORT;
3915	else if (sfp->state == BFA_SFP_STATE_VALID) {
3916		union sfp_xcvr_e10g_code_u e10g;
3917		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3918		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3919				(sfpmem->srlid_base.xcvr[5] >> 1);
3920
3921		e10g.b = sfpmem->srlid_base.xcvr[0];
3922		bfa_trc(sfp, e10g.b);
3923		bfa_trc(sfp, xmtr_tech);
3924		/* check fc transmitter tech */
3925		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3926		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3927		    (xmtr_tech & SFP_XMTR_TECH_CA))
3928			*media = BFA_SFP_MEDIA_CU;
3929		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3930			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3931			*media = BFA_SFP_MEDIA_EL;
3932		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3933			 (xmtr_tech & SFP_XMTR_TECH_LC))
3934			*media = BFA_SFP_MEDIA_LW;
3935		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3936			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3937			 (xmtr_tech & SFP_XMTR_TECH_SA))
3938			*media = BFA_SFP_MEDIA_SW;
3939		/* Check 10G Ethernet Compliance code */
3940		else if (e10g.r.e10g_sr)
3941			*media = BFA_SFP_MEDIA_SW;
3942		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3943			*media = BFA_SFP_MEDIA_LW;
3944		else if (e10g.r.e10g_unall)
3945			*media = BFA_SFP_MEDIA_UNKNOWN;
3946		else
3947			bfa_trc(sfp, 0);
3948	} else
3949		bfa_trc(sfp, sfp->state);
3950}
3951
3952static bfa_status_t
3953bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3954{
3955	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3956	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3957	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3958	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3959
3960	if (portspeed == BFA_PORT_SPEED_10GBPS) {
3961		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3962			return BFA_STATUS_OK;
3963		else {
3964			bfa_trc(sfp, e10g.b);
3965			return BFA_STATUS_UNSUPP_SPEED;
3966		}
3967	}
3968	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3969	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3970	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3971	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3972	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3973		return BFA_STATUS_OK;
3974	else {
3975		bfa_trc(sfp, portspeed);
3976		bfa_trc(sfp, fc3.b);
3977		bfa_trc(sfp, e10g.b);
3978		return BFA_STATUS_UNSUPP_SPEED;
3979	}
3980}
3981
3982/*
3983 *	SFP hmbox handler
3984 */
3985void
3986bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3987{
3988	struct bfa_sfp_s *sfp = sfparg;
3989
3990	switch (msg->mh.msg_id) {
3991	case BFI_SFP_I2H_SHOW:
3992		bfa_sfp_show_comp(sfp, msg);
3993		break;
3994
3995	case BFI_SFP_I2H_SCN:
3996		bfa_sfp_scn(sfp, msg);
3997		break;
3998
3999	default:
4000		bfa_trc(sfp, msg->mh.msg_id);
4001		WARN_ON(1);
4002	}
4003}
4004
4005/*
4006 *	Return DMA memory needed by sfp module.
4007 */
4008u32
4009bfa_sfp_meminfo(void)
4010{
4011	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4012}
4013
4014/*
4015 *	Attach virtual and physical memory for SFP.
4016 */
4017void
4018bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4019		struct bfa_trc_mod_s *trcmod)
4020{
4021	sfp->dev = dev;
4022	sfp->ioc = ioc;
4023	sfp->trcmod = trcmod;
4024
4025	sfp->cbfn = NULL;
4026	sfp->cbarg = NULL;
4027	sfp->sfpmem = NULL;
4028	sfp->lock = 0;
4029	sfp->data_valid = 0;
4030	sfp->state = BFA_SFP_STATE_INIT;
4031	sfp->state_query_lock = 0;
4032	sfp->state_query_cbfn = NULL;
4033	sfp->state_query_cbarg = NULL;
4034	sfp->media = NULL;
4035	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4036	sfp->is_elb = BFA_FALSE;
4037
4038	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4039	bfa_q_qe_init(&sfp->ioc_notify);
4040	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4041	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4042}
4043
4044/*
4045 *	Claim Memory for SFP
4046 */
4047void
4048bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4049{
4050	sfp->dbuf_kva   = dm_kva;
4051	sfp->dbuf_pa    = dm_pa;
4052	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4053
4054	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4055	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4056}
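
/*
 * Editor's sketch of the meminfo/memclaim carving protocol -- not part
 * of the original source.  The parent sizes one DMA region with
 * bfa_sfp_meminfo() and hands the kva/pa pair to bfa_sfp_memclaim().
 * dma_alloc_coherent() stands in for however the surrounding driver
 * actually allocates its DMA block; "ex_" names are hypothetical.
 */
static int __maybe_unused
ex_sfp_setup_dma(struct bfa_sfp_s *sfp, struct device *dev)
{
	u32 sz = bfa_sfp_meminfo();
	dma_addr_t pa;
	u8 *kva = dma_alloc_coherent(dev, sz, &pa, GFP_KERNEL);

	if (!kva)
		return -ENOMEM;
	bfa_sfp_memclaim(sfp, kva, (u64)pa);
	return 0;
}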
4057
4058/*
4059 * Show SFP eeprom content
4060 *
4061 * @param[in] sfp   - bfa sfp module
4062 *
4063 * @param[out] sfpmem - sfp eeprom data
4064 *
4065 */
4066bfa_status_t
4067bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4068		bfa_cb_sfp_t cbfn, void *cbarg)
4069{
4070
4071	if (!bfa_ioc_is_operational(sfp->ioc)) {
4072		bfa_trc(sfp, 0);
4073		return BFA_STATUS_IOC_NON_OP;
4074	}
4075
4076	if (sfp->lock) {
4077		bfa_trc(sfp, 0);
4078		return BFA_STATUS_DEVBUSY;
4079	}
4080
4081	sfp->cbfn = cbfn;
4082	sfp->cbarg = cbarg;
4083	sfp->sfpmem = sfpmem;
4084
4085	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4086	return BFA_STATUS_OK;
4087}
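
/*
 * Editor's usage sketch -- not part of the original source.  Dumping the
 * SFP EEPROM: bfa_sfp_show() only latches the request; the sfp_mem_s
 * buffer is filled and the callback invoked when BFI_SFP_I2H_SHOW
 * completes.  BFA_STATUS_DEVBUSY means a show is already in flight.
 * "ex_" names are hypothetical.
 */
static struct sfp_mem_s ex_sfpmem;

static bfa_status_t __maybe_unused
ex_dump_sfp_eeprom(struct bfa_sfp_s *sfp, bfa_cb_sfp_t done, void *arg)
{
	return bfa_sfp_show(sfp, &ex_sfpmem, done, arg);
}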
4088
4089/*
4090 * Return SFP Media type
4091 *
4092 * @param[in] sfp   - bfa sfp module
4093 *
4094 * @param[out] media - SFP media type
4095 *
4096 */
4097bfa_status_t
4098bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4099		bfa_cb_sfp_t cbfn, void *cbarg)
4100{
4101	if (!bfa_ioc_is_operational(sfp->ioc)) {
4102		bfa_trc(sfp, 0);
4103		return BFA_STATUS_IOC_NON_OP;
4104	}
4105
4106	sfp->media = media;
4107	if (sfp->state == BFA_SFP_STATE_INIT) {
4108		if (sfp->state_query_lock) {
4109			bfa_trc(sfp, 0);
4110			return BFA_STATUS_DEVBUSY;
4111		} else {
4112			sfp->state_query_cbfn = cbfn;
4113			sfp->state_query_cbarg = cbarg;
4114			bfa_sfp_state_query(sfp);
4115			return BFA_STATUS_SFP_NOT_READY;
4116		}
4117	}
4118
4119	bfa_sfp_media_get(sfp);
4120	return BFA_STATUS_OK;
4121}
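
/*
 * Editor's usage sketch -- not part of the original source.  When the
 * SFP module is still in INIT state, bfa_sfp_media() returns
 * BFA_STATUS_SFP_NOT_READY and *media is filled in just before the
 * callback runs; otherwise the media type is available synchronously.
 * "ex_" names are hypothetical.
 */
static enum bfa_defs_sfp_media_e ex_media;

static bfa_status_t __maybe_unused
ex_get_media(struct bfa_sfp_s *sfp, bfa_cb_sfp_t done, void *arg)
{
	bfa_status_t rc = bfa_sfp_media(sfp, &ex_media, done, arg);

	if (rc == BFA_STATUS_OK) {
		/* ex_media is valid right now; no callback will fire */
	}
	return rc;
}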
4122
4123/*
4124 * Check if user set port speed is allowed by the SFP
4125 *
4126 * @param[in] sfp   - bfa sfp module
4127 * @param[in] portspeed - port speed from user
4128 *
4129 */
4130bfa_status_t
4131bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4132		bfa_cb_sfp_t cbfn, void *cbarg)
4133{
4134	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4135
4136	if (!bfa_ioc_is_operational(sfp->ioc))
4137		return BFA_STATUS_IOC_NON_OP;
4138
4139	/* For Mezz cards, all speeds are allowed */
4140	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4141		return BFA_STATUS_OK;
4142
4143	/* Check SFP state */
4144	sfp->portspeed = portspeed;
4145	if (sfp->state == BFA_SFP_STATE_INIT) {
4146		if (sfp->state_query_lock) {
4147			bfa_trc(sfp, 0);
4148			return BFA_STATUS_DEVBUSY;
4149		} else {
4150			sfp->state_query_cbfn = cbfn;
4151			sfp->state_query_cbarg = cbarg;
4152			bfa_sfp_state_query(sfp);
4153			return BFA_STATUS_SFP_NOT_READY;
4154		}
4155	}
4156
4157	if (sfp->state == BFA_SFP_STATE_REMOVED ||
4158	    sfp->state == BFA_SFP_STATE_FAILED) {
4159		bfa_trc(sfp, sfp->state);
4160		return BFA_STATUS_NO_SFP_DEV;
4161	}
4162
4163	if (sfp->state == BFA_SFP_STATE_INSERTED) {
4164		bfa_trc(sfp, sfp->state);
4165		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
4166	}
4167
4168	/* For electrical loopback (elb), all speeds are allowed */
4169	if (sfp->is_elb)
4170		return BFA_STATUS_OK;
4171
4172	return bfa_sfp_speed_valid(sfp, portspeed);
4173}
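
/*
 * Editor's usage sketch -- not part of the original source.  Validating
 * a user-requested speed: BFA_STATUS_SFP_NOT_READY means the verdict
 * arrives through the callback after the firmware state query finishes;
 * any other return is an immediate answer (OK, UNSUPP_SPEED,
 * NO_SFP_DEV, DEVBUSY).  "ex_" name is hypothetical.
 */
static bfa_status_t __maybe_unused
ex_check_8g(struct bfa_sfp_s *sfp, bfa_cb_sfp_t done, void *arg)
{
	return bfa_sfp_speed(sfp, BFA_PORT_SPEED_8GBPS, done, arg);
}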
4174
4175/*
4176 *	Flash module specific
4177 */
4178
4179/*
4180 * The FLASH DMA buffer should be big enough to hold both the MFG block
4181 * and the ASIC block (64k) at the same time, and should be 2k aligned
4182 * so a write segment does not cross a flash sector boundary.
4183 */
4184#define BFA_FLASH_SEG_SZ	2048
4185#define BFA_FLASH_DMA_BUF_SZ	\
4186	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
4187
4188static void
4189bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4190			int inst, int type)
4191{
4192	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4193	struct bfa_aen_entry_s  *aen_entry;
4194
4195	bfad_get_aen_entry(bfad, aen_entry);
4196	if (!aen_entry)
4197		return;
4198
4199	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4200	aen_entry->aen_data.audit.partition_inst = inst;
4201	aen_entry->aen_data.audit.partition_type = type;
4202
4203	/* Send the AEN notification */
4204	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4205				  BFA_AEN_CAT_AUDIT, event);
4206}
4207
4208static void
4209bfa_flash_cb(struct bfa_flash_s *flash)
4210{
4211	flash->op_busy = 0;
4212	if (flash->cbfn)
4213		flash->cbfn(flash->cbarg, flash->status);
4214}
4215
4216static void
4217bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4218{
4219	struct bfa_flash_s	*flash = cbarg;
4220
4221	bfa_trc(flash, event);
4222	switch (event) {
4223	case BFA_IOC_E_DISABLED:
4224	case BFA_IOC_E_FAILED:
4225		if (flash->op_busy) {
4226			flash->status = BFA_STATUS_IOC_FAILURE;
4227			flash->cbfn(flash->cbarg, flash->status);
4228			flash->op_busy = 0;
4229		}
4230		break;
4231
4232	default:
4233		break;
4234	}
4235}
4236
4237/*
4238 * Send flash attribute query request.
4239 *
4240 * @param[in] cbarg - callback argument
4241 */
4242static void
4243bfa_flash_query_send(void *cbarg)
4244{
4245	struct bfa_flash_s *flash = cbarg;
4246	struct bfi_flash_query_req_s *msg =
4247			(struct bfi_flash_query_req_s *) flash->mb.msg;
4248
4249	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4250		bfa_ioc_portid(flash->ioc));
4251	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4252		flash->dbuf_pa);
4253	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4254}
4255
4256/*
4257 * Send flash write request.
4258 *
4259 * @param[in] cbarg - callback argument
4260 */
4261static void
4262bfa_flash_write_send(struct bfa_flash_s *flash)
4263{
4264	struct bfi_flash_write_req_s *msg =
4265			(struct bfi_flash_write_req_s *) flash->mb.msg;
4266	u32	len;
4267
4268	msg->type = cpu_to_be32(flash->type);
4269	msg->instance = flash->instance;
4270	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4271	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4272		flash->residue : BFA_FLASH_DMA_BUF_SZ;
4273	msg->length = cpu_to_be32(len);
4274
4275	/* indicate if it's the last msg of the whole write operation */
4276	msg->last = (len == flash->residue) ? 1 : 0;
4277
4278	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4279			bfa_ioc_portid(flash->ioc));
4280	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4281	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4282	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4283
4284	flash->residue -= len;
4285	flash->offset += len;
4286}
4287
4288/*
4289 * Send flash read request.
4290 *
4291 * @param[in] cbarg - callback argument
4292 */
4293static void
4294bfa_flash_read_send(void *cbarg)
4295{
4296	struct bfa_flash_s *flash = cbarg;
4297	struct bfi_flash_read_req_s *msg =
4298			(struct bfi_flash_read_req_s *) flash->mb.msg;
4299	u32	len;
4300
4301	msg->type = cpu_to_be32(flash->type);
4302	msg->instance = flash->instance;
4303	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4304	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4305			flash->residue : BFA_FLASH_DMA_BUF_SZ;
4306	msg->length = cpu_to_be32(len);
4307	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4308		bfa_ioc_portid(flash->ioc));
4309	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4310	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4311}
4312
4313/*
4314 * Send flash erase request.
4315 *
4316 * @param[in] cbarg - callback argument
4317 */
4318static void
4319bfa_flash_erase_send(void *cbarg)
4320{
4321	struct bfa_flash_s *flash = cbarg;
4322	struct bfi_flash_erase_req_s *msg =
4323			(struct bfi_flash_erase_req_s *) flash->mb.msg;
4324
4325	msg->type = cpu_to_be32(flash->type);
4326	msg->instance = flash->instance;
4327	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4328			bfa_ioc_portid(flash->ioc));
4329	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4330}
4331
4332/*
4333 * Process flash response messages upon receiving interrupts.
4334 *
4335 * @param[in] flasharg - flash structure
4336 * @param[in] msg - message structure
4337 */
4338static void
4339bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4340{
4341	struct bfa_flash_s *flash = flasharg;
4342	u32	status;
4343
4344	union {
4345		struct bfi_flash_query_rsp_s *query;
4346		struct bfi_flash_erase_rsp_s *erase;
4347		struct bfi_flash_write_rsp_s *write;
4348		struct bfi_flash_read_rsp_s *read;
4349		struct bfi_flash_event_s *event;
4350		struct bfi_mbmsg_s   *msg;
4351	} m;
4352
4353	m.msg = msg;
4354	bfa_trc(flash, msg->mh.msg_id);
4355
4356	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4357		/* receiving response after ioc failure */
4358		bfa_trc(flash, 0x9999);
4359		return;
4360	}
4361
4362	switch (msg->mh.msg_id) {
4363	case BFI_FLASH_I2H_QUERY_RSP:
4364		status = be32_to_cpu(m.query->status);
4365		bfa_trc(flash, status);
4366		if (status == BFA_STATUS_OK) {
4367			u32	i;
4368			struct bfa_flash_attr_s *attr, *f;
4369
4370			attr = (struct bfa_flash_attr_s *) flash->ubuf;
4371			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4372			attr->status = be32_to_cpu(f->status);
4373			attr->npart = be32_to_cpu(f->npart);
4374			bfa_trc(flash, attr->status);
4375			bfa_trc(flash, attr->npart);
4376			for (i = 0; i < attr->npart; i++) {
4377				attr->part[i].part_type =
4378					be32_to_cpu(f->part[i].part_type);
4379				attr->part[i].part_instance =
4380					be32_to_cpu(f->part[i].part_instance);
4381				attr->part[i].part_off =
4382					be32_to_cpu(f->part[i].part_off);
4383				attr->part[i].part_size =
4384					be32_to_cpu(f->part[i].part_size);
4385				attr->part[i].part_len =
4386					be32_to_cpu(f->part[i].part_len);
4387				attr->part[i].part_status =
4388					be32_to_cpu(f->part[i].part_status);
4389			}
4390		}
4391		flash->status = status;
4392		bfa_flash_cb(flash);
4393		break;
4394	case BFI_FLASH_I2H_ERASE_RSP:
4395		status = be32_to_cpu(m.erase->status);
4396		bfa_trc(flash, status);
4397		flash->status = status;
4398		bfa_flash_cb(flash);
4399		break;
4400	case BFI_FLASH_I2H_WRITE_RSP:
4401		status = be32_to_cpu(m.write->status);
4402		bfa_trc(flash, status);
4403		if (status != BFA_STATUS_OK || flash->residue == 0) {
4404			flash->status = status;
4405			bfa_flash_cb(flash);
4406		} else {
4407			bfa_trc(flash, flash->offset);
4408			bfa_flash_write_send(flash);
4409		}
4410		break;
4411	case BFI_FLASH_I2H_READ_RSP:
4412		status = be32_to_cpu(m.read->status);
4413		bfa_trc(flash, status);
4414		if (status != BFA_STATUS_OK) {
4415			flash->status = status;
4416			bfa_flash_cb(flash);
4417		} else {
4418			u32 len = be32_to_cpu(m.read->length);
4419			bfa_trc(flash, flash->offset);
4420			bfa_trc(flash, len);
4421			memcpy(flash->ubuf + flash->offset,
4422				flash->dbuf_kva, len);
4423			flash->residue -= len;
4424			flash->offset += len;
4425			if (flash->residue == 0) {
4426				flash->status = status;
4427				bfa_flash_cb(flash);
4428			} else
4429				bfa_flash_read_send(flash);
4430		}
4431		break;
4432	case BFI_FLASH_I2H_BOOT_VER_RSP:
4433		break;
4434	case BFI_FLASH_I2H_EVENT:
4435		status = be32_to_cpu(m.event->status);
4436		bfa_trc(flash, status);
4437		if (status == BFA_STATUS_BAD_FWCFG)
4438			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4439		else if (status == BFA_STATUS_INVALID_VENDOR) {
4440			u32 param;
4441			param = be32_to_cpu(m.event->param);
4442			bfa_trc(flash, param);
4443			bfa_ioc_aen_post(flash->ioc,
4444				BFA_IOC_AEN_INVALID_VENDOR);
4445		}
4446		break;
4447
4448	default:
4449		WARN_ON(1);
4450	}
4451}
4452
4453/*
4454 * Flash memory info API.
4455 *
4456 * @param[in] mincfg - minimal cfg variable
4457 */
4458u32
4459bfa_flash_meminfo(bfa_boolean_t mincfg)
4460{
4461	/* min driver doesn't need flash */
4462	if (mincfg)
4463		return 0;
4464	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4465}
4466
4467/*
4468 * Flash attach API.
4469 *
4470 * @param[in] flash - flash structure
4471 * @param[in] ioc  - ioc structure
4472 * @param[in] dev  - device structure
4473 * @param[in] trcmod - trace module
4474 * @param[in] logmod - log module
4475 */
4476void
4477bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4478		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4479{
4480	flash->ioc = ioc;
4481	flash->trcmod = trcmod;
4482	flash->cbfn = NULL;
4483	flash->cbarg = NULL;
4484	flash->op_busy = 0;
4485
4486	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4487	bfa_q_qe_init(&flash->ioc_notify);
4488	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4489	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4490
4491	/* min driver doesn't need flash */
4492	if (mincfg) {
4493		flash->dbuf_kva = NULL;
4494		flash->dbuf_pa = 0;
4495	}
4496}
4497
4498/*
4499 * Claim memory for flash
4500 *
4501 * @param[in] flash - flash structure
4502 * @param[in] dm_kva - pointer to virtual memory address
4503 * @param[in] dm_pa - physical memory address
4504 * @param[in] mincfg - minimal cfg variable
4505 */
4506void
4507bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4508		bfa_boolean_t mincfg)
4509{
4510	if (mincfg)
4511		return;
4512
4513	flash->dbuf_kva = dm_kva;
4514	flash->dbuf_pa = dm_pa;
4515	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4516	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4517	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4518}
4519
4520/*
4521 * Get flash attribute.
4522 *
4523 * @param[in] flash - flash structure
4524 * @param[in] attr - flash attribute structure
4525 * @param[in] cbfn - callback function
4526 * @param[in] cbarg - callback argument
4527 *
4528 * Return status.
4529 */
4530bfa_status_t
4531bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4532		bfa_cb_flash_t cbfn, void *cbarg)
4533{
4534	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4535
4536	if (!bfa_ioc_is_operational(flash->ioc))
4537		return BFA_STATUS_IOC_NON_OP;
4538
4539	if (flash->op_busy) {
4540		bfa_trc(flash, flash->op_busy);
4541		return BFA_STATUS_DEVBUSY;
4542	}
4543
4544	flash->op_busy = 1;
4545	flash->cbfn = cbfn;
4546	flash->cbarg = cbarg;
4547	flash->ubuf = (u8 *) attr;
4548	bfa_flash_query_send(flash);
4549
4550	return BFA_STATUS_OK;
4551}
4552
4553/*
4554 * Erase flash partition.
4555 *
4556 * @param[in] flash - flash structure
4557 * @param[in] type - flash partition type
4558 * @param[in] instance - flash partition instance
4559 * @param[in] cbfn - callback function
4560 * @param[in] cbarg - callback argument
4561 *
4562 * Return status.
4563 */
4564bfa_status_t
4565bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4566		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4567{
4568	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4569	bfa_trc(flash, type);
4570	bfa_trc(flash, instance);
4571
4572	if (!bfa_ioc_is_operational(flash->ioc))
4573		return BFA_STATUS_IOC_NON_OP;
4574
4575	if (flash->op_busy) {
4576		bfa_trc(flash, flash->op_busy);
4577		return BFA_STATUS_DEVBUSY;
4578	}
4579
4580	flash->op_busy = 1;
4581	flash->cbfn = cbfn;
4582	flash->cbarg = cbarg;
4583	flash->type = type;
4584	flash->instance = instance;
4585
4586	bfa_flash_erase_send(flash);
4587	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4588				instance, type);
4589	return BFA_STATUS_OK;
4590}
4591
4592/*
4593 * Update flash partition.
4594 *
4595 * @param[in] flash - flash structure
4596 * @param[in] type - flash partition type
4597 * @param[in] instance - flash partition instance
4598 * @param[in] buf - update data buffer
4599 * @param[in] len - data buffer length
4600 * @param[in] offset - offset relative to the partition starting address
4601 * @param[in] cbfn - callback function
4602 * @param[in] cbarg - callback argument
4603 *
4604 * Return status.
4605 */
4606bfa_status_t
4607bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4608		u8 instance, void *buf, u32 len, u32 offset,
4609		bfa_cb_flash_t cbfn, void *cbarg)
4610{
4611	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4612	bfa_trc(flash, type);
4613	bfa_trc(flash, instance);
4614	bfa_trc(flash, len);
4615	bfa_trc(flash, offset);
4616
4617	if (!bfa_ioc_is_operational(flash->ioc))
4618		return BFA_STATUS_IOC_NON_OP;
4619
4620	/*
4621	 * 'len' must be on a word (4-byte) boundary
4622	 * 'offset' must be on a sector (16kb) boundary
4623	 */
4624	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4625		return BFA_STATUS_FLASH_BAD_LEN;
4626
4627	if (type == BFA_FLASH_PART_MFG)
4628		return BFA_STATUS_EINVAL;
4629
4630	if (flash->op_busy) {
4631		bfa_trc(flash, flash->op_busy);
4632		return BFA_STATUS_DEVBUSY;
4633	}
4634
4635	flash->op_busy = 1;
4636	flash->cbfn = cbfn;
4637	flash->cbarg = cbarg;
4638	flash->type = type;
4639	flash->instance = instance;
4640	flash->residue = len;
4641	flash->offset = 0;
4642	flash->addr_off = offset;
4643	flash->ubuf = buf;
4644
4645	bfa_flash_write_send(flash);
4646	return BFA_STATUS_OK;
4647}
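
/*
 * Editor's usage sketch -- not part of the original source.  The two
 * alignment rules enforced above (length on a 4-byte boundary, offset
 * on a 16kb sector boundary) pre-checked before submitting an update.
 * The "ex_" name is hypothetical and BFA_FLASH_PART_FWIMG is assumed
 * to be one of the writable enum bfa_flash_part_type values.
 */
static bfa_status_t __maybe_unused
ex_flash_update(struct bfa_flash_s *flash, void *img, u32 len,
		bfa_cb_flash_t done, void *arg)
{
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN; /* not word aligned */

	/* write the image at offset 0 of partition instance 0 */
	return bfa_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
				     img, len, 0, done, arg);
}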
4648
4649/*
4650 * Read flash partition.
4651 *
4652 * @param[in] flash - flash structure
4653 * @param[in] type - flash partition type
4654 * @param[in] instance - flash partition instance
4655 * @param[in] buf - read data buffer
4656 * @param[in] len - data buffer length
4657 * @param[in] offset - offset relative to the partition starting address
4658 * @param[in] cbfn - callback function
4659 * @param[in] cbarg - callback argument
4660 *
4661 * Return status.
4662 */
4663bfa_status_t
4664bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4665		u8 instance, void *buf, u32 len, u32 offset,
4666		bfa_cb_flash_t cbfn, void *cbarg)
4667{
4668	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4669	bfa_trc(flash, type);
4670	bfa_trc(flash, instance);
4671	bfa_trc(flash, len);
4672	bfa_trc(flash, offset);
4673
4674	if (!bfa_ioc_is_operational(flash->ioc))
4675		return BFA_STATUS_IOC_NON_OP;
4676
4677	/*
4678	 * 'len' must be on a word (4-byte) boundary
4679	 * 'offset' must be on a sector (16kb) boundary
4680	 */
4681	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4682		return BFA_STATUS_FLASH_BAD_LEN;
4683
4684	if (flash->op_busy) {
4685		bfa_trc(flash, flash->op_busy);
4686		return BFA_STATUS_DEVBUSY;
4687	}
4688
4689	flash->op_busy = 1;
4690	flash->cbfn = cbfn;
4691	flash->cbarg = cbarg;
4692	flash->type = type;
4693	flash->instance = instance;
4694	flash->residue = len;
4695	flash->offset = 0;
4696	flash->addr_off = offset;
4697	flash->ubuf = buf;
4698	bfa_flash_read_send(flash);
4699
4700	return BFA_STATUS_OK;
4701}
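
/*
 * Editor's usage sketch -- not part of the original source.  Reading
 * from a partition: the transfer is chunked internally into
 * BFA_FLASH_DMA_BUF_SZ pieces, with each BFI_FLASH_I2H_READ_RSP driving
 * the next bfa_flash_read_send() until residue reaches zero; the caller
 * only sees the final callback.  "ex_" name is hypothetical and
 * BFA_FLASH_PART_DRV is assumed to be a valid partition type.
 */
static bfa_status_t __maybe_unused
ex_flash_read(struct bfa_flash_s *flash, void *buf, u32 len,
	      bfa_cb_flash_t done, void *arg)
{
	return bfa_flash_read_part(flash, BFA_FLASH_PART_DRV, 0,
				   buf, len, 0, done, arg);
}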
4702
4703/*
4704 *	DIAG module specific
4705 */
4706
4707#define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
4708#define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
4709
4710/* IOC event handler */
4711static void
4712bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4713{
4714	struct bfa_diag_s *diag = diag_arg;
4715
4716	bfa_trc(diag, event);
4717	bfa_trc(diag, diag->block);
4718	bfa_trc(diag, diag->fwping.lock);
4719	bfa_trc(diag, diag->tsensor.lock);
4720
4721	switch (event) {
4722	case BFA_IOC_E_DISABLED:
4723	case BFA_IOC_E_FAILED:
4724		if (diag->fwping.lock) {
4725			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4726			diag->fwping.cbfn(diag->fwping.cbarg,
4727					diag->fwping.status);
4728			diag->fwping.lock = 0;
4729		}
4730
4731		if (diag->tsensor.lock) {
4732			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4733			diag->tsensor.cbfn(diag->tsensor.cbarg,
4734					   diag->tsensor.status);
4735			diag->tsensor.lock = 0;
4736		}
4737
4738		if (diag->block) {
4739			if (diag->timer_active) {
4740				bfa_timer_stop(&diag->timer);
4741				diag->timer_active = 0;
4742			}
4743
4744			diag->status = BFA_STATUS_IOC_FAILURE;
4745			diag->cbfn(diag->cbarg, diag->status);
4746			diag->block = 0;
4747		}
4748		break;
4749
4750	default:
4751		break;
4752	}
4753}
4754
4755static void
4756bfa_diag_memtest_done(void *cbarg)
4757{
4758	struct bfa_diag_s *diag = cbarg;
4759	struct bfa_ioc_s  *ioc = diag->ioc;
4760	struct bfa_diag_memtest_result *res = diag->result;
4761	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
4762	u32	pgnum, i;
4763
4764	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4765	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4766
4767	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4768			 sizeof(u32)); i++) {
4769		/* read test result from smem */
4770		*((u32 *) res + i) =
4771			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4772		loff += sizeof(u32);
4773	}
4774
4775	/* Reset IOC fwstates to BFI_IOC_UNINIT */
4776	bfa_ioc_reset_fwstate(ioc);
4777
4778	res->status = swab32(res->status);
4779	bfa_trc(diag, res->status);
4780
4781	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4782		diag->status = BFA_STATUS_OK;
4783	else {
4784		diag->status = BFA_STATUS_MEMTEST_FAILED;
4785		res->addr = swab32(res->addr);
4786		res->exp = swab32(res->exp);
4787		res->act = swab32(res->act);
4788		res->err_status = swab32(res->err_status);
4789		res->err_status1 = swab32(res->err_status1);
4790		res->err_addr = swab32(res->err_addr);
4791		bfa_trc(diag, res->addr);
4792		bfa_trc(diag, res->exp);
4793		bfa_trc(diag, res->act);
4794		bfa_trc(diag, res->err_status);
4795		bfa_trc(diag, res->err_status1);
4796		bfa_trc(diag, res->err_addr);
4797	}
4798	diag->timer_active = 0;
4799	diag->cbfn(diag->cbarg, diag->status);
4800	diag->block = 0;
4801}
4802
4803/*
4804 * Firmware ping
4805 */
4806
4807/*
4808 * Perform DMA test directly
4809 */
4810static void
4811diag_fwping_send(struct bfa_diag_s *diag)
4812{
4813	struct bfi_diag_fwping_req_s *fwping_req;
4814	u32	i;
4815
4816	bfa_trc(diag, diag->fwping.dbuf_pa);
4817
4818	/* fill DMA area with pattern */
4819	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4820		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4821
4822	/* Fill mbox msg */
4823	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4824
4825	/* Setup SG list */
4826	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4827			diag->fwping.dbuf_pa);
4828	/* Set up dma count */
4829	fwping_req->count = cpu_to_be32(diag->fwping.count);
4830	/* Set up data pattern */
4831	fwping_req->data = diag->fwping.data;
4832
4833	/* build host command */
4834	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4835		bfa_ioc_portid(diag->ioc));
4836
4837	/* send mbox cmd */
4838	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4839}
4840
4841static void
4842diag_fwping_comp(struct bfa_diag_s *diag,
4843		 struct bfi_diag_fwping_rsp_s *diag_rsp)
4844{
4845	u32	rsp_data = diag_rsp->data;
4846	u8	rsp_dma_status = diag_rsp->dma_status;
4847
4848	bfa_trc(diag, rsp_data);
4849	bfa_trc(diag, rsp_dma_status);
4850
4851	if (rsp_dma_status == BFA_STATUS_OK) {
4852		u32	i, pat;
4853		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4854			diag->fwping.data;
4855		/* Check mbox data */
4856		if (diag->fwping.data != rsp_data) {
4857			bfa_trc(diag, rsp_data);
4858			diag->fwping.result->dmastatus =
4859					BFA_STATUS_DATACORRUPTED;
4860			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4861			diag->fwping.cbfn(diag->fwping.cbarg,
4862					diag->fwping.status);
4863			diag->fwping.lock = 0;
4864			return;
4865		}
4866		/* Check dma pattern */
4867		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4868			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4869				bfa_trc(diag, i);
4870				bfa_trc(diag, pat);
4871				bfa_trc(diag,
4872					*((u32 *)diag->fwping.dbuf_kva + i));
4873				diag->fwping.result->dmastatus =
4874						BFA_STATUS_DATACORRUPTED;
4875				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4876				diag->fwping.cbfn(diag->fwping.cbarg,
4877						diag->fwping.status);
4878				diag->fwping.lock = 0;
4879				return;
4880			}
4881		}
4882		diag->fwping.result->dmastatus = BFA_STATUS_OK;
4883		diag->fwping.status = BFA_STATUS_OK;
4884		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4885		diag->fwping.lock = 0;
4886	} else {
4887		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4888		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4889		diag->fwping.lock = 0;
4890	}
4891}
4892
4893/*
4894 * Temperature Sensor
4895 */
4896
4897static void
4898diag_tempsensor_send(struct bfa_diag_s *diag)
4899{
4900	struct bfi_diag_ts_req_s *msg;
4901
4902	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4903	bfa_trc(diag, msg->temp);
4904	/* build host command */
4905	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4906		bfa_ioc_portid(diag->ioc));
4907	/* send mbox cmd */
4908	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4909}
4910
4911static void
4912diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4913{
4914	if (!diag->tsensor.lock) {
4915		/* receiving response after ioc failure */
4916		bfa_trc(diag, diag->tsensor.lock);
4917		return;
4918	}
4919
4920	/*
4921	 * The ASIC junction tempsensor is a register read operation;
4922	 * it always returns OK
4923	 */
4924	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4925	diag->tsensor.temp->ts_junc = rsp->ts_junc;
4926	diag->tsensor.temp->ts_brd = rsp->ts_brd;
4927
4928	if (rsp->ts_brd) {
4929		/* tsensor.temp->status is brd_temp status */
4930		diag->tsensor.temp->status = rsp->status;
4931		if (rsp->status == BFA_STATUS_OK) {
4932			diag->tsensor.temp->brd_temp =
4933				be16_to_cpu(rsp->brd_temp);
4934		} else
4935			diag->tsensor.temp->brd_temp = 0;
4936	}
4937
4938	bfa_trc(diag, rsp->status);
4939	bfa_trc(diag, rsp->ts_junc);
4940	bfa_trc(diag, rsp->temp);
4941	bfa_trc(diag, rsp->ts_brd);
4942	bfa_trc(diag, rsp->brd_temp);
4943
4944	/* tsensor status is always good because we always have junction temp */
4945	diag->tsensor.status = BFA_STATUS_OK;
4946	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4947	diag->tsensor.lock = 0;
4948}
4949
4950/*
4951 *	LED Test command
4952 */
4953static void
4954diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4955{
4956	struct bfi_diag_ledtest_req_s  *msg;
4957
4958	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4959	/* build host command */
4960	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4961			bfa_ioc_portid(diag->ioc));
4962
4963	/*
4964	 * convert the freq from N blinks per 10 sec to the crossbow ontime
4965	 * value; the division is done here (e.g. freq 10 -> 500/10 = 50)
4966	 */
4967	if (ledtest->freq)
4968		ledtest->freq = 500 / ledtest->freq;
4969
4970	if (ledtest->freq == 0)
4971		ledtest->freq = 1;
4972
4973	bfa_trc(diag, ledtest->freq);
4975	msg->cmd = (u8) ledtest->cmd;
4976	msg->color = (u8) ledtest->color;
4977	msg->portid = bfa_ioc_portid(diag->ioc);
4978	msg->led = ledtest->led;
4979	msg->freq = cpu_to_be16(ledtest->freq);
4980
4981	/* send mbox cmd */
4982	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4983}
4984
4985static void
4986diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4987{
4988	bfa_trc(diag, diag->ledtest.lock);
4989	diag->ledtest.lock = BFA_FALSE;
4990	/* no bfa_cb_queue is needed because driver is not waiting */
4991}
4992
4993/*
4994 * Port beaconing
4995 */
4996static void
4997diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4998{
4999	struct bfi_diag_portbeacon_req_s *msg;
5000
5001	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
5002	/* build host command */
5003	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
5004		bfa_ioc_portid(diag->ioc));
5005	msg->beacon = beacon;
5006	msg->period = cpu_to_be32(sec);
5007	/* send mbox cmd */
5008	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
5009}
5010
5011static void
5012diag_portbeacon_comp(struct bfa_diag_s *diag)
5013{
5014	bfa_trc(diag, diag->beacon.state);
5015	diag->beacon.state = BFA_FALSE;
5016	if (diag->cbfn_beacon)
5017		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
5018}
5019
5020/*
5021 *	Diag hmbox handler
5022 */
5023static void
5024bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5025{
5026	struct bfa_diag_s *diag = diagarg;
5027
5028	switch (msg->mh.msg_id) {
5029	case BFI_DIAG_I2H_PORTBEACON:
5030		diag_portbeacon_comp(diag);
5031		break;
5032	case BFI_DIAG_I2H_FWPING:
5033		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5034		break;
5035	case BFI_DIAG_I2H_TEMPSENSOR:
5036		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5037		break;
5038	case BFI_DIAG_I2H_LEDTEST:
5039		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5040		break;
5041	default:
5042		bfa_trc(diag, msg->mh.msg_id);
5043		WARN_ON(1);
5044	}
5045}
5046
5047/*
5048 * Gen RAM Test
5049 *
5050 *   @param[in] *diag           - diag data struct
5051 *   @param[in] *memtest        - mem test params input from upper layer
5052 *   @param[in] pattern         - mem test pattern
5053 *   @param[in] *result         - mem test result
5054 *   @param[in] cbfn            - mem test callback function
5055 *   @param[in] cbarg           - callback function arg
5056 *
5057 *   @param[out]
5058 */
5059bfa_status_t
5060bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5061		u32 pattern, struct bfa_diag_memtest_result *result,
5062		bfa_cb_diag_t cbfn, void *cbarg)
5063{
5064	u32	memtest_tov;
5065
5066	bfa_trc(diag, pattern);
5067
5068	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5069		return BFA_STATUS_ADAPTER_ENABLED;
5070
5071	/* check to see if there is another destructive diag cmd running */
5072	if (diag->block) {
5073		bfa_trc(diag, diag->block);
5074		return BFA_STATUS_DEVBUSY;
5075	} else
5076		diag->block = 1;
5077
5078	diag->result = result;
5079	diag->cbfn = cbfn;
5080	diag->cbarg = cbarg;
5081
5082	/* download memtest code and take LPU0 out of reset */
5083	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5084
5085	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5086		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5087	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5088			bfa_diag_memtest_done, diag, memtest_tov);
5089	diag->timer_active = 1;
5090	return BFA_STATUS_OK;
5091}
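
/*
 * Editor's usage sketch -- not part of the original source.  The RAM
 * test is destructive: the adapter must already be disabled
 * (BFA_STATUS_ADAPTER_ENABLED otherwise), diag->block locks out other
 * destructive commands, and completion is reported by the
 * bfa_diag_memtest_done() timer above.  "ex_" names are hypothetical.
 */
static struct bfa_diag_memtest_s ex_memtest_params;
static struct bfa_diag_memtest_result ex_memtest_res;

static bfa_status_t __maybe_unused
ex_run_memtest(struct bfa_diag_s *diag, bfa_cb_diag_t done, void *arg)
{
	return bfa_diag_memtest(diag, &ex_memtest_params, 0xA5A5A5A5,
				&ex_memtest_res, done, arg);
}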
5092
5093/*
5094 * DIAG firmware ping command
5095 *
5096 *   @param[in] *diag           - diag data struct
5097 *   @param[in] cnt             - dma loop count for testing PCIE
5098 *   @param[in] data            - data pattern to pass in fw
5099 *   @param[in] *result         - ptr to struct bfa_diag_results_fwping
5100 *   @param[in] cbfn            - callback function
5101 *   @param[in] *cbarg          - callback function arg
5102 *
5103 *   @param[out]
5104 */
5105bfa_status_t
5106bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5107		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5108		void *cbarg)
5109{
5110	bfa_trc(diag, cnt);
5111	bfa_trc(diag, data);
5112
5113	if (!bfa_ioc_is_operational(diag->ioc))
5114		return BFA_STATUS_IOC_NON_OP;
5115
5116	if (bfa_asic_id_ct2(bfa_ioc_devid(diag->ioc)) &&
5117	    (diag->ioc->clscode == BFI_PCIFN_CLASS_ETH))
5118		return BFA_STATUS_CMD_NOTSUPP;
5119
5120	/* check to see if there is another destructive diag cmd running */
5121	if (diag->block || diag->fwping.lock) {
5122		bfa_trc(diag, diag->block);
5123		bfa_trc(diag, diag->fwping.lock);
5124		return BFA_STATUS_DEVBUSY;
5125	}
5126
5127	/* Initialization */
5128	diag->fwping.lock = 1;
5129	diag->fwping.cbfn = cbfn;
5130	diag->fwping.cbarg = cbarg;
5131	diag->fwping.result = result;
5132	diag->fwping.data = data;
5133	diag->fwping.count = cnt;
5134
5135	/* Init test results */
5136	diag->fwping.result->data = 0;
5137	diag->fwping.result->status = BFA_STATUS_OK;
5138
5139	/* kick off the first ping */
5140	diag_fwping_send(diag);
5141	return BFA_STATUS_OK;
5142}
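
/*
 * Editor's usage sketch -- not part of the original source.  A PCIe DMA
 * integrity check: ten ping iterations with a fixed pattern, verified
 * word-by-word against the DMA buffer in diag_fwping_comp() above.
 * "ex_" names are hypothetical.
 */
static struct bfa_diag_results_fwping ex_fwping_res;

static bfa_status_t __maybe_unused
ex_run_fwping(struct bfa_diag_s *diag, bfa_cb_diag_t done, void *arg)
{
	return bfa_diag_fwping(diag, 10, 0x5A5A5A5A, &ex_fwping_res,
			       done, arg);
}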
5143
5144/*
5145 * Read Temperature Sensor
5146 *
5147 *   @param[in] *diag           - diag data struct
5148 *   @param[in] *result         - ptr to struct bfa_diag_results_tempsensor_s
5149 *   @param[in] cbfn            - callback function
5150 *   @param[in] *cbarg          - callback function arg
5151 *
5152 *   @param[out]
5153 */
5154bfa_status_t
5155bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5156		struct bfa_diag_results_tempsensor_s *result,
5157		bfa_cb_diag_t cbfn, void *cbarg)
5158{
5159	/* check to see if there is a destructive diag cmd running */
5160	if (diag->block || diag->tsensor.lock) {
5161		bfa_trc(diag, diag->block);
5162		bfa_trc(diag, diag->tsensor.lock);
5163		return BFA_STATUS_DEVBUSY;
5164	}
5165
5166	if (!bfa_ioc_is_operational(diag->ioc))
5167		return BFA_STATUS_IOC_NON_OP;
5168
5169	/* Init diag mod params */
5170	diag->tsensor.lock = 1;
5171	diag->tsensor.temp = result;
5172	diag->tsensor.cbfn = cbfn;
5173	diag->tsensor.cbarg = cbarg;
5174	diag->tsensor.status = BFA_STATUS_OK;
5175
5176	/* Send msg to fw */
5177	diag_tempsensor_send(diag);
5178
5179	return BFA_STATUS_OK;
5180}
5181
5182/*
5183 * LED Test command
5184 *
5185 *   @param[in] *diag           - diag data struct
5186 *   @param[in] *ledtest        - pt to ledtest data structure
5187 *
5188 *   @param[out]
5189 */
5190bfa_status_t
5191bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5192{
5193	bfa_trc(diag, ledtest->cmd);
5194
5195	if (!bfa_ioc_is_operational(diag->ioc))
5196		return BFA_STATUS_IOC_NON_OP;
5197
5198	if (diag->beacon.state)
5199		return BFA_STATUS_BEACON_ON;
5200
5201	if (diag->ledtest.lock)
5202		return BFA_STATUS_LEDTEST_OP;
5203
5204	/* Send msg to fw */
5205	diag->ledtest.lock = BFA_TRUE;
5206	diag_ledtest_send(diag, ledtest);
5207
5208	return BFA_STATUS_OK;
5209}
5210
5211/*
5212 * Port beaconing command
5213 *
5214 *   @param[in] *diag           - diag data struct
5215 *   @param[in] beacon          - port beaconing 1:ON   0:OFF
5216 *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
5217 *   @param[in] sec             - beaconing duration in seconds
5218 *
5219 *   @param[out]
5220 */
5221bfa_status_t
5222bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5223		bfa_boolean_t link_e2e_beacon, uint32_t sec)
5224{
5225	bfa_trc(diag, beacon);
5226	bfa_trc(diag, link_e2e_beacon);
5227	bfa_trc(diag, sec);
5228
5229	if (!bfa_ioc_is_operational(diag->ioc))
5230		return BFA_STATUS_IOC_NON_OP;
5231
5232	if (diag->ledtest.lock)
5233		return BFA_STATUS_LEDTEST_OP;
5234
5235	if (diag->beacon.state && beacon)       /* beacon already on */
5236		return BFA_STATUS_BEACON_ON;
5237
5238	diag->beacon.state	= beacon;
5239	diag->beacon.link_e2e	= link_e2e_beacon;
5240	if (diag->cbfn_beacon)
5241		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5242
5243	/* Send msg to fw */
5244	diag_portbeacon_send(diag, beacon, sec);
5245
5246	return BFA_STATUS_OK;
5247}
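
/*
 * Editor's usage sketch -- not part of the original source.  Blink the
 * port LED for 30 seconds without end-to-end link beaconing.  Beaconing
 * and LED test are mutually exclusive, as the BFA_STATUS_LEDTEST_OP /
 * BFA_STATUS_BEACON_ON checks above show.  "ex_" name is hypothetical.
 */
static bfa_status_t __maybe_unused
ex_beacon_on(struct bfa_diag_s *diag)
{
	return bfa_diag_beacon_port(diag, BFA_TRUE, BFA_FALSE, 30);
}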
5248
5249/*
5250 * Return DMA memory needed by diag module.
5251 */
5252u32
5253bfa_diag_meminfo(void)
5254{
5255	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5256}
5257
5258/*
5259 *	Attach virtual and physical memory for Diag.
5260 */
5261void
5262bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5263	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5264{
5265	diag->dev = dev;
5266	diag->ioc = ioc;
5267	diag->trcmod = trcmod;
5268
5269	diag->block = 0;
5270	diag->cbfn = NULL;
5271	diag->cbarg = NULL;
5272	diag->result = NULL;
5273	diag->cbfn_beacon = cbfn_beacon;
5274
5275	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5276	bfa_q_qe_init(&diag->ioc_notify);
5277	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5278	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5279}
5280
5281void
5282bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5283{
5284	diag->fwping.dbuf_kva = dm_kva;
5285	diag->fwping.dbuf_pa = dm_pa;
5286	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5287}
5288
5289/*
5290 *	PHY module specific
5291 */
5292#define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
5293#define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
5294
5295static void
5296bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5297{
5298	int i, m = sz >> 2;
5299
5300	for (i = 0; i < m; i++)
5301		obuf[i] = be32_to_cpu(ibuf[i]);
5302}
5303
5304static bfa_boolean_t
5305bfa_phy_present(struct bfa_phy_s *phy)
5306{
5307	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5308}
5309
5310static void
5311bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5312{
5313	struct bfa_phy_s *phy = cbarg;
5314
5315	bfa_trc(phy, event);
5316
5317	switch (event) {
5318	case BFA_IOC_E_DISABLED:
5319	case BFA_IOC_E_FAILED:
5320		if (phy->op_busy) {
5321			phy->status = BFA_STATUS_IOC_FAILURE;
5322			phy->cbfn(phy->cbarg, phy->status);
5323			phy->op_busy = 0;
5324		}
5325		break;
5326
5327	default:
5328		break;
5329	}
5330}
5331
5332/*
5333 * Send phy attribute query request.
5334 *
5335 * @param[in] cbarg - callback argument
5336 */
5337static void
5338bfa_phy_query_send(void *cbarg)
5339{
5340	struct bfa_phy_s *phy = cbarg;
5341	struct bfi_phy_query_req_s *msg =
5342			(struct bfi_phy_query_req_s *) phy->mb.msg;
5343
5344	msg->instance = phy->instance;
5345	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5346		bfa_ioc_portid(phy->ioc));
5347	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5348	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5349}
5350
5351/*
5352 * Send phy write request.
5353 *
5354 * @param[in] cbarg - callback argument
5355 */
5356static void
5357bfa_phy_write_send(void *cbarg)
5358{
5359	struct bfa_phy_s *phy = cbarg;
5360	struct bfi_phy_write_req_s *msg =
5361			(struct bfi_phy_write_req_s *) phy->mb.msg;
5362	u32	len;
5363	u16	*buf, *dbuf;
5364	int	i, sz;
5365
5366	msg->instance = phy->instance;
5367	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5368	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5369			phy->residue : BFA_PHY_DMA_BUF_SZ;
5370	msg->length = cpu_to_be32(len);
5371
5372	/* indicate if it's the last msg of the whole write operation */
5373	msg->last = (len == phy->residue) ? 1 : 0;
5374
5375	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5376		bfa_ioc_portid(phy->ioc));
5377	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5378
5379	buf = (u16 *) (phy->ubuf + phy->offset);
5380	dbuf = (u16 *)phy->dbuf_kva;
5381	sz = len >> 1;
5382	for (i = 0; i < sz; i++)
5383		dbuf[i] = cpu_to_be16(buf[i]);	/* stage host data into BE DMA buffer */
5384
5385	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5386
5387	phy->residue -= len;
5388	phy->offset += len;
5389}
5390
5391/*
5392 * Send phy read request.
5393 *
5394 * @param[in] cbarg - callback argument
5395 */
5396static void
5397bfa_phy_read_send(void *cbarg)
5398{
5399	struct bfa_phy_s *phy = cbarg;
5400	struct bfi_phy_read_req_s *msg =
5401			(struct bfi_phy_read_req_s *) phy->mb.msg;
5402	u32	len;
5403
5404	msg->instance = phy->instance;
5405	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5406	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5407			phy->residue : BFA_PHY_DMA_BUF_SZ;
5408	msg->length = cpu_to_be32(len);
5409	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5410		bfa_ioc_portid(phy->ioc));
5411	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5412	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5413}
5414
5415/*
5416 * Send phy stats request.
5417 *
5418 * @param[in] cbarg - callback argument
5419 */
5420static void
5421bfa_phy_stats_send(void *cbarg)
5422{
5423	struct bfa_phy_s *phy = cbarg;
5424	struct bfi_phy_stats_req_s *msg =
5425			(struct bfi_phy_stats_req_s *) phy->mb.msg;
5426
5427	msg->instance = phy->instance;
5428	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5429		bfa_ioc_portid(phy->ioc));
5430	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5431	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5432}
5433
5434/*
5435 * Phy memory info API.
5436 *
5437 * @param[in] mincfg - minimal cfg variable
5438 */
5439u32
5440bfa_phy_meminfo(bfa_boolean_t mincfg)
5441{
5442	/* min driver doesn't need phy */
5443	if (mincfg)
5444		return 0;
5445
5446	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5447}
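
/*
 * Editor's note -- a minimal usage sketch of the meminfo/memclaim
 * pairing (hypothetical caller, not part of the driver): the caller
 * sizes its DMA region with bfa_phy_meminfo() and later hands the
 * carved-out piece to bfa_phy_memclaim(), passing the same mincfg
 * value to both calls.
 */
#if 0	/* illustration only */
static void
drv_phy_mem_setup(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
		  bfa_boolean_t mincfg)
{
	u32 sz = bfa_phy_meminfo(mincfg);	/* 0 in min-cfg mode */

	/* (dm_kva, dm_pa) must point at 'sz' bytes of DMA-able memory */
	if (sz)
		bfa_phy_memclaim(phy, dm_kva, dm_pa, mincfg);
}
#endif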
5448
5449/*
5450 * Phy attach API.
5451 *
5452 * @param[in] phy - phy structure
5453 * @param[in] ioc  - ioc structure
5454 * @param[in] dev  - device structure
5455 * @param[in] trcmod - trace module
5456 * @param[in] mincfg - minimal cfg variable
5457 */
5458void
5459bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5460		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5461{
5462	phy->ioc = ioc;
5463	phy->trcmod = trcmod;
5464	phy->cbfn = NULL;
5465	phy->cbarg = NULL;
5466	phy->op_busy = 0;
5467
5468	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5469	bfa_q_qe_init(&phy->ioc_notify);
5470	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5471	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5472
5473	/* min driver doesn't need phy */
5474	if (mincfg) {
5475		phy->dbuf_kva = NULL;
5476		phy->dbuf_pa = 0;
5477	}
5478}
5479
5480/*
5481 * Claim memory for phy
5482 *
5483 * @param[in] phy - phy structure
5484 * @param[in] dm_kva - pointer to virtual memory address
5485 * @param[in] dm_pa - physical memory address
5486 * @param[in] mincfg - minimal cfg variable
5487 */
5488void
5489bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5490		bfa_boolean_t mincfg)
5491{
5492	if (mincfg)
5493		return;
5494
5495	phy->dbuf_kva = dm_kva;
5496	phy->dbuf_pa = dm_pa;
5497	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5498	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5499	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5500}
5501
5502bfa_boolean_t
5503bfa_phy_busy(struct bfa_ioc_s *ioc)
5504{
5505	void __iomem	*rb;
5506
5507	rb = bfa_ioc_bar0(ioc);
5508	return readl(rb + BFA_PHY_LOCK_STATUS);
5509}
5510
5511/*
5512 * Get phy attribute.
5513 *
5514 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
5515 * @param[in] attr - phy attribute structure
5516 * @param[in] cbfn - callback function
5517 * @param[in] cbarg - callback argument
5518 *
5519 * Return status.
5520 */
5521bfa_status_t
5522bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5523		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5524{
5525	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5526	bfa_trc(phy, instance);
5527
5528	if (!bfa_phy_present(phy))
5529		return BFA_STATUS_PHY_NOT_PRESENT;
5530
5531	if (!bfa_ioc_is_operational(phy->ioc))
5532		return BFA_STATUS_IOC_NON_OP;
5533
5534	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5535		bfa_trc(phy, phy->op_busy);
5536		return BFA_STATUS_DEVBUSY;
5537	}
5538
5539	phy->op_busy = 1;
5540	phy->cbfn = cbfn;
5541	phy->cbarg = cbarg;
5542	phy->instance = instance;
5543	phy->ubuf = (uint8_t *) attr;
5544	bfa_phy_query_send(phy);
5545
5546	return BFA_STATUS_OK;
5547}
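
/*
 * Editor's note -- hedged usage sketch (drv_s and its members are
 * hypothetical): the query is asynchronous, so BFA_STATUS_OK only
 * means the request was queued; the attribute buffer is valid and the
 * callback runs once bfa_phy_intr() handles BFI_PHY_I2H_QUERY_RSP.
 */
#if 0	/* illustration only */
static void
drv_phy_attr_done(void *cbarg, bfa_status_t status)
{
	struct drv_s *drv = cbarg;

	/* drv->phy_attr holds valid data only when status == BFA_STATUS_OK */
}

static bfa_status_t
drv_phy_query(struct drv_s *drv, struct bfa_phy_s *phy, u8 instance)
{
	return bfa_phy_get_attr(phy, instance, &drv->phy_attr,
				drv_phy_attr_done, drv);
}
#endif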
5548
5549/*
5550 * Get phy stats.
5551 *
5552 * @param[in] phy - phy structure
5553 * @param[in] instance - phy image instance
5554 * @param[in] stats - pointer to phy stats
5555 * @param[in] cbfn - callback function
5556 * @param[in] cbarg - callback argument
5557 *
5558 * Return status.
5559 */
5560bfa_status_t
5561bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5562		struct bfa_phy_stats_s *stats,
5563		bfa_cb_phy_t cbfn, void *cbarg)
5564{
5565	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5566	bfa_trc(phy, instance);
5567
5568	if (!bfa_phy_present(phy))
5569		return BFA_STATUS_PHY_NOT_PRESENT;
5570
5571	if (!bfa_ioc_is_operational(phy->ioc))
5572		return BFA_STATUS_IOC_NON_OP;
5573
5574	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5575		bfa_trc(phy, phy->op_busy);
5576		return BFA_STATUS_DEVBUSY;
5577	}
5578
5579	phy->op_busy = 1;
5580	phy->cbfn = cbfn;
5581	phy->cbarg = cbarg;
5582	phy->instance = instance;
5583	phy->ubuf = (u8 *) stats;
5584	bfa_phy_stats_send(phy);
5585
5586	return BFA_STATUS_OK;
5587}
5588
5589/*
5590 * Update phy image.
5591 *
5592 * @param[in] phy - phy structure
5593 * @param[in] instance - phy image instance
5594 * @param[in] buf - update data buffer
5595 * @param[in] len - data buffer length
5596 * @param[in] offset - offset relative to starting address
5597 * @param[in] cbfn - callback function
5598 * @param[in] cbarg - callback argument
5599 *
5600 * Return status.
5601 */
5602bfa_status_t
5603bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5604		void *buf, u32 len, u32 offset,
5605		bfa_cb_phy_t cbfn, void *cbarg)
5606{
5607	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5608	bfa_trc(phy, instance);
5609	bfa_trc(phy, len);
5610	bfa_trc(phy, offset);
5611
5612	if (!bfa_phy_present(phy))
5613		return BFA_STATUS_PHY_NOT_PRESENT;
5614
5615	if (!bfa_ioc_is_operational(phy->ioc))
5616		return BFA_STATUS_IOC_NON_OP;
5617
5618	/* 'len' must be on a word (4-byte) boundary */
5619	if (!len || (len & 0x03))
5620		return BFA_STATUS_FAILED;
5621
5622	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5623		bfa_trc(phy, phy->op_busy);
5624		return BFA_STATUS_DEVBUSY;
5625	}
5626
5627	phy->op_busy = 1;
5628	phy->cbfn = cbfn;
5629	phy->cbarg = cbarg;
5630	phy->instance = instance;
5631	phy->residue = len;
5632	phy->offset = 0;
5633	phy->addr_off = offset;
5634	phy->ubuf = buf;
5635
5636	bfa_phy_write_send(phy);
5637	return BFA_STATUS_OK;
5638}
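
/*
 * Editor's note (illustration): the word-boundary check above rejects,
 * e.g., len == 0x1002 (0x1002 & 0x03 == 2) but accepts len == 0x1004;
 * callers must pad update images to a 4-byte multiple.
 */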
5639
5640/*
5641 * Read phy image.
5642 *
5643 * @param[in] phy - phy structure
5644 * @param[in] instance - phy image instance
5645 * @param[in] buf - read data buffer
5646 * @param[in] len - data buffer length
5647 * @param[in] offset - offset relative to starting address
5648 * @param[in] cbfn - callback function
5649 * @param[in] cbarg - callback argument
5650 *
5651 * Return status.
5652 */
5653bfa_status_t
5654bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5655		void *buf, u32 len, u32 offset,
5656		bfa_cb_phy_t cbfn, void *cbarg)
5657{
5658	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5659	bfa_trc(phy, instance);
5660	bfa_trc(phy, len);
5661	bfa_trc(phy, offset);
5662
5663	if (!bfa_phy_present(phy))
5664		return BFA_STATUS_PHY_NOT_PRESENT;
5665
5666	if (!bfa_ioc_is_operational(phy->ioc))
5667		return BFA_STATUS_IOC_NON_OP;
5668
5669	/* 'len' must be on a word (4-byte) boundary */
5670	if (!len || (len & 0x03))
5671		return BFA_STATUS_FAILED;
5672
5673	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5674		bfa_trc(phy, phy->op_busy);
5675		return BFA_STATUS_DEVBUSY;
5676	}
5677
5678	phy->op_busy = 1;
5679	phy->cbfn = cbfn;
5680	phy->cbarg = cbarg;
5681	phy->instance = instance;
5682	phy->residue = len;
5683	phy->offset = 0;
5684	phy->addr_off = offset;
5685	phy->ubuf = buf;
5686	bfa_phy_read_send(phy);
5687
5688	return BFA_STATUS_OK;
5689}
5690
5691/*
5692 * Process phy response messages upon receiving interrupts.
5693 *
5694 * @param[in] phyarg - phy structure
5695 * @param[in] msg - message structure
5696 */
5697void
5698bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5699{
5700	struct bfa_phy_s *phy = phyarg;
5701	u32	status;
5702
5703	union {
5704		struct bfi_phy_query_rsp_s *query;
5705		struct bfi_phy_stats_rsp_s *stats;
5706		struct bfi_phy_write_rsp_s *write;
5707		struct bfi_phy_read_rsp_s *read;
5708		struct bfi_mbmsg_s   *msg;
5709	} m;
5710
5711	m.msg = msg;
5712	bfa_trc(phy, msg->mh.msg_id);
5713
5714	if (!phy->op_busy) {
5715		/* receiving response after ioc failure */
5716		bfa_trc(phy, 0x9999);
5717		return;
5718	}
5719
5720	switch (msg->mh.msg_id) {
5721	case BFI_PHY_I2H_QUERY_RSP:
5722		status = be32_to_cpu(m.query->status);
5723		bfa_trc(phy, status);
5724
5725		if (status == BFA_STATUS_OK) {
5726			struct bfa_phy_attr_s *attr =
5727				(struct bfa_phy_attr_s *) phy->ubuf;
5728			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5729					sizeof(struct bfa_phy_attr_s));
5730			bfa_trc(phy, attr->status);
5731			bfa_trc(phy, attr->length);
5732		}
5733
5734		phy->status = status;
5735		phy->op_busy = 0;
5736		if (phy->cbfn)
5737			phy->cbfn(phy->cbarg, phy->status);
5738		break;
5739	case BFI_PHY_I2H_STATS_RSP:
5740		status = be32_to_cpu(m.stats->status);
5741		bfa_trc(phy, status);
5742
5743		if (status == BFA_STATUS_OK) {
5744			struct bfa_phy_stats_s *stats =
5745				(struct bfa_phy_stats_s *) phy->ubuf;
5746			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5747				sizeof(struct bfa_phy_stats_s));
5748			bfa_trc(phy, stats->status);
5749		}
5750
5751		phy->status = status;
5752		phy->op_busy = 0;
5753		if (phy->cbfn)
5754			phy->cbfn(phy->cbarg, phy->status);
5755		break;
5756	case BFI_PHY_I2H_WRITE_RSP:
5757		status = be32_to_cpu(m.write->status);
5758		bfa_trc(phy, status);
5759
5760		if (status != BFA_STATUS_OK || phy->residue == 0) {
5761			phy->status = status;
5762			phy->op_busy = 0;
5763			if (phy->cbfn)
5764				phy->cbfn(phy->cbarg, phy->status);
5765		} else {
5766			bfa_trc(phy, phy->offset);
5767			bfa_phy_write_send(phy);
5768		}
5769		break;
5770	case BFI_PHY_I2H_READ_RSP:
5771		status = be32_to_cpu(m.read->status);
5772		bfa_trc(phy, status);
5773
5774		if (status != BFA_STATUS_OK) {
5775			phy->status = status;
5776			phy->op_busy = 0;
5777			if (phy->cbfn)
5778				phy->cbfn(phy->cbarg, phy->status);
5779		} else {
5780			u32 len = be32_to_cpu(m.read->length);
5781			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5782			u16 *dbuf = (u16 *)phy->dbuf_kva;
5783			int i, sz = len >> 1;
5784
5785			bfa_trc(phy, phy->offset);
5786			bfa_trc(phy, len);
5787
5788			for (i = 0; i < sz; i++)
5789				buf[i] = be16_to_cpu(dbuf[i]);
5790
5791			phy->residue -= len;
5792			phy->offset += len;
5793
5794			if (phy->residue == 0) {
5795				phy->status = status;
5796				phy->op_busy = 0;
5797				if (phy->cbfn)
5798					phy->cbfn(phy->cbarg, phy->status);
5799			} else
5800				bfa_phy_read_send(phy);
5801		}
5802		break;
5803	default:
5804		WARN_ON(1);
5805	}
5806}
5807
5808/*
5809 * DCONF state machine events
5810 */
5811enum bfa_dconf_event {
5812	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
5813	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
5814	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
5815	BFA_DCONF_SM_TIMEOUT		= 4,	/* timer expiry */
5816	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
5817	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
5818};
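
/*
 * Editor's note -- transition summary of the dconf state machine below,
 * derived from the handlers and added for readability:
 *
 *	uninit        --INIT-------> flash_read   (flash read + timer)
 *	flash_read    --FLASH_COMP-> ready
 *	ready         --WR---------> dirty        (update timer started)
 *	dirty         --TIMEOUT----> sync         (flash write issued)
 *	dirty         --EXIT-------> final_sync   (flash write issued)
 *	sync          --FLASH_COMP-> ready
 *	sync          --EXIT-------> final_sync
 *	dirty/sync    --IOCDISABLE-> iocdown_dirty
 *	iocdown_dirty --INIT-------> dirty
 *	final_sync    --FLASH_COMP/IOCDISABLE/TIMEOUT--> uninit
 *	                                         (IOCFC_E_DCONF_DONE)
 */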
5819
5820/* forward declaration of DCONF state machine */
5821static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5822				enum bfa_dconf_event event);
5823static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5824				enum bfa_dconf_event event);
5825static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5826				enum bfa_dconf_event event);
5827static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5828				enum bfa_dconf_event event);
5829static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5830				enum bfa_dconf_event event);
5831static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5832				enum bfa_dconf_event event);
5833static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5834				enum bfa_dconf_event event);
5835
5836static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5837static void bfa_dconf_timer(void *cbarg);
5838static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5839static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5840
5841/*
5842 * Beginning state of dconf module. Waiting for an event to start.
5843 */
5844static void
5845bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5846{
5847	bfa_status_t bfa_status;
5848	bfa_trc(dconf->bfa, event);
5849
5850	switch (event) {
5851	case BFA_DCONF_SM_INIT:
5852		if (dconf->min_cfg) {
5853			bfa_trc(dconf->bfa, dconf->min_cfg);
5854			bfa_fsm_send_event(&dconf->bfa->iocfc,
5855					IOCFC_E_DCONF_DONE);
5856			return;
5857		}
5858		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5859		bfa_timer_start(dconf->bfa, &dconf->timer,
5860			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5861		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5862					BFA_FLASH_PART_DRV, dconf->instance,
5863					dconf->dconf,
5864					sizeof(struct bfa_dconf_s), 0,
5865					bfa_dconf_init_cb, dconf->bfa);
5866		if (bfa_status != BFA_STATUS_OK) {
5867			bfa_timer_stop(&dconf->timer);
5868			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5869			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5870			return;
5871		}
5872		break;
5873	case BFA_DCONF_SM_EXIT:
5874		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5875		break;
5876	case BFA_DCONF_SM_IOCDISABLE:
5877	case BFA_DCONF_SM_WR:
5878	case BFA_DCONF_SM_FLASH_COMP:
5879		break;
5880	default:
5881		bfa_sm_fault(dconf->bfa, event);
5882	}
5883}
5884
5885/*
5886 * Read flash for dconf entries and make a call back to the driver once done.
5887 */
5888static void
5889bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5890			enum bfa_dconf_event event)
5891{
5892	bfa_trc(dconf->bfa, event);
5893
5894	switch (event) {
5895	case BFA_DCONF_SM_FLASH_COMP:
5896		bfa_timer_stop(&dconf->timer);
5897		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5898		break;
5899	case BFA_DCONF_SM_TIMEOUT:
5900		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5901		bfa_ioc_suspend(&dconf->bfa->ioc);
5902		break;
5903	case BFA_DCONF_SM_EXIT:
5904		bfa_timer_stop(&dconf->timer);
5905		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5906		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5907		break;
5908	case BFA_DCONF_SM_IOCDISABLE:
5909		bfa_timer_stop(&dconf->timer);
5910		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5911		break;
5912	default:
5913		bfa_sm_fault(dconf->bfa, event);
5914	}
5915}
5916
5917/*
5918 * DCONF Module is in ready state. Has completed the initialization.
5919 */
5920static void
5921bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5922{
5923	bfa_trc(dconf->bfa, event);
5924
5925	switch (event) {
5926	case BFA_DCONF_SM_WR:
5927		bfa_timer_start(dconf->bfa, &dconf->timer,
5928			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5929		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5930		break;
5931	case BFA_DCONF_SM_EXIT:
5932		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5933		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5934		break;
5935	case BFA_DCONF_SM_INIT:
5936	case BFA_DCONF_SM_IOCDISABLE:
5937		break;
5938	default:
5939		bfa_sm_fault(dconf->bfa, event);
5940	}
5941}
5942
5943/*
5944 * Entries are dirty; write back to the flash.
5945 */
5947static void
5948bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5949{
5950	bfa_trc(dconf->bfa, event);
5951
5952	switch (event) {
5953	case BFA_DCONF_SM_TIMEOUT:
5954		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5955		bfa_dconf_flash_write(dconf);
5956		break;
5957	case BFA_DCONF_SM_WR:
5958		bfa_timer_stop(&dconf->timer);
5959		bfa_timer_start(dconf->bfa, &dconf->timer,
5960			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5961		break;
5962	case BFA_DCONF_SM_EXIT:
5963		bfa_timer_stop(&dconf->timer);
5964		bfa_timer_start(dconf->bfa, &dconf->timer,
5965			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5966		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5967		bfa_dconf_flash_write(dconf);
5968		break;
5969	case BFA_DCONF_SM_FLASH_COMP:
5970		break;
5971	case BFA_DCONF_SM_IOCDISABLE:
5972		bfa_timer_stop(&dconf->timer);
5973		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5974		break;
5975	default:
5976		bfa_sm_fault(dconf->bfa, event);
5977	}
5978}
5979
5980/*
5981 * Sync the dconf entries to the flash.
5982 */
5983static void
5984bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5985			enum bfa_dconf_event event)
5986{
5987	bfa_trc(dconf->bfa, event);
5988
5989	switch (event) {
5990	case BFA_DCONF_SM_IOCDISABLE:
5991	case BFA_DCONF_SM_FLASH_COMP:
5992		bfa_timer_stop(&dconf->timer);
5993		fallthrough;
5994	case BFA_DCONF_SM_TIMEOUT:
5995		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5996		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5997		break;
5998	default:
5999		bfa_sm_fault(dconf->bfa, event);
6000	}
6001}
6002
6003static void
6004bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
6005{
6006	bfa_trc(dconf->bfa, event);
6007
6008	switch (event) {
6009	case BFA_DCONF_SM_FLASH_COMP:
6010		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
6011		break;
6012	case BFA_DCONF_SM_WR:
6013		bfa_timer_start(dconf->bfa, &dconf->timer,
6014			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6015		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6016		break;
6017	case BFA_DCONF_SM_EXIT:
6018		bfa_timer_start(dconf->bfa, &dconf->timer,
6019			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6020		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
6021		break;
6022	case BFA_DCONF_SM_IOCDISABLE:
6023		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
6024		break;
6025	default:
6026		bfa_sm_fault(dconf->bfa, event);
6027	}
6028}
6029
6030static void
6031bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
6032			enum bfa_dconf_event event)
6033{
6034	bfa_trc(dconf->bfa, event);
6035
6036	switch (event) {
6037	case BFA_DCONF_SM_INIT:
6038		bfa_timer_start(dconf->bfa, &dconf->timer,
6039			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6040		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6041		break;
6042	case BFA_DCONF_SM_EXIT:
6043		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6044		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6045		break;
6046	case BFA_DCONF_SM_IOCDISABLE:
6047		break;
6048	default:
6049		bfa_sm_fault(dconf->bfa, event);
6050	}
6051}
6052
6053/*
6054 * Compute and return memory needed by DRV_CFG module.
6055 */
6056void
6057bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
6058		  struct bfa_s *bfa)
6059{
6060	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
6061
6062	if (cfg->drvcfg.min_cfg)
6063		bfa_mem_kva_setup(meminfo, dconf_kva,
6064				sizeof(struct bfa_dconf_hdr_s));
6065	else
6066		bfa_mem_kva_setup(meminfo, dconf_kva,
6067				sizeof(struct bfa_dconf_s));
6068}
6069
6070void
6071bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
6072{
6073	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6074
6075	dconf->bfad = bfad;
6076	dconf->bfa = bfa;
6077	dconf->instance = bfa->ioc.port_id;
6078	bfa_trc(bfa, dconf->instance);
6079
6080	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
6081	if (cfg->drvcfg.min_cfg) {
6082		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
6083		dconf->min_cfg = BFA_TRUE;
6084	} else {
6085		dconf->min_cfg = BFA_FALSE;
6086		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
6087	}
6088
6089	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
6090	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6091}
6092
6093static void
6094bfa_dconf_init_cb(void *arg, bfa_status_t status)
6095{
6096	struct bfa_s *bfa = arg;
6097	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6098
6099	if (status == BFA_STATUS_OK) {
6100		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
6101		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
6102			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
6103		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
6104			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
6105	}
6106	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6107	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
6108}
6109
6110void
6111bfa_dconf_modinit(struct bfa_s *bfa)
6112{
6113	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6114	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
6115}
6116
6117static void bfa_dconf_timer(void *cbarg)
6118{
6119	struct bfa_dconf_mod_s *dconf = cbarg;
6120	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
6121}
6122
6123void
6124bfa_dconf_iocdisable(struct bfa_s *bfa)
6125{
6126	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6127	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
6128}
6129
6130static bfa_status_t
6131bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
6132{
6133	bfa_status_t bfa_status;
6134	bfa_trc(dconf->bfa, 0);
6135
6136	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
6137				BFA_FLASH_PART_DRV, dconf->instance,
6138				dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
6139				bfa_dconf_cbfn, dconf);
6140	if (bfa_status != BFA_STATUS_OK)
6141		WARN_ON(bfa_status);
6142	bfa_trc(dconf->bfa, bfa_status);
6143
6144	return bfa_status;
6145}
6146
6147bfa_status_t
6148bfa_dconf_update(struct bfa_s *bfa)
6149{
6150	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6151	bfa_trc(dconf->bfa, 0);
6152	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
6153		return BFA_STATUS_FAILED;
6154
6155	if (dconf->min_cfg) {
6156		bfa_trc(dconf->bfa, dconf->min_cfg);
6157		return BFA_STATUS_FAILED;
6158	}
6159
6160	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
6161	return BFA_STATUS_OK;
6162}
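
/*
 * Editor's note -- illustrative sketch (hypothetical caller): a driver
 * that changes persistent config mutates the in-memory bfa_dconf_s and
 * then marks it dirty; the BFA_DCONF_UPDATE_TOV timer batches the
 * actual flash write.
 */
#if 0	/* illustration only */
static void
drv_persist_cfg_change(struct bfa_s *bfa)
{
	/* ... modify the in-memory config first ... */
	if (bfa_dconf_update(bfa) != BFA_STATUS_OK)
		; /* min-cfg mode or IOC down dirty: change not persisted */
}
#endif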
6163
6164static void
6165bfa_dconf_cbfn(void *arg, bfa_status_t status)
6166{
6167	struct bfa_dconf_mod_s *dconf = arg;
6168	WARN_ON(status);
6169	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6170}
6171
6172void
6173bfa_dconf_modexit(struct bfa_s *bfa)
6174{
6175	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6176	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6177}
6178
6179/*
6180 * FRU specific functions
6181 */
6182
6183#define BFA_FRU_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
6184#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
6185#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6186
6187static void
6188bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
6189{
6190	struct bfa_fru_s *fru = cbarg;
6191
6192	bfa_trc(fru, event);
6193
6194	switch (event) {
6195	case BFA_IOC_E_DISABLED:
6196	case BFA_IOC_E_FAILED:
6197		if (fru->op_busy) {
6198			fru->status = BFA_STATUS_IOC_FAILURE;
6199			fru->cbfn(fru->cbarg, fru->status);
6200			fru->op_busy = 0;
6201		}
6202		break;
6203
6204	default:
6205		break;
6206	}
6207}
6208
6209/*
6210 * Send fru write request.
6211 *
6212 * @param[in] cbarg - callback argument
 * @param[in] msg_type - H2I message type
6213 */
6214static void
6215bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6216{
6217	struct bfa_fru_s *fru = cbarg;
6218	struct bfi_fru_write_req_s *msg =
6219			(struct bfi_fru_write_req_s *) fru->mb.msg;
6220	u32 len;
6221
6222	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6223	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6224				fru->residue : BFA_FRU_DMA_BUF_SZ;
6225	msg->length = cpu_to_be32(len);
6226
6227	/*
6228	 * indicate if it's the last msg of the whole write operation
6229	 */
6230	msg->last = (len == fru->residue) ? 1 : 0;
6231
6232	msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6233	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6234	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6235
6236	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6237	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6238
6239	fru->residue -= len;
6240	fru->offset += len;
6241}
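
/*
 * Editor's note (illustration): unlike the phy write path, the fru
 * write path memcpy()s straight into the DMA buffer with no 16-bit
 * byte swap, and trfr_cmpl reaches the firmware only with the final
 * chunk (len == residue) -- presumably so completion handling fires
 * once the whole VPD image has been transferred.
 */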
6242
6243/*
6244 * Send fru read request.
6245 *
6246 * @param[in] cbarg - callback argument
 * @param[in] msg_type - H2I message type
6247 */
6248static void
6249bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6250{
6251	struct bfa_fru_s *fru = cbarg;
6252	struct bfi_fru_read_req_s *msg =
6253			(struct bfi_fru_read_req_s *) fru->mb.msg;
6254	u32 len;
6255
6256	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6257	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6258				fru->residue : BFA_FRU_DMA_BUF_SZ;
6259	msg->length = cpu_to_be32(len);
6260	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6261	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6262	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6263}
6264
6265/*
6266 * FRU memory info API.
6267 *
6268 * @param[in] mincfg - minimal cfg variable
6269 */
6270u32
6271bfa_fru_meminfo(bfa_boolean_t mincfg)
6272{
6273	/* min driver doesn't need fru */
6274	if (mincfg)
6275		return 0;
6276
6277	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6278}
6279
6280/*
6281 * FRU attach API.
6282 *
6283 * @param[in] fru - fru structure
6284 * @param[in] ioc  - ioc structure
6285 * @param[in] dev  - device structure
6286 * @param[in] trcmod - trace module
6287 * @param[in] mincfg - minimal cfg variable
6288 */
6289void
6290bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6291	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6292{
6293	fru->ioc = ioc;
6294	fru->trcmod = trcmod;
6295	fru->cbfn = NULL;
6296	fru->cbarg = NULL;
6297	fru->op_busy = 0;
6298
6299	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6300	bfa_q_qe_init(&fru->ioc_notify);
6301	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6302	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6303
6304	/* min driver doesn't need fru */
6305	if (mincfg) {
6306		fru->dbuf_kva = NULL;
6307		fru->dbuf_pa = 0;
6308	}
6309}
6310
6311/*
6312 * Claim memory for fru
6313 *
6314 * @param[in] fru - fru structure
6315 * @param[in] dm_kva - pointer to virtual memory address
6316 * @param[in] dm_pa - physical memory address
6317 * @param[in] mincfg - minimal cfg variable
6318 */
6319void
6320bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6321	bfa_boolean_t mincfg)
6322{
6323	if (mincfg)
6324		return;
6325
6326	fru->dbuf_kva = dm_kva;
6327	fru->dbuf_pa = dm_pa;
6328	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6329	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6330	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6331}
6332
6333/*
6334 * Update fru vpd image.
6335 *
6336 * @param[in] fru - fru structure
6337 * @param[in] buf - update data buffer
6338 * @param[in] len - data buffer length
6339 * @param[in] offset - offset relative to starting address
6340 * @param[in] cbfn - callback function
6341 * @param[in] cbarg - callback argument
6342 *
6343 * Return status.
6344 */
6345bfa_status_t
6346bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6347		  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6348{
6349	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6350	bfa_trc(fru, len);
6351	bfa_trc(fru, offset);
6352
6353	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6354		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6355		return BFA_STATUS_FRU_NOT_PRESENT;
6356
6357	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6358		return BFA_STATUS_CMD_NOTSUPP;
6359
6360	if (!bfa_ioc_is_operational(fru->ioc))
6361		return BFA_STATUS_IOC_NON_OP;
6362
6363	if (fru->op_busy) {
6364		bfa_trc(fru, fru->op_busy);
6365		return BFA_STATUS_DEVBUSY;
6366	}
6367
6368	fru->op_busy = 1;
6369
6370	fru->cbfn = cbfn;
6371	fru->cbarg = cbarg;
6372	fru->residue = len;
6373	fru->offset = 0;
6374	fru->addr_off = offset;
6375	fru->ubuf = buf;
6376	fru->trfr_cmpl = trfr_cmpl;
6377
6378	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6379
6380	return BFA_STATUS_OK;
6381}
6382
6383/*
6384 * Read fru vpd image.
6385 *
6386 * @param[in] fru - fru structure
6387 * @param[in] buf - read data buffer
6388 * @param[in] len - data buffer length
6389 * @param[in] offset - offset relative to starting address
6390 * @param[in] cbfn - callback function
6391 * @param[in] cbarg - callback argument
6392 *
6393 * Return status.
6394 */
6395bfa_status_t
6396bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6397		bfa_cb_fru_t cbfn, void *cbarg)
6398{
6399	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6400	bfa_trc(fru, len);
6401	bfa_trc(fru, offset);
6402
6403	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6404		return BFA_STATUS_FRU_NOT_PRESENT;
6405
6406	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6407		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6408		return BFA_STATUS_CMD_NOTSUPP;
6409
6410	if (!bfa_ioc_is_operational(fru->ioc))
6411		return BFA_STATUS_IOC_NON_OP;
6412
6413	if (fru->op_busy) {
6414		bfa_trc(fru, fru->op_busy);
6415		return BFA_STATUS_DEVBUSY;
6416	}
6417
6418	fru->op_busy = 1;
6419
6420	fru->cbfn = cbfn;
6421	fru->cbarg = cbarg;
6422	fru->residue = len;
6423	fru->offset = 0;
6424	fru->addr_off = offset;
6425	fru->ubuf = buf;
6426	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6427
6428	return BFA_STATUS_OK;
6429}
6430
6431/*
6432 * Get maximum size fru vpd image.
6433 *
6434 * @param[in] fru - fru structure
6435 * @param[out] size - maximum size of fru vpd data
6436 *
6437 * Return status.
6438 */
6439bfa_status_t
6440bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6441{
6442	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6443		return BFA_STATUS_FRU_NOT_PRESENT;
6444
6445	if (!bfa_ioc_is_operational(fru->ioc))
6446		return BFA_STATUS_IOC_NON_OP;
6447
6448	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6449		fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6450		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6451	else
6452		return BFA_STATUS_CMD_NOTSUPP;
6453	return BFA_STATUS_OK;
6454}
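
/*
 * Editor's note -- hedged usage sketch (drv_fru_done and its cbarg are
 * hypothetical): query the card's VPD capacity before issuing a
 * bounded read.
 */
#if 0	/* illustration only */
static void
drv_fru_done(void *cbarg, bfa_status_t status)
{
	/* read data is valid only when status == BFA_STATUS_OK */
}

static bfa_status_t
drv_fru_dump(struct bfa_fru_s *fru, void *buf, void *cbarg)
{
	u32 max_size;
	bfa_status_t rc;

	rc = bfa_fruvpd_get_max_size(fru, &max_size);
	if (rc != BFA_STATUS_OK)
		return rc;

	return bfa_fruvpd_read(fru, buf, max_size, 0, drv_fru_done, cbarg);
}
#endif
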
6455/*
6456 * tfru write.
6457 *
6458 * @param[in] fru - fru structure
6459 * @param[in] buf - update data buffer
6460 * @param[in] len - data buffer length
6461 * @param[in] offset - offset relative to starting address
6462 * @param[in] cbfn - callback function
6463 * @param[in] cbarg - callback argument
6464 *
6465 * Return status.
6466 */
6467bfa_status_t
6468bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6469	       bfa_cb_fru_t cbfn, void *cbarg)
6470{
6471	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6472	bfa_trc(fru, len);
6473	bfa_trc(fru, offset);
6474	bfa_trc(fru, *((u8 *) buf));
6475
6476	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6477		return BFA_STATUS_FRU_NOT_PRESENT;
6478
6479	if (!bfa_ioc_is_operational(fru->ioc))
6480		return BFA_STATUS_IOC_NON_OP;
6481
6482	if (fru->op_busy) {
6483		bfa_trc(fru, fru->op_busy);
6484		return BFA_STATUS_DEVBUSY;
6485	}
6486
6487	fru->op_busy = 1;
6488
6489	fru->cbfn = cbfn;
6490	fru->cbarg = cbarg;
6491	fru->residue = len;
6492	fru->offset = 0;
6493	fru->addr_off = offset;
6494	fru->ubuf = buf;
6495
6496	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6497
6498	return BFA_STATUS_OK;
6499}
6500
6501/*
6502 * tfru read.
6503 *
6504 * @param[in] fru - fru structure
6505 * @param[in] buf - read data buffer
6506 * @param[in] len - data buffer length
6507 * @param[in] offset - offset relative to starting address
6508 * @param[in] cbfn - callback function
6509 * @param[in] cbarg - callback argument
6510 *
6511 * Return status.
6512 */
6513bfa_status_t
6514bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6515	      bfa_cb_fru_t cbfn, void *cbarg)
6516{
6517	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6518	bfa_trc(fru, len);
6519	bfa_trc(fru, offset);
6520
6521	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6522		return BFA_STATUS_FRU_NOT_PRESENT;
6523
6524	if (!bfa_ioc_is_operational(fru->ioc))
6525		return BFA_STATUS_IOC_NON_OP;
6526
6527	if (fru->op_busy) {
6528		bfa_trc(fru, fru->op_busy);
6529		return BFA_STATUS_DEVBUSY;
6530	}
6531
6532	fru->op_busy = 1;
6533
6534	fru->cbfn = cbfn;
6535	fru->cbarg = cbarg;
6536	fru->residue = len;
6537	fru->offset = 0;
6538	fru->addr_off = offset;
6539	fru->ubuf = buf;
6540	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6541
6542	return BFA_STATUS_OK;
6543}
6544
6545/*
6546 * Process fru response messages upon receiving interrupts.
6547 *
6548 * @param[in] fruarg - fru structure
6549 * @param[in] msg - message structure
6550 */
6551void
6552bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6553{
6554	struct bfa_fru_s *fru = fruarg;
6555	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6556	u32 status;
6557
6558	bfa_trc(fru, msg->mh.msg_id);
6559
6560	if (!fru->op_busy) {
6561		/*
6562		 * receiving response after ioc failure
6563		 */
6564		bfa_trc(fru, 0x9999);
6565		return;
6566	}
6567
6568	switch (msg->mh.msg_id) {
6569	case BFI_FRUVPD_I2H_WRITE_RSP:
6570	case BFI_TFRU_I2H_WRITE_RSP:
6571		status = be32_to_cpu(rsp->status);
6572		bfa_trc(fru, status);
6573
6574		if (status != BFA_STATUS_OK || fru->residue == 0) {
6575			fru->status = status;
6576			fru->op_busy = 0;
6577			if (fru->cbfn)
6578				fru->cbfn(fru->cbarg, fru->status);
6579		} else {
6580			bfa_trc(fru, fru->offset);
6581			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6582				bfa_fru_write_send(fru,
6583					BFI_FRUVPD_H2I_WRITE_REQ);
6584			else
6585				bfa_fru_write_send(fru,
6586					BFI_TFRU_H2I_WRITE_REQ);
6587		}
6588		break;
6589	case BFI_FRUVPD_I2H_READ_RSP:
6590	case BFI_TFRU_I2H_READ_RSP:
6591		status = be32_to_cpu(rsp->status);
6592		bfa_trc(fru, status);
6593
6594		if (status != BFA_STATUS_OK) {
6595			fru->status = status;
6596			fru->op_busy = 0;
6597			if (fru->cbfn)
6598				fru->cbfn(fru->cbarg, fru->status);
6599		} else {
6600			u32 len = be32_to_cpu(rsp->length);
6601
6602			bfa_trc(fru, fru->offset);
6603			bfa_trc(fru, len);
6604
6605			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6606			fru->residue -= len;
6607			fru->offset += len;
6608
6609			if (fru->residue == 0) {
6610				fru->status = status;
6611				fru->op_busy = 0;
6612				if (fru->cbfn)
6613					fru->cbfn(fru->cbarg, fru->status);
6614			} else {
6615				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6616					bfa_fru_read_send(fru,
6617						BFI_FRUVPD_H2I_READ_REQ);
6618				else
6619					bfa_fru_read_send(fru,
6620						BFI_TFRU_H2I_READ_REQ);
6621			}
6622		}
6623		break;
6624	default:
6625		WARN_ON(1);
6626	}
6627}
6628
6629/*
6630 * register definitions
6631 */
6632#define FLI_CMD_REG			0x0001d000
6633#define FLI_RDDATA_REG			0x0001d010
6634#define FLI_ADDR_REG			0x0001d004
6635#define FLI_DEV_STATUS_REG		0x0001d014
6636
6637#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
6638#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
6639#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
6640#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */
6641
6642enum bfa_flash_cmd {
6643	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
6644	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
6645};
6646
6647/*
6648 * Hardware error definition
6649 */
6650enum bfa_flash_err {
6651	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
6652	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
6653	BFA_FLASH_BAD		= -3,	/*!< flash bad */
6654	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
6655	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
6656	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
6657	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
6658	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
6659	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
6660};
6661
6662/*
6663 * Flash command register data structure
6664 */
6665union bfa_flash_cmd_reg_u {
6666	struct {
6667#ifdef __BIG_ENDIAN
6668		u32	act:1;
6669		u32	rsv:1;
6670		u32	write_cnt:9;
6671		u32	read_cnt:9;
6672		u32	addr_cnt:4;
6673		u32	cmd:8;
6674#else
6675		u32	cmd:8;
6676		u32	addr_cnt:4;
6677		u32	read_cnt:9;
6678		u32	write_cnt:9;
6679		u32	rsv:1;
6680		u32	act:1;
6681#endif
6682	} r;
6683	u32	i;
6684};
6685
6686/*
6687 * Flash device status register data structure
6688 */
6689union bfa_flash_dev_status_reg_u {
6690	struct {
6691#ifdef __BIG_ENDIAN
6692		u32	rsv:21;
6693		u32	fifo_cnt:6;
6694		u32	busy:1;
6695		u32	init_status:1;
6696		u32	present:1;
6697		u32	bad:1;
6698		u32	good:1;
6699#else
6700		u32	good:1;
6701		u32	bad:1;
6702		u32	present:1;
6703		u32	init_status:1;
6704		u32	busy:1;
6705		u32	fifo_cnt:6;
6706		u32	rsv:21;
6707#endif
6708	} r;
6709	u32	i;
6710};
6711
6712/*
6713 * Flash address register data structure
6714 */
6715union bfa_flash_addr_reg_u {
6716	struct {
6717#ifdef __BIG_ENDIAN
6718		u32	addr:24;
6719		u32	dummy:8;
6720#else
6721		u32	dummy:8;
6722		u32	addr:24;
6723#endif
6724	} r;
6725	u32	i;
6726};
6727
6728/*
6729 * Flash raw private functions
6730 */
6731static void
6732bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
6733		  u8 rd_cnt, u8 ad_cnt, u8 op)
6734{
6735	union bfa_flash_cmd_reg_u cmd;
6736
6737	cmd.i = 0;
6738	cmd.r.act = 1;
6739	cmd.r.write_cnt = wr_cnt;
6740	cmd.r.read_cnt = rd_cnt;
6741	cmd.r.addr_cnt = ad_cnt;
6742	cmd.r.cmd = op;
6743	writel(cmd.i, (pci_bar + FLI_CMD_REG));
6744}
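
/*
 * Editor's note (illustration): bfa_flash_read_start() below issues
 *
 *	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
 *
 * i.e. cmd = 0x0b, addr_cnt = 4 (presumably 3 address bytes plus the
 * dummy byte fast read requires), read_cnt = len, write_cnt = 0, and
 * act = 1, all packed into FLI_CMD_REG; the polling loops elsewhere
 * wait via bfa_flash_cmd_act_check() for the controller to clear act.
 */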
6745
6746static void
6747bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
6748{
6749	union bfa_flash_addr_reg_u addr;
6750
6751	addr.r.addr = address & 0x00ffffff;
6752	addr.r.dummy = 0;
6753	writel(addr.i, (pci_bar + FLI_ADDR_REG));
6754}
6755
6756static int
6757bfa_flash_cmd_act_check(void __iomem *pci_bar)
6758{
6759	union bfa_flash_cmd_reg_u cmd;
6760
6761	cmd.i = readl(pci_bar + FLI_CMD_REG);
6762
6763	if (cmd.r.act)
6764		return BFA_FLASH_ERR_CMD_ACT;
6765
6766	return 0;
6767}
6768
6769/*
6770 * @brief
6771 * Flush FLI data fifo.
6772 *
6773 * @param[in] pci_bar - pci bar address
6775 *
6776 * Return 0 on success, negative error number on error.
6777 */
6778static u32
6779bfa_flash_fifo_flush(void __iomem *pci_bar)
6780{
6781	u32 i;
6782	union bfa_flash_dev_status_reg_u dev_status;
6783
6784	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6785
6786	if (!dev_status.r.fifo_cnt)
6787		return 0;
6788
6789	/* fifo counter in terms of words */
6790	for (i = 0; i < dev_status.r.fifo_cnt; i++)
6791		readl(pci_bar + FLI_RDDATA_REG);
6792
6793	/*
6794	 * Check the device status. It may take some time.
6795	 */
6796	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6797		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6798		if (!dev_status.r.fifo_cnt)
6799			break;
6800	}
6801
6802	if (dev_status.r.fifo_cnt)
6803		return BFA_FLASH_ERR_FIFO_CNT;
6804
6805	return 0;
6806}
6807
6808/*
6809 * @brief
6810 * Read flash status.
6811 *
6812 * @param[in] pci_bar - pci bar address
6813 *
6814 * Return 0 on success, negative error number on error.
6815 */
6816static u32
6817bfa_flash_status_read(void __iomem *pci_bar)
6818{
6819	union bfa_flash_dev_status_reg_u	dev_status;
6820	int				status;
6821	u32			ret_status;
6822	int				i;
6823
6824	status = bfa_flash_fifo_flush(pci_bar);
6825	if (status < 0)
6826		return status;
6827
6828	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
6829
6830	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6831		status = bfa_flash_cmd_act_check(pci_bar);
6832		if (!status)
6833			break;
6834	}
6835
6836	if (status)
6837		return status;
6838
6839	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6840	if (!dev_status.r.fifo_cnt)
6841		return BFA_FLASH_BUSY;
6842
6843	ret_status = readl(pci_bar + FLI_RDDATA_REG);
6844	ret_status >>= 24;
6845
6846	status = bfa_flash_fifo_flush(pci_bar);
6847	if (status < 0)
6848		return status;
6849
6850	return ret_status;
6851}
6852
6853/*
6854 * @brief
6855 * Start flash read operation.
6856 *
6857 * @param[in] pci_bar - pci bar address
6858 * @param[in] offset - flash address offset
6859 * @param[in] len - read data length
6860 * @param[in] buf - read data buffer
6861 *
6862 * Return 0 on success, negative error number on error.
6863 */
6864static u32
6865bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
6866			 char *buf)
6867{
6868	int status;
6869
6870	/*
6871	 * len must be a multiple of 4 and must not exceed the fifo size
6872	 */
6873	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
6874		return BFA_FLASH_ERR_LEN;
6875
6876	/*
6877	 * check status
6878	 */
6879	status = bfa_flash_status_read(pci_bar);
6880	if (status == BFA_FLASH_BUSY)
6881		status = bfa_flash_status_read(pci_bar);
6882
6883	if (status < 0)
6884		return status;
6885
6886	/*
6887	 * check if write-in-progress bit is cleared
6888	 */
6889	if (status & BFA_FLASH_WIP_MASK)
6890		return BFA_FLASH_ERR_WIP;
6891
6892	bfa_flash_set_addr(pci_bar, offset);
6893
6894	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
6895
6896	return 0;
6897}
6898
6899/*
6900 * @brief
6901 * Check flash read operation.
6902 *
6903 * @param[in] pci_bar - pci bar address
6904 *
6905 * Return flash device status, 1 if busy, 0 if not.
6906 */
6907static u32
6908bfa_flash_read_check(void __iomem *pci_bar)
6909{
6910	if (bfa_flash_cmd_act_check(pci_bar))
6911		return 1;
6912
6913	return 0;
6914}
6915
6916/*
6917 * @brief
6918 * End flash read operation.
6919 *
6920 * @param[in] pci_bar - pci bar address
6921 * @param[in] len - read data length
6922 * @param[in] buf - read data buffer
6923 *
6924 */
6925static void
6926bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
6927{
6929	u32 i;
6930
6931	/*
6932	 * read data fifo up to 32 words
6933	 */
6934	for (i = 0; i < len; i += 4) {
6935		u32 w = readl(pci_bar + FLI_RDDATA_REG);
6936		*((u32 *) (buf + i)) = swab32(w);
6937	}
6938
6939	bfa_flash_fifo_flush(pci_bar);
6940}
6941
6942/*
6943 * @brief
6944 * Perform flash raw read.
6945 *
6946 * @param[in] pci_bar - pci bar address
6947 * @param[in] offset - flash partition address offset
6948 * @param[in] buf - read data buffer
6949 * @param[in] len - read data length
6950 *
6951 * Return status.
6952 */
6953
6955#define FLASH_BLOCKING_OP_MAX   500
6956#define FLASH_SEM_LOCK_REG	0x18820
6957
6958static int
6959bfa_raw_sem_get(void __iomem *bar)
6960{
6961	int	locked;
6962
6963	locked = readl((bar + FLASH_SEM_LOCK_REG));
6964	return !locked;
6966}
6967
6968static bfa_status_t
6969bfa_flash_sem_get(void __iomem *bar)
6970{
6971	u32 n = FLASH_BLOCKING_OP_MAX;
6972
6973	while (!bfa_raw_sem_get(bar)) {
6974		if (--n <= 0)
6975			return BFA_STATUS_BADFLASH;
6976		mdelay(10);
6977	}
6978	return BFA_STATUS_OK;
6979}
6980
6981static void
6982bfa_flash_sem_put(void __iomem *bar)
6983{
6984	writel(0, (bar + FLASH_SEM_LOCK_REG));
6985}
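
/*
 * Editor's note (illustration): bfa_flash_sem_get() retries the lock up
 * to FLASH_BLOCKING_OP_MAX (500) times with mdelay(10) between tries,
 * so a raw read gives up on the flash semaphore after roughly 5 s.
 */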
6986
6987bfa_status_t
6988bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
6989		       u32 len)
6990{
6991	u32 n;
6992	int status;
6993	u32 off, l, s, residue, fifo_sz;
6994
6995	residue = len;
6996	off = 0;
6997	fifo_sz = BFA_FLASH_FIFO_SIZE;
6998	status = bfa_flash_sem_get(pci_bar);
6999	if (status != BFA_STATUS_OK)
7000		return status;
7001
7002	while (residue) {
7003		s = offset + off;
7004		n = s / fifo_sz;
7005		l = (n + 1) * fifo_sz - s;
7006		if (l > residue)
7007			l = residue;
7008
7009		status = bfa_flash_read_start(pci_bar, offset + off, l,
7010								&buf[off]);
7011		if (status < 0) {
7012			bfa_flash_sem_put(pci_bar);
7013			return BFA_STATUS_FAILED;
7014		}
7015
7016		n = BFA_FLASH_BLOCKING_OP_MAX;
7017		while (bfa_flash_read_check(pci_bar)) {
7018			if (--n <= 0) {
7019				bfa_flash_sem_put(pci_bar);
7020				return BFA_STATUS_FAILED;
7021			}
7022		}
7023
7024		bfa_flash_read_end(pci_bar, l, &buf[off]);
7025
7026		residue -= l;
7027		off += l;
7028	}
7029	bfa_flash_sem_put(pci_bar);
7030
7031	return BFA_STATUS_OK;
7032}
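
/*
 * Editor's note -- worked example of the windowing math above, with
 * fifo_sz = BFA_FLASH_FIFO_SIZE = 128 (0x80): for offset = 0x1f0 and
 * len = 0x100, the first window is s = 0x1f0, n = 3, and
 * l = 4 * 0x80 - 0x1f0 = 0x10, so chunk 1 reads 0x10 bytes up to the
 * 0x200 boundary; the remaining 0xf0 bytes follow as 0x80 + 0x70.
 * Every chunk thus stays within one fifo-sized window and respects the
 * BFA_FLASH_FIFO_SIZE limit enforced by bfa_flash_read_start().
 */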