/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>


#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset((void *)x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

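/* A usage sketch for the four macros above (illustrative only): they
 * assume a local 'bp' pointer and an 'alloc_mem_err' label in the
 * enclosing function, and the names 'ctx', 'ring' and 'ring_mapping'
 * below are hypothetical placeholders:
 *
 *	BNX2X_ALLOC(ctx, sizeof(*ctx));
 *	BNX2X_PCI_ALLOC(ring, &ring_mapping, ring_size);
 *	return 0;
 *
 * alloc_mem_err:
 *	BNX2X_PCI_FREE(ring, ring_mapping, ring_size);
 *	BNX2X_FREE(ctx);
 *	return -ENOMEM;
 */
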
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp);

/**
 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
 *
 * @bp:			driver handle
 * @rss_obj:		RSS object to use
 * @ind_table:		indirection table to configure
 * @config_hash:	re-configure RSS hash keys configuration
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
			u8 *ind_table, bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp:			driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:		driver handle
 * @fp:		pointer to the fastpath structure
 * @leading:	true if this is the leading queue
 *
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp:		driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:		driver handle
 * @command:	request
 * @param:	request's parameter
 *
 * block until there is a reply
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	true when testing the SerDes link
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp:		driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp:		driver handle
 * @igu_sb_id:	SB id
 * @segment:	SB segment
 * @index:	SB index
 * @op:		SB operation
 * @update:	is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	if true, disable HW interrupts as well
 *
 * This function ensures that no ISRs or SP DPCs (sp_task)
 * are running after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp:		driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev:	netdevice
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp:		driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp:			driver handle
 * @cl_id:		client id
 * @rx_mode_flags:	rx mode configuration
 * @rx_accept_flags:	rx accept configuration
 * @tx_accept_flags:	tx accept configuration (tx switch)
 * @ramrod_flags:	ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			 unsigned long rx_mode_flags,
			 unsigned long rx_accept_flags,
			 unsigned long tx_accept_flags,
			 unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp:		driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* validate that the correct fw is loaded */
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int __devinit bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:	napi structure
 * @budget:	NAPI work budget
 *
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memories outside the main driver structure
 *
 * @bp:		driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories outside the main driver structure
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 *
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev:	net_device
 * @wwn:	output buffer
 * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 *
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
			struct bnx2x_fastpath *fp, u16 bd_prod,
			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

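/* A call sketch (illustrative): the 'start' argument is the ustorm
 * producers offset cached in the fastpath, so a caller such as the
 * non-inline bnx2x_update_rx_prod() wrapper is expected to look like:
 *
 *	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
 *				 fp->ustorm_rx_prods_offset);
 */
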
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > bp->tx_ring_size);
	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(bp->tx_ring_size) - used;
}

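/* Worked example (hypothetical numbers): with bp->tx_ring_size = 4078,
 * NUM_TX_RINGS = 16, prod = 100 and cons = 50, used = 50 + 16 = 66 and
 * 4078 - 66 = 4012 BDs are reported available. Counting the "next-page"
 * entries as used keeps the stack from ever overwriting them.
 */
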
static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;
	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
			return true;
	return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return  num_queues ?
		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}

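/* Example (illustrative): with the 'num_queues' module parameter left at
 * its default of 0 on an 8-CPU host, this returns
 * min(8, BNX2X_MAX_QUEUES(bp)); an explicit parameter value is only
 * clamped to the chip maximum.
 */
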
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
				      u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->data = cons_rx_buf->data;
	*prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
	return 2 * vn + BP_PORT(bp);
}

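/* Example: functions are interleaved between the two ports, so on port 1
 * VN 2 maps to function 2 * 2 + 1 = 5, while the same VN on port 0 maps
 * to function 4.
 */
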
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
				       bool config_hash)
{
	return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
				   config_hash);
}

/**
 * bnx2x_func_start - init function
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;

	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
		start_params->network_cos_mode = STATIC_COS;
	else /* CHIP_IS_E1X */
		start_params->network_cos_mode = FW_WRR;

	return bnx2x_func_state_change(bp, &func_params);
}


/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
					 u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}

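/* Worked example: for the MAC 00:11:22:33:44:55 the bytes land in memory
 * as fw_hi = {0x11, 0x00}, fw_mid = {0x33, 0x22}, fw_lo = {0x55, 0x44};
 * each pair of address bytes is stored byte-swapped, which is the 16-bit
 * FW representation this helper produces.
 */
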
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	if (!CHIP_IS_E1x(bp)) {
#ifdef BCM_CNIC
		/* there are special statistics counters for FCoE 136..140 */
		if (IS_FCOE_FP(fp))
			return bp->cnic_base_cl_id + (bp->pf_num >> 1);
#endif
		return fp->cl_id;
	}
	return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp:		driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	/* 57710 has only one function per-port */
	if (CHIP_IS_E1(bp))
		return 1;

	/* Calculate a number of functions enabled on the current
	 * PATH/PORT.
	 */
	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp))
		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
	struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
	__le16 *tx_cons_sb)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;

	DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}

#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}


static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	/* The current BNX2X_FCOE_ETH_CID definition implies no more than
	 * 16 ETH clients per function when CNIC is enabled!
	 *
	 * Fix it ASAP!!!
	 */
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;

	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals the FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}
#endif

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fp_txdata *txdata)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 1000);
	}

	return 0;
}

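/* Unload-path usage sketch (illustrative): drain every CoS ring of a
 * fastpath before tearing the queue down, e.g.
 *
 *	for_each_cos_in_tx_queue(fp, cos)
 *		rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
 */
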
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

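/* Usage sketch (illustrative): copy a FW structure into storm internal
 * memory one dword at a time, e.g. with the producers structure used
 * earlier in this file ('addr' being the destination offset is an
 * assumption here):
 *
 *	struct ustorm_eth_rx_producers rx_prods = {0};
 *	__storm_memset_struct(bp, addr, sizeof(rx_prods), (u32 *)&rx_prods);
 */
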
/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000; /* Wait for 5 secs tops */

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 1000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
			  bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}

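/* Typical call (a sketch): on the unload path, wait for all outstanding
 * slowpath commands by passing a mask of every pending bit; the error
 * string below is illustrative, not the driver's actual message:
 *
 *	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
 *		BNX2X_ERR("timed out waiting for SP completions\n");
 */
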
/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp:		driver handle
 * @cxt:	context of the connection on the host memory
 * @cid:	SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 *
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
			      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
		   "Max BW configured to 0 - using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

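/* Example (illustrative, assuming the MAX BW field occupies bits [23:16]
 * of the MF config word): mf_cfg = 0x00320000 yields max_cfg = 0x32 = 50,
 * i.e. 50% of the line rate, while a zeroed field falls back to 100 as
 * above.
 */
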
/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
	/* gro frags per page */
	int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

	/*
	 * 1. number of frags should not grow above MAX_SKB_FRAGS
	 * 2. frag must fit the page
	 */
	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
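
/* Example (illustrative, with assumed constants): on 4 KB SGE pages a
 * standard 1500-byte MTU packs two or three GRO frags per page, so the
 * U_ETH_SGL_SIZE * fpp product stays under MAX_SKB_FRAGS and GRO is
 * allowed; an MTU larger than a single SGE page always fails the check.
 */
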
#ifdef BCM_CNIC
/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp:		driver handle
 *
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp:		driver handle
 *
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		if (vn == BP_VN(bp))
			continue;

		func = func_by_vn(bp, vn);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp:		driver handle
 * @flags:	flags to update
 * @set:	set or clear
 *
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
	if (SHMEM2_HAS(bp, drv_flags)) {
		u32 drv_flags;
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
		drv_flags = SHMEM2_RD(bp, drv_flags);

		if (set)
			SET_FLAGS(drv_flags, flags);
		else
			RESET_FLAGS(drv_flags, flags);

		SHMEM2_WR(bp, drv_flags, drv_flags);
		DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
	}
}

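/* Call sketch (illustrative; the flag bit is an example borrowed from the
 * DCB code path and is an assumption here):
 *
 *	bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 1);
 */
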
static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
	if (is_valid_ether_addr(addr))
		return true;
#ifdef BCM_CNIC
	if (is_zero_ether_addr(addr) &&
	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
		return true;
#endif
	return false;
}

#endif /* BNX2X_CMN_H */