/* bnx2.c: QLogic bnx2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>

#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.6"
#define DRV_MODULE_RELDATE	"January 29, 2014"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)

static char version[] =
	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0444);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
185 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
190 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

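/* Return the number of free TX descriptors, computed from the
 * producer/consumer delta. READ_ONCE() is used because tx_prod and
 * tx_cons are updated from different contexts without a shared lock.
 */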
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

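/* Indirect register accessors: the target offset is written to the
 * PCICFG window address register and the data is then read or written
 * through the window, serialized by indirect_lock.
 */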
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

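/* Write a value into on-chip context memory. The 5709 posts a write
 * request through CTX_CTX_CTRL and polls briefly for completion;
 * older chips use a simple address/data register pair.
 */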
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

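/* Tell the CNIC driver which IRQ vector and status block to use: a
 * dedicated MSI-X vector when available, otherwise vector 0 shared
 * with the main driver.
 */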
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

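/* Read a PHY register over the MDIO interface. Hardware auto-polling
 * is paused around the access, and the START_BUSY bit is polled for
 * up to 500 usec before the access is abandoned with -EBUSY.
 */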
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

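/* Quiesce the device: stop CNIC if requested, disable NAPI and the TX
 * queues, then mask interrupts and wait for any handlers in flight.
 * Carrier is forced off so the stack does not declare a TX timeout.
 */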
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

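/* The status block and the statistics block share a single coherent
 * DMA allocation; the two helpers below free and allocate them
 * together.
 */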
static void
bnx2_free_stats_blk(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->status_blk) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bp->status_blk,
				  bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_stats_blk(struct net_device *dev)
{
	int status_blk_size;
	void *status_blk;
	struct bnx2 *bp = netdev_priv(dev);

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);
	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		return -ENOMEM;

	bp->status_blk = status_blk;
	bp->stats_blk = status_blk + status_blk_size;
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}

	if (bnapi->status_blk.msi)
		bnapi->status_blk.msi = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

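/* Report the resolved link speed, duplex, and autoneg state to the
 * bootcode through the shared memory LINK_STATUS word.
 */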
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

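/* Resolve TX/RX flow control from the local and remote pause
 * advertisements (per the 802.3 priority table referenced below), or
 * apply the forced settings when flow control autoneg is off.
 */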
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

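/* Program the EMAC for the resolved link: port mode matching the line
 * speed, duplex, and RX/TX PAUSE enables, then refresh the RX
 * contexts so the flow control setting takes effect.
 */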
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

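/* The next two helpers toggle 2.5G advertisement in the UP1 register
 * on 2.5G-capable SerDes PHYs; both return whether 2.5G was already
 * advertised before the call.
 */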
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

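/* Poll the PHY for the current link state (with a 5706 SerDes
 * workaround that cross-checks the EMAC status), update speed,
 * duplex, and flow control, and report any transition.
 */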
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

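/* Translate the requested flow control setting into MII pause
 * advertisement bits; SerDes (1000X) and copper PHYs use different
 * bit definitions.
 */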
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

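/* On remote-PHY capable devices, link settings are requested from the
 * firmware through the driver mailbox rather than programmed into the
 * PHY directly; phy_lock is dropped around the firmware call.
 */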
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autonegotiation
		 * involves exchanging base pages plus 3 next pages
		 * and normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

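/* Advance the driver pulse sequence number in shared memory so the
 * bootcode knows the driver is still alive.
 */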
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

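/* Handle a link event from the firmware-managed (remote) PHY: decode
 * the LINK_STATUS word into speed, duplex, flow control, and port
 * type, then reprogram the MAC accordingly.
 */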
1979bnx2_remote_phy_event(struct bnx2 *bp)
1980{
1981 u32 msg;
1982 u8 link_up = bp->link_up;
1983 u8 old_port;
1984
1985 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1986
1987 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1988 bnx2_send_heart_beat(bp);
1989
1990 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1991
1992 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1993 bp->link_up = 0;
1994 else {
1995 u32 speed;
1996
1997 bp->link_up = 1;
1998 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1999 bp->duplex = DUPLEX_FULL;
2000 switch (speed) {
2001 case BNX2_LINK_STATUS_10HALF:
2002 bp->duplex = DUPLEX_HALF;
2003 /* fall through */
2004 case BNX2_LINK_STATUS_10FULL:
2005 bp->line_speed = SPEED_10;
2006 break;
2007 case BNX2_LINK_STATUS_100HALF:
2008 bp->duplex = DUPLEX_HALF;
2009 /* fall through */
2010 case BNX2_LINK_STATUS_100BASE_T4:
2011 case BNX2_LINK_STATUS_100FULL:
2012 bp->line_speed = SPEED_100;
2013 break;
2014 case BNX2_LINK_STATUS_1000HALF:
2015 bp->duplex = DUPLEX_HALF;
2016 /* fall through */
2017 case BNX2_LINK_STATUS_1000FULL:
2018 bp->line_speed = SPEED_1000;
2019 break;
2020 case BNX2_LINK_STATUS_2500HALF:
2021 bp->duplex = DUPLEX_HALF;
2022 /* fall through */
2023 case BNX2_LINK_STATUS_2500FULL:
2024 bp->line_speed = SPEED_2500;
2025 break;
2026 default:
2027 bp->line_speed = 0;
2028 break;
2029 }
2030
2031 bp->flow_ctrl = 0;
2032 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2033 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2034 if (bp->duplex == DUPLEX_FULL)
2035 bp->flow_ctrl = bp->req_flow_ctrl;
2036 } else {
2037 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2038 bp->flow_ctrl |= FLOW_CTRL_TX;
2039 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2040 bp->flow_ctrl |= FLOW_CTRL_RX;
2041 }
2042
2043 old_port = bp->phy_port;
2044 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2045 bp->phy_port = PORT_FIBRE;
2046 else
2047 bp->phy_port = PORT_TP;
2048
2049 if (old_port != bp->phy_port)
2050 bnx2_set_default_link(bp);
2051
2052 }
2053 if (bp->link_up != link_up)
2054 bnx2_report_link(bp);
2055
2056 bnx2_set_mac_link(bp);
2057}
2058
2059static int
2060bnx2_set_remote_link(struct bnx2 *bp)
2061{
2062 u32 evt_code;
2063
2064 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2065 switch (evt_code) {
2066 case BNX2_FW_EVT_CODE_LINK_EVENT:
2067 bnx2_remote_phy_event(bp);
2068 break;
2069 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2070 default:
2071 bnx2_send_heart_beat(bp);
2072 break;
2073 }
2074 return 0;
2075}
2076
2077static int
2078bnx2_setup_copper_phy(struct bnx2 *bp)
2079__releases(&bp->phy_lock)
2080__acquires(&bp->phy_lock)
2081{
2082 u32 bmcr, adv_reg, new_adv = 0;
2083 u32 new_bmcr;
2084
2085 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2086
2087 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2088 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2089 ADVERTISE_PAUSE_ASYM);
2090
2091 new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2092
2093 if (bp->autoneg & AUTONEG_SPEED) {
2094 u32 adv1000_reg;
2095 u32 new_adv1000 = 0;
2096
2097 new_adv |= bnx2_phy_get_pause_adv(bp);
2098
2099 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2100 adv1000_reg &= PHY_ALL_1000_SPEED;
2101
2102 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2103 if ((adv1000_reg != new_adv1000) ||
2104 (adv_reg != new_adv) ||
2105 ((bmcr & BMCR_ANENABLE) == 0)) {
2106
2107 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2108 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2109 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2110 BMCR_ANENABLE);
2111 }
2112 else if (bp->link_up) {
2113 /* Flow ctrl may have changed from auto to forced,
2114 * or vice-versa. */
2115
2116 bnx2_resolve_flow_ctrl(bp);
2117 bnx2_set_mac_link(bp);
2118 }
2119 return 0;
2120 }
2121
2122 /* advertise nothing when forcing speed */
2123 if (adv_reg != new_adv)
2124 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2125
2126 new_bmcr = 0;
2127 if (bp->req_line_speed == SPEED_100) {
2128 new_bmcr |= BMCR_SPEED100;
2129 }
2130 if (bp->req_duplex == DUPLEX_FULL) {
2131 new_bmcr |= BMCR_FULLDPLX;
2132 }
2133 if (new_bmcr != bmcr) {
2134 u32 bmsr;
2135
2136 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2137 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2138
2139 if (bmsr & BMSR_LSTATUS) {
2140 /* Force link down */
2141 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2142 spin_unlock_bh(&bp->phy_lock);
2143 msleep(50);
2144 spin_lock_bh(&bp->phy_lock);
2145
2146 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2147 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2148 }
2149
2150 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2151
2152 /* Normally, the new speed is set up after the link has
2153 * gone down and up again. In some cases, link will not go
2154 * down so we need to set up the new speed here.
2155 */
2156 if (bmsr & BMSR_LSTATUS) {
2157 bp->line_speed = bp->req_line_speed;
2158 bp->duplex = bp->req_duplex;
2159 bnx2_resolve_flow_ctrl(bp);
2160 bnx2_set_mac_link(bp);
2161 }
2162 } else {
2163 bnx2_resolve_flow_ctrl(bp);
2164 bnx2_set_mac_link(bp);
2165 }
2166 return 0;
2167}
2168
2169static int
2170bnx2_setup_phy(struct bnx2 *bp, u8 port)
2171__releases(&bp->phy_lock)
2172__acquires(&bp->phy_lock)
2173{
2174 if (bp->loopback == MAC_LOOPBACK)
2175 return 0;
2176
2177 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2178 return bnx2_setup_serdes_phy(bp, port);
2179 }
2180 else {
2181 return bnx2_setup_copper_phy(bp);
2182 }
2183}
2184
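/* The standard IEEE MII registers are accessed at an offset of 0x10
 * on this PHY, so remap the cached register addresses before
 * programming the SerDes blocks.
 */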
2185static int
2186bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2187{
2188 u32 val;
2189
2190 bp->mii_bmcr = MII_BMCR + 0x10;
2191 bp->mii_bmsr = MII_BMSR + 0x10;
2192 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2193 bp->mii_adv = MII_ADVERTISE + 0x10;
2194 bp->mii_lpa = MII_LPA + 0x10;
2195 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2196
2197 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2198 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2199
2200 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2201 if (reset_phy)
2202 bnx2_reset_phy(bp);
2203
2204 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2205
2206 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2207 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2208 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2209 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2210
2211 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2212 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2213 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2214 val |= BCM5708S_UP1_2G5;
2215 else
2216 val &= ~BCM5708S_UP1_2G5;
2217 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2218
2219 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2220 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2221 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2222 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2223
2224 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2225
2226 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2227 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2228 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2229
2230 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2231
2232 return 0;
2233}
2234
2235static int
2236bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2237{
2238 u32 val;
2239
2240 if (reset_phy)
2241 bnx2_reset_phy(bp);
2242
2243 bp->mii_up1 = BCM5708S_UP1;
2244
2245 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2246 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2247 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2248
2249 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2250 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2251 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2252
2253 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2254 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2255 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2256
2257 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2258 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2259 val |= BCM5708S_UP1_2G5;
2260 bnx2_write_phy(bp, BCM5708S_UP1, val);
2261 }
2262
2263 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2264 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2265 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2266 /* increase tx signal amplitude */
2267 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2268 BCM5708S_BLK_ADDR_TX_MISC);
2269 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2270 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2271 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2272 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2273 }
2274
2275 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2276 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2277
2278 if (val) {
2279 u32 is_backplane;
2280
2281 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2282 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2283 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2284 BCM5708S_BLK_ADDR_TX_MISC);
2285 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2286 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2287 BCM5708S_BLK_ADDR_DIG);
2288 }
2289 }
2290 return 0;
2291}
2292
2293static int
2294bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2295{
2296 if (reset_phy)
2297 bnx2_reset_phy(bp);
2298
2299 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2300
2301 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2302 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2303
2304 if (bp->dev->mtu > ETH_DATA_LEN) {
2305 u32 val;
2306
2307 /* Set extended packet length bit */
2308 bnx2_write_phy(bp, 0x18, 0x7);
2309 bnx2_read_phy(bp, 0x18, &val);
2310 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2311
2312 bnx2_write_phy(bp, 0x1c, 0x6c00);
2313 bnx2_read_phy(bp, 0x1c, &val);
2314 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2315 }
2316 else {
2317 u32 val;
2318
2319 bnx2_write_phy(bp, 0x18, 0x7);
2320 bnx2_read_phy(bp, 0x18, &val);
2321 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2322
2323 bnx2_write_phy(bp, 0x1c, 0x6c00);
2324 bnx2_read_phy(bp, 0x1c, &val);
2325 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2326 }
2327
2328 return 0;
2329}
2330
2331static int
2332bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2333{
2334 u32 val;
2335
2336 if (reset_phy)
2337 bnx2_reset_phy(bp);
2338
2339 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2340 bnx2_write_phy(bp, 0x18, 0x0c00);
2341 bnx2_write_phy(bp, 0x17, 0x000a);
2342 bnx2_write_phy(bp, 0x15, 0x310b);
2343 bnx2_write_phy(bp, 0x17, 0x201f);
2344 bnx2_write_phy(bp, 0x15, 0x9506);
2345 bnx2_write_phy(bp, 0x17, 0x401f);
2346 bnx2_write_phy(bp, 0x15, 0x14e2);
2347 bnx2_write_phy(bp, 0x18, 0x0400);
2348 }
2349
2350 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2351 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2352 MII_BNX2_DSP_EXPAND_REG | 0x8);
2353 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2354 val &= ~(1 << 8);
2355 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2356 }
2357
2358 if (bp->dev->mtu > ETH_DATA_LEN) {
2359 /* Set extended packet length bit */
2360 bnx2_write_phy(bp, 0x18, 0x7);
2361 bnx2_read_phy(bp, 0x18, &val);
2362 bnx2_write_phy(bp, 0x18, val | 0x4000);
2363
2364 bnx2_read_phy(bp, 0x10, &val);
2365 bnx2_write_phy(bp, 0x10, val | 0x1);
2366 }
2367 else {
2368 bnx2_write_phy(bp, 0x18, 0x7);
2369 bnx2_read_phy(bp, 0x18, &val);
2370 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2371
2372 bnx2_read_phy(bp, 0x10, &val);
2373 bnx2_write_phy(bp, 0x10, val & ~0x1);
2374 }
2375
2376 /* ethernet@wirespeed */
2377 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2378 bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2379 val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2380
2381 /* auto-mdix */
2382 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2383 val |= AUX_CTL_MISC_CTL_AUTOMDIX;
2384
2385 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2386 return 0;
2387}
2388
2390static int
2391bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2392__releases(&bp->phy_lock)
2393__acquires(&bp->phy_lock)
2394{
2395 u32 val;
2396 int rc = 0;
2397
2398 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2399 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2400
2401 bp->mii_bmcr = MII_BMCR;
2402 bp->mii_bmsr = MII_BMSR;
2403 bp->mii_bmsr1 = MII_BMSR;
2404 bp->mii_adv = MII_ADVERTISE;
2405 bp->mii_lpa = MII_LPA;
2406
2407 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2408
2409 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2410 goto setup_phy;
2411
2412 bnx2_read_phy(bp, MII_PHYSID1, &val);
2413 bp->phy_id = val << 16;
2414 bnx2_read_phy(bp, MII_PHYSID2, &val);
2415 bp->phy_id |= val & 0xffff;
2416
2417 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2418 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2419 rc = bnx2_init_5706s_phy(bp, reset_phy);
2420 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2421 rc = bnx2_init_5708s_phy(bp, reset_phy);
2422 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2423 rc = bnx2_init_5709s_phy(bp, reset_phy);
2424 }
2425 else {
2426 rc = bnx2_init_copper_phy(bp, reset_phy);
2427 }
2428
2429setup_phy:
2430 if (!rc)
2431 rc = bnx2_setup_phy(bp, bp->phy_port);
2432
2433 return rc;
2434}
2435
2436static int
2437bnx2_set_mac_loopback(struct bnx2 *bp)
2438{
2439 u32 mac_mode;
2440
2441 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2442 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2443 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2444 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445 bp->link_up = 1;
2446 return 0;
2447}
2448
2449static int bnx2_test_link(struct bnx2 *);
2450
2451static int
2452bnx2_set_phy_loopback(struct bnx2 *bp)
2453{
2454 u32 mac_mode;
2455 int rc, i;
2456
2457 spin_lock_bh(&bp->phy_lock);
2458 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2459 BMCR_SPEED1000);
2460 spin_unlock_bh(&bp->phy_lock);
2461 if (rc)
2462 return rc;
2463
2464 for (i = 0; i < 10; i++) {
2465 if (bnx2_test_link(bp) == 0)
2466 break;
2467 msleep(100);
2468 }
2469
2470 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2471 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2472 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2473 BNX2_EMAC_MODE_25G_MODE);
2474
2475 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2476 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2477 bp->link_up = 1;
2478 return 0;
2479}
2480
2481static void
2482bnx2_dump_mcp_state(struct bnx2 *bp)
2483{
2484 struct net_device *dev = bp->dev;
2485 u32 mcp_p0, mcp_p1;
2486
2487 netdev_err(dev, "<--- start MCP states dump --->\n");
2488 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2489 mcp_p0 = BNX2_MCP_STATE_P0;
2490 mcp_p1 = BNX2_MCP_STATE_P1;
2491 } else {
2492 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2493 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2494 }
2495 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2496 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2497 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2498 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2499 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2500 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
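/* The program counter is read twice so the dump shows whether the
 * MCP is making progress or is stuck at one address.
 */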
2501 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2502 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2503 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2504 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2505 netdev_err(dev, "DEBUG: shmem states:\n");
2506 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2507 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2508 bnx2_shmem_rd(bp, BNX2_FW_MB),
2509 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2510 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2511 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2512 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2513 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2514 pr_cont(" condition[%08x]\n",
2515 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2516 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2517 DP_SHMEM_LINE(bp, 0x3cc);
2518 DP_SHMEM_LINE(bp, 0x3dc);
2519 DP_SHMEM_LINE(bp, 0x3ec);
2520 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2521 netdev_err(dev, "<--- end MCP states dump --->\n");
2522}
2523
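/* Post a message to the bootcode mailbox and, if ack is set, poll up
 * to BNX2_FW_ACK_TIME_OUT_MS for the matching sequence number to be
 * acknowledged. A timeout is reported back to the firmware and
 * returns -EBUSY; a bad completion status returns -EIO.
 */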
2524static int
2525bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2526{
2527 int i;
2528 u32 val;
2529
2530 bp->fw_wr_seq++;
2531 msg_data |= bp->fw_wr_seq;
2532 bp->fw_last_msg = msg_data;
2533
2534 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2535
2536 if (!ack)
2537 return 0;
2538
2539 /* wait for an acknowledgement. */
2540 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2541 msleep(10);
2542
2543 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2544
2545 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2546 break;
2547 }
2548 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2549 return 0;
2550
2551 /* If we timed out, inform the firmware that this is the case. */
2552 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2553 msg_data &= ~BNX2_DRV_MSG_CODE;
2554 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2555
2556 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2557 if (!silent) {
2558 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2559 bnx2_dump_mcp_state(bp);
2560 }
2561
2562 return -EBUSY;
2563 }
2564
2565 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2566 return -EIO;
2567
2568 return 0;
2569}
2570
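/* Initialize the 5709 context memory: kick off the on-chip MEM_INIT,
 * then program the host page table with the DMA address of each
 * context block, polling for every write request to complete.
 */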
2571static int
2572bnx2_init_5709_context(struct bnx2 *bp)
2573{
2574 int i, ret = 0;
2575 u32 val;
2576
2577 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2578 val |= (BNX2_PAGE_BITS - 8) << 16;
2579 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2580 for (i = 0; i < 10; i++) {
2581 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2582 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2583 break;
2584 udelay(2);
2585 }
2586 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2587 return -EBUSY;
2588
2589 for (i = 0; i < bp->ctx_pages; i++) {
2590 int j;
2591
2592 if (bp->ctx_blk[i])
2593 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2594 else
2595 return -ENOMEM;
2596
2597 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2598 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2599 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2600 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2601 (u64) bp->ctx_blk_mapping[i] >> 32);
2602 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2603 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2604 for (j = 0; j < 10; j++) {
2605
2606 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2607 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2608 break;
2609 udelay(5);
2610 }
2611 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2612 ret = -EBUSY;
2613 break;
2614 }
2615 }
2616 return ret;
2617}
2618
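/* Zero out the on-chip context for all 96 connection IDs. On the
 * 5706 A0 some CIDs are remapped to spare context memory, apparently
 * as a workaround for bad locations.
 */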
2619static void
2620bnx2_init_context(struct bnx2 *bp)
2621{
2622 u32 vcid;
2623
2624 vcid = 96;
2625 while (vcid) {
2626 u32 vcid_addr, pcid_addr, offset;
2627 int i;
2628
2629 vcid--;
2630
2631 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2632 u32 new_vcid;
2633
2634 vcid_addr = GET_PCID_ADDR(vcid);
2635 if (vcid & 0x8) {
2636 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2637 }
2638 else {
2639 new_vcid = vcid;
2640 }
2641 pcid_addr = GET_PCID_ADDR(new_vcid);
2642 }
2643 else {
2644 vcid_addr = GET_CID_ADDR(vcid);
2645 pcid_addr = vcid_addr;
2646 }
2647
2648 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2649 vcid_addr += (i << PHY_CTX_SHIFT);
2650 pcid_addr += (i << PHY_CTX_SHIFT);
2651
2652 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2653 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2654
2655 /* Zero out the context. */
2656 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2657 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2658 }
2659 }
2660}
2661
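/* Work around bad RX buffer memory: allocate every free mbuf cluster,
 * remember the ones flagged good (bit 9 clear), and free only those
 * back so that the bad clusters never return to the pool.
 */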
2662static int
2663bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2664{
2665 u16 *good_mbuf;
2666 u32 good_mbuf_cnt;
2667 u32 val;
2668
2669 good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2670 if (!good_mbuf)
2671 return -ENOMEM;
2672
2673 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2674 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2675
2676 good_mbuf_cnt = 0;
2677
2678 /* Allocate a bunch of mbufs and save the good ones in an array. */
2679 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2680 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2681 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2682 BNX2_RBUF_COMMAND_ALLOC_REQ);
2683
2684 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2685
2686 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2687
2688 /* The addresses with Bit 9 set are bad memory blocks. */
2689 if (!(val & (1 << 9))) {
2690 good_mbuf[good_mbuf_cnt] = (u16) val;
2691 good_mbuf_cnt++;
2692 }
2693
2694 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2695 }
2696
2697 /* Free the good ones back to the mbuf pool, thus discarding
2698 * all the bad ones. */
2699 while (good_mbuf_cnt) {
2700 good_mbuf_cnt--;
2701
2702 val = good_mbuf[good_mbuf_cnt];
2703 val = (val << 9) | val | 1;
2704
2705 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2706 }
2707 kfree(good_mbuf);
2708 return 0;
2709}
2710
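/* Program one perfect-match MAC address filter, splitting the 6-byte
 * address across the MATCH0/MATCH1 register pair for slot 'pos'.
 */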
2711static void
2712bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2713{
2714 u32 val;
2715
2716 val = (mac_addr[0] << 8) | mac_addr[1];
2717
2718 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2719
2720 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2721 (mac_addr[4] << 8) | mac_addr[5];
2722
2723 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2724}
2725
2726static inline int
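/* Allocate and DMA-map one page for the RX page ring and point the
 * corresponding buffer descriptor at it.
 */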
2727bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2728{
2729 dma_addr_t mapping;
2730 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2731 struct bnx2_rx_bd *rxbd =
2732 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2733 struct page *page = alloc_page(gfp);
2734
2735 if (!page)
2736 return -ENOMEM;
2737 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2738 PCI_DMA_FROMDEVICE);
2739 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2740 __free_page(page);
2741 return -EIO;
2742 }
2743
2744 rx_pg->page = page;
2745 dma_unmap_addr_set(rx_pg, mapping, mapping);
2746 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2747 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2748 return 0;
2749}
2750
2751static void
2752bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2753{
2754 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2755 struct page *page = rx_pg->page;
2756
2757 if (!page)
2758 return;
2759
2760 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2761 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2762
2763 __free_page(page);
2764 rx_pg->page = NULL;
2765}
2766
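/* Allocate and map one RX data buffer, publish its DMA address in the
 * buffer descriptor, and advance rx_prod_bseq, the running byte count
 * handed to the hardware.
 */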
2767static inline int
2768bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2769{
2770 u8 *data;
2771 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2772 dma_addr_t mapping;
2773 struct bnx2_rx_bd *rxbd =
2774 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2775
2776 data = kmalloc(bp->rx_buf_size, gfp);
2777 if (!data)
2778 return -ENOMEM;
2779
2780 mapping = dma_map_single(&bp->pdev->dev,
2781 get_l2_fhdr(data),
2782 bp->rx_buf_use_size,
2783 PCI_DMA_FROMDEVICE);
2784 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2785 kfree(data);
2786 return -EIO;
2787 }
2788
2789 rx_buf->data = data;
2790 dma_unmap_addr_set(rx_buf, mapping, mapping);
2791
2792 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2793 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2794
2795 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2796
2797 return 0;
2798}
2799
2800static int
2801bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2802{
2803 struct status_block *sblk = bnapi->status_blk.msi;
2804 u32 new_link_state, old_link_state;
2805 int is_set = 1;
2806
2807 new_link_state = sblk->status_attn_bits & event;
2808 old_link_state = sblk->status_attn_bits_ack & event;
2809 if (new_link_state != old_link_state) {
2810 if (new_link_state)
2811 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2812 else
2813 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2814 } else
2815 is_set = 0;
2816
2817 return is_set;
2818}
2819
2820static void
2821bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2822{
2823 spin_lock(&bp->phy_lock);
2824
2825 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2826 bnx2_set_link(bp);
2827 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2828 bnx2_set_remote_link(bp);
2829
2830 spin_unlock(&bp->phy_lock);
2832}
2833
2834static inline u16
2835bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2836{
2837 u16 cons;
2838
2839 cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2840
2841 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2842 cons++;
2843 return cons;
2844}
2845
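/* Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap and free each skb (deferring TSO packets until all of their
 * BDs have completed), then wake the queue if it was stopped and
 * enough descriptors are available again.
 */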
2846static int
2847bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2848{
2849 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2850 u16 hw_cons, sw_cons, sw_ring_cons;
2851 int tx_pkt = 0, index;
2852 unsigned int tx_bytes = 0;
2853 struct netdev_queue *txq;
2854
2855 index = (bnapi - bp->bnx2_napi);
2856 txq = netdev_get_tx_queue(bp->dev, index);
2857
2858 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2859 sw_cons = txr->tx_cons;
2860
2861 while (sw_cons != hw_cons) {
2862 struct bnx2_sw_tx_bd *tx_buf;
2863 struct sk_buff *skb;
2864 int i, last;
2865
2866 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2867
2868 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2869 skb = tx_buf->skb;
2870
2871 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2872 prefetch(&skb->end);
2873
2874 /* partial BD completions possible with TSO packets */
2875 if (tx_buf->is_gso) {
2876 u16 last_idx, last_ring_idx;
2877
2878 last_idx = sw_cons + tx_buf->nr_frags + 1;
2879 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2880 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2881 last_idx++;
2882 }
2883 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2884 break;
2885 }
2886 }
2887
2888 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2889 skb_headlen(skb), PCI_DMA_TODEVICE);
2890
2891 tx_buf->skb = NULL;
2892 last = tx_buf->nr_frags;
2893
2894 for (i = 0; i < last; i++) {
2895 struct bnx2_sw_tx_bd *tx_buf;
2896
2897 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2898
2899 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2900 dma_unmap_page(&bp->pdev->dev,
2901 dma_unmap_addr(tx_buf, mapping),
2902 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2903 PCI_DMA_TODEVICE);
2904 }
2905
2906 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2907
2908 tx_bytes += skb->len;
2909 dev_kfree_skb_any(skb);
2910 tx_pkt++;
2911 if (tx_pkt == budget)
2912 break;
2913
2914 if (hw_cons == sw_cons)
2915 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2916 }
2917
2918 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2919 txr->hw_tx_cons = hw_cons;
2920 txr->tx_cons = sw_cons;
2921
2922 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2923 * before checking for netif_tx_queue_stopped(). Without the
2924 * memory barrier, there is a small possibility that bnx2_start_xmit()
2925 * will miss it and cause the queue to be stopped forever.
2926 */
2927 smp_mb();
2928
2929 if (unlikely(netif_tx_queue_stopped(txq)) &&
2930 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2931 __netif_tx_lock(txq, smp_processor_id());
2932 if ((netif_tx_queue_stopped(txq)) &&
2933 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2934 netif_tx_wake_queue(txq);
2935 __netif_tx_unlock(txq);
2936 }
2937
2938 return tx_pkt;
2939}
2940
2941static void
2942bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2943 struct sk_buff *skb, int count)
2944{
2945 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2946 struct bnx2_rx_bd *cons_bd, *prod_bd;
2947 int i;
2948 u16 hw_prod, prod;
2949 u16 cons = rxr->rx_pg_cons;
2950
2951 cons_rx_pg = &rxr->rx_pg_ring[cons];
2952
2953 /* The caller was unable to allocate a new page to replace the
2954 * last one in the frags array, so we need to recycle that page
2955 * and then free the skb.
2956 */
2957 if (skb) {
2958 struct page *page;
2959 struct skb_shared_info *shinfo;
2960
2961 shinfo = skb_shinfo(skb);
2962 shinfo->nr_frags--;
2963 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2964 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2965
2966 cons_rx_pg->page = page;
2967 dev_kfree_skb(skb);
2968 }
2969
2970 hw_prod = rxr->rx_pg_prod;
2971
2972 for (i = 0; i < count; i++) {
2973 prod = BNX2_RX_PG_RING_IDX(hw_prod);
2974
2975 prod_rx_pg = &rxr->rx_pg_ring[prod];
2976 cons_rx_pg = &rxr->rx_pg_ring[cons];
2977 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2978 [BNX2_RX_IDX(cons)];
2979 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2980 [BNX2_RX_IDX(prod)];
2981
2982 if (prod != cons) {
2983 prod_rx_pg->page = cons_rx_pg->page;
2984 cons_rx_pg->page = NULL;
2985 dma_unmap_addr_set(prod_rx_pg, mapping,
2986 dma_unmap_addr(cons_rx_pg, mapping));
2987
2988 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2989 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2990
2991 }
2992 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2993 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2994 }
2995 rxr->rx_pg_prod = hw_prod;
2996 rxr->rx_pg_cons = cons;
2997}
2998
2999static inline void
3000bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
3001 u8 *data, u16 cons, u16 prod)
3002{
3003 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
3004 struct bnx2_rx_bd *cons_bd, *prod_bd;
3005
3006 cons_rx_buf = &rxr->rx_buf_ring[cons];
3007 prod_rx_buf = &rxr->rx_buf_ring[prod];
3008
3009 dma_sync_single_for_device(&bp->pdev->dev,
3010 dma_unmap_addr(cons_rx_buf, mapping),
3011 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
3012
3013 rxr->rx_prod_bseq += bp->rx_buf_use_size;
3014
3015 prod_rx_buf->data = data;
3016
3017 if (cons == prod)
3018 return;
3019
3020 dma_unmap_addr_set(prod_rx_buf, mapping,
3021 dma_unmap_addr(cons_rx_buf, mapping));
3022
3023 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3024 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3025 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3026 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3027}
3028
3029static struct sk_buff *
3030bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3031 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3032 u32 ring_idx)
3033{
3034 int err;
3035 u16 prod = ring_idx & 0xffff;
3036 struct sk_buff *skb;
3037
3038 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3039 if (unlikely(err)) {
3040 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3041error:
3042 if (hdr_len) {
3043 unsigned int raw_len = len + 4;
3044 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3045
3046 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3047 }
3048 return NULL;
3049 }
3050
3051 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3052 PCI_DMA_FROMDEVICE);
3053 skb = build_skb(data, 0);
3054 if (!skb) {
3055 kfree(data);
3056 goto error;
3057 }
3058 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3059 if (hdr_len == 0) {
3060 skb_put(skb, len);
3061 return skb;
3062 } else {
3063 unsigned int i, frag_len, frag_size, pages;
3064 struct bnx2_sw_pg *rx_pg;
3065 u16 pg_cons = rxr->rx_pg_cons;
3066 u16 pg_prod = rxr->rx_pg_prod;
3067
3068 frag_size = len + 4 - hdr_len;
3069 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3070 skb_put(skb, hdr_len);
3071
3072 for (i = 0; i < pages; i++) {
3073 dma_addr_t mapping_old;
3074
3075 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3076 if (unlikely(frag_len <= 4)) {
3077 unsigned int tail = 4 - frag_len;
3078
3079 rxr->rx_pg_cons = pg_cons;
3080 rxr->rx_pg_prod = pg_prod;
3081 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3082 pages - i);
3083 skb->len -= tail;
3084 if (i == 0) {
3085 skb->tail -= tail;
3086 } else {
3087 skb_frag_t *frag =
3088 &skb_shinfo(skb)->frags[i - 1];
3089 skb_frag_size_sub(frag, tail);
3090 skb->data_len -= tail;
3091 }
3092 return skb;
3093 }
3094 rx_pg = &rxr->rx_pg_ring[pg_cons];
3095
3096 /* Don't unmap yet. If we're unable to allocate a new
3097 * page, we need to recycle the page and the DMA addr.
3098 */
3099 mapping_old = dma_unmap_addr(rx_pg, mapping);
3100 if (i == pages - 1)
3101 frag_len -= 4;
3102
3103 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3104 rx_pg->page = NULL;
3105
3106 err = bnx2_alloc_rx_page(bp, rxr,
3107 BNX2_RX_PG_RING_IDX(pg_prod),
3108 GFP_ATOMIC);
3109 if (unlikely(err)) {
3110 rxr->rx_pg_cons = pg_cons;
3111 rxr->rx_pg_prod = pg_prod;
3112 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3113 pages - i);
3114 return NULL;
3115 }
3116
3117 dma_unmap_page(&bp->pdev->dev, mapping_old,
3118 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3119
3120 frag_size -= frag_len;
3121 skb->data_len += frag_len;
3122 skb->truesize += PAGE_SIZE;
3123 skb->len += frag_len;
3124
3125 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3126 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3127 }
3128 rxr->rx_pg_prod = pg_prod;
3129 rxr->rx_pg_cons = pg_cons;
3130 }
3131 return skb;
3132}
3133
3134static inline u16
3135bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3136{
3137 u16 cons;
3138
3139 cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3140
3141 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3142 cons++;
3143 return cons;
3144}
3145
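/* Main RX path: walk the completion ring up to the budget, drop
 * frames with errors, copy small packets into fresh skbs, build skbs
 * (with page frags for split/jumbo frames) for the rest, and pass
 * them up through GRO.
 */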
3146static int
3147bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3148{
3149 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3150 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3151 struct l2_fhdr *rx_hdr;
3152 int rx_pkt = 0, pg_ring_used = 0;
3153
3154 if (budget <= 0)
3155 return rx_pkt;
3156
3157 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3158 sw_cons = rxr->rx_cons;
3159 sw_prod = rxr->rx_prod;
3160
3161 /* Memory barrier necessary as speculative reads of the rx
3162 * buffer can be ahead of the index in the status block
3163 */
3164 rmb();
3165 while (sw_cons != hw_cons) {
3166 unsigned int len, hdr_len;
3167 u32 status;
3168 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3169 struct sk_buff *skb;
3170 dma_addr_t dma_addr;
3171 u8 *data;
3172 u16 next_ring_idx;
3173
3174 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3175 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3176
3177 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3178 data = rx_buf->data;
3179 rx_buf->data = NULL;
3180
3181 rx_hdr = get_l2_fhdr(data);
3182 prefetch(rx_hdr);
3183
3184 dma_addr = dma_unmap_addr(rx_buf, mapping);
3185
3186 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3187 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3188 PCI_DMA_FROMDEVICE);
3189
3190 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3191 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3192 prefetch(get_l2_fhdr(next_rx_buf->data));
3193
3194 len = rx_hdr->l2_fhdr_pkt_len;
3195 status = rx_hdr->l2_fhdr_status;
3196
3197 hdr_len = 0;
3198 if (status & L2_FHDR_STATUS_SPLIT) {
3199 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3200 pg_ring_used = 1;
3201 } else if (len > bp->rx_jumbo_thresh) {
3202 hdr_len = bp->rx_jumbo_thresh;
3203 pg_ring_used = 1;
3204 }
3205
3206 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3207 L2_FHDR_ERRORS_PHY_DECODE |
3208 L2_FHDR_ERRORS_ALIGNMENT |
3209 L2_FHDR_ERRORS_TOO_SHORT |
3210 L2_FHDR_ERRORS_GIANT_FRAME))) {
3211
3212 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3213 sw_ring_prod);
3214 if (pg_ring_used) {
3215 int pages;
3216
3217 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3218
3219 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3220 }
3221 goto next_rx;
3222 }
3223
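/* The length from the l2_fhdr includes the 4-byte frame CRC; strip it. */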
3224 len -= 4;
3225
3226 if (len <= bp->rx_copy_thresh) {
3227 skb = netdev_alloc_skb(bp->dev, len + 6);
3228 if (skb == NULL) {
3229 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3230 sw_ring_prod);
3231 goto next_rx;
3232 }
3233
3234 /* aligned copy */
3235 memcpy(skb->data,
3236 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3237 len + 6);
3238 skb_reserve(skb, 6);
3239 skb_put(skb, len);
3240
3241 bnx2_reuse_rx_data(bp, rxr, data,
3242 sw_ring_cons, sw_ring_prod);
3243
3244 } else {
3245 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3246 (sw_ring_cons << 16) | sw_ring_prod);
3247 if (!skb)
3248 goto next_rx;
3249 }
3250 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3251 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3252 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3253
3254 skb->protocol = eth_type_trans(skb, bp->dev);
3255
3256 if (len > (bp->dev->mtu + ETH_HLEN) &&
3257 skb->protocol != htons(ETH_P_8021Q) &&
3258 skb->protocol != htons(ETH_P_8021AD)) {
3259
3260 dev_kfree_skb(skb);
3261 goto next_rx;
3262
3263 }
3264
3265 skb_checksum_none_assert(skb);
3266 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3267 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3268 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3269
3270 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3271 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3272 skb->ip_summed = CHECKSUM_UNNECESSARY;
3273 }
3274 if ((bp->dev->features & NETIF_F_RXHASH) &&
3275 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3276 L2_FHDR_STATUS_USE_RXHASH))
3277 skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3278 PKT_HASH_TYPE_L3);
3279
3280 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3281 napi_gro_receive(&bnapi->napi, skb);
3282 rx_pkt++;
3283
3284next_rx:
3285 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3286 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3287
3288 if (rx_pkt == budget)
3289 break;
3290
3291 /* Refresh hw_cons to see if there is new work */
3292 if (sw_cons == hw_cons) {
3293 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3294 rmb();
3295 }
3296 }
3297 rxr->rx_cons = sw_cons;
3298 rxr->rx_prod = sw_prod;
3299
3300 if (pg_ring_used)
3301 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3302
3303 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3304
3305 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3306
3307 mmiowb();
3308
3309 return rx_pkt;
3311}
3312
3313/* MSI ISR - The only difference between this and the INTx ISR
3314 * is that the MSI interrupt is always serviced.
3315 */
3316static irqreturn_t
3317bnx2_msi(int irq, void *dev_instance)
3318{
3319 struct bnx2_napi *bnapi = dev_instance;
3320 struct bnx2 *bp = bnapi->bp;
3321
3322 prefetch(bnapi->status_blk.msi);
3323 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3324 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3325 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3326
3327 /* Return here if interrupt is disabled. */
3328 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3329 return IRQ_HANDLED;
3330
3331 napi_schedule(&bnapi->napi);
3332
3333 return IRQ_HANDLED;
3334}
3335
3336static irqreturn_t
3337bnx2_msi_1shot(int irq, void *dev_instance)
3338{
3339 struct bnx2_napi *bnapi = dev_instance;
3340 struct bnx2 *bp = bnapi->bp;
3341
3342 prefetch(bnapi->status_blk.msi);
3343
3344 /* Return here if interrupt is disabled. */
3345 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3346 return IRQ_HANDLED;
3347
3348 napi_schedule(&bnapi->napi);
3349
3350 return IRQ_HANDLED;
3351}
3352
3353static irqreturn_t
3354bnx2_interrupt(int irq, void *dev_instance)
3355{
3356 struct bnx2_napi *bnapi = dev_instance;
3357 struct bnx2 *bp = bnapi->bp;
3358 struct status_block *sblk = bnapi->status_blk.msi;
3359
3360 /* When using INTx, it is possible for the interrupt to arrive
3361 * at the CPU before the status block posted prior to the
3362 * interrupt. Reading a register will flush the status block.
3363 * When using MSI, the MSI message will always complete after
3364 * the status block write.
3365 */
3366 if ((sblk->status_idx == bnapi->last_status_idx) &&
3367 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3368 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3369 return IRQ_NONE;
3370
3371 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3372 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3373 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3374
3375 /* Read back to deassert IRQ immediately to avoid too many
3376 * spurious interrupts.
3377 */
3378 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3379
3380 /* Return here if interrupt is shared and is disabled. */
3381 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3382 return IRQ_HANDLED;
3383
3384 if (napi_schedule_prep(&bnapi->napi)) {
3385 bnapi->last_status_idx = sblk->status_idx;
3386 __napi_schedule(&bnapi->napi);
3387 }
3388
3389 return IRQ_HANDLED;
3390}
3391
3392static inline int
3393bnx2_has_fast_work(struct bnx2_napi *bnapi)
3394{
3395 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3396 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3397
3398 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3399 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3400 return 1;
3401 return 0;
3402}
3403
3404#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3405 STATUS_ATTN_BITS_TIMER_ABORT)
3406
3407static inline int
3408bnx2_has_work(struct bnx2_napi *bnapi)
3409{
3410 struct status_block *sblk = bnapi->status_blk.msi;
3411
3412 if (bnx2_has_fast_work(bnapi))
3413 return 1;
3414
3415#ifdef BCM_CNIC
3416 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3417 return 1;
3418#endif
3419
3420 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3421 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3422 return 1;
3423
3424 return 0;
3425}
3426
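/* An MSI can occasionally be missed. If work has been pending with no
 * status index change since the last idle check, bounce the MSI enable
 * bit and invoke the handler directly to unwedge the device.
 */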
3427static void
3428bnx2_chk_missed_msi(struct bnx2 *bp)
3429{
3430 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3431 u32 msi_ctrl;
3432
3433 if (bnx2_has_work(bnapi)) {
3434 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3435 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3436 return;
3437
3438 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3439 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3440 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3441 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3442 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3443 }
3444 }
3445
3446 bp->idle_chk_status_idx = bnapi->last_status_idx;
3447}
3448
3449#ifdef BCM_CNIC
3450static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3451{
3452 struct cnic_ops *c_ops;
3453
3454 if (!bnapi->cnic_present)
3455 return;
3456
3457 rcu_read_lock();
3458 c_ops = rcu_dereference(bp->cnic_ops);
3459 if (c_ops)
3460 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3461 bnapi->status_blk.msi);
3462 rcu_read_unlock();
3463}
3464#endif
3465
3466static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3467{
3468 struct status_block *sblk = bnapi->status_blk.msi;
3469 u32 status_attn_bits = sblk->status_attn_bits;
3470 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3471
3472 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3473 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3474
3475 bnx2_phy_int(bp, bnapi);
3476
3477 /* This is needed to take care of transient status
3478 * during link changes.
3479 */
3480 BNX2_WR(bp, BNX2_HC_COMMAND,
3481 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3482 BNX2_RD(bp, BNX2_HC_COMMAND);
3483 }
3484}
3485
3486static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3487 int work_done, int budget)
3488{
3489 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3490 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3491
3492 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3493 bnx2_tx_int(bp, bnapi, 0);
3494
3495 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3496 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3497
3498 return work_done;
3499}
3500
3501static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3502{
3503 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3504 struct bnx2 *bp = bnapi->bp;
3505 int work_done = 0;
3506 struct status_block_msix *sblk = bnapi->status_blk.msix;
3507
3508 while (1) {
3509 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3510 if (unlikely(work_done >= budget))
3511 break;
3512
3513 bnapi->last_status_idx = sblk->status_idx;
3514 /* status idx must be read before checking for more work. */
3515 rmb();
3516 if (likely(!bnx2_has_fast_work(bnapi))) {
3517
3518 napi_complete_done(napi, work_done);
3519 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3520 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3521 bnapi->last_status_idx);
3522 break;
3523 }
3524 }
3525 return work_done;
3526}
3527
3528static int bnx2_poll(struct napi_struct *napi, int budget)
3529{
3530 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3531 struct bnx2 *bp = bnapi->bp;
3532 int work_done = 0;
3533 struct status_block *sblk = bnapi->status_blk.msi;
3534
3535 while (1) {
3536 bnx2_poll_link(bp, bnapi);
3537
3538 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3539
3540#ifdef BCM_CNIC
3541 bnx2_poll_cnic(bp, bnapi);
3542#endif
3543
3544 /* bnapi->last_status_idx is used below to tell the hw how
3545 * much work has been processed, so we must read it before
3546 * checking for more work.
3547 */
3548 bnapi->last_status_idx = sblk->status_idx;
3549
3550 if (unlikely(work_done >= budget))
3551 break;
3552
3553 rmb();
3554 if (likely(!bnx2_has_work(bnapi))) {
3555 napi_complete_done(napi, work_done);
3556 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3557 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3558 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3559 bnapi->last_status_idx);
3560 break;
3561 }
3562 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3563 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3564 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3565 bnapi->last_status_idx);
3566
3567 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3568 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3569 bnapi->last_status_idx);
3570 break;
3571 }
3572 }
3573
3574 return work_done;
3575}
3576
3577/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3578 * from set_multicast.
3579 */
3580static void
3581bnx2_set_rx_mode(struct net_device *dev)
3582{
3583 struct bnx2 *bp = netdev_priv(dev);
3584 u32 rx_mode, sort_mode;
3585 struct netdev_hw_addr *ha;
3586 int i;
3587
3588 if (!netif_running(dev))
3589 return;
3590
3591 spin_lock_bh(&bp->phy_lock);
3592
3593 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3594 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3595 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3596 if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3597 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3598 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3599 if (dev->flags & IFF_PROMISC) {
3600 /* Promiscuous mode. */
3601 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3602 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3603 BNX2_RPM_SORT_USER0_PROM_VLAN;
3604 }
3605 else if (dev->flags & IFF_ALLMULTI) {
3606 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3607 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3608 0xffffffff);
3609 }
3610 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3611 }
3612 else {
3613 /* Accept one or more multicast(s). */
3614 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3615 u32 regidx;
3616 u32 bit;
3617 u32 crc;
3618
3619 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3620
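/* Hash each multicast address into a 256-bit table: the low byte of
 * the CRC selects one of 256 bits spread across the 8 hash registers.
 */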
3621 netdev_for_each_mc_addr(ha, dev) {
3622 crc = ether_crc_le(ETH_ALEN, ha->addr);
3623 bit = crc & 0xff;
3624 regidx = (bit & 0xe0) >> 5;
3625 bit &= 0x1f;
3626 mc_filter[regidx] |= (1 << bit);
3627 }
3628
3629 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3630 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3631 mc_filter[i]);
3632 }
3633
3634 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3635 }
3636
3637 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3638 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3639 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3640 BNX2_RPM_SORT_USER0_PROM_VLAN;
3641 } else if (!(dev->flags & IFF_PROMISC)) {
3642 /* Add all entries into the match filter list */
3643 i = 0;
3644 netdev_for_each_uc_addr(ha, dev) {
3645 bnx2_set_mac_addr(bp, ha->addr,
3646 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3647 sort_mode |= (1 <<
3648 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3649 i++;
3650 }
3651
3652 }
3653
3654 if (rx_mode != bp->rx_mode) {
3655 bp->rx_mode = rx_mode;
3656 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3657 }
3658
3659 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3660 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3661 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3662
3663 spin_unlock_bh(&bp->phy_lock);
3664}
3665
3666static int
3667check_fw_section(const struct firmware *fw,
3668 const struct bnx2_fw_file_section *section,
3669 u32 alignment, bool non_empty)
3670{
3671 u32 offset = be32_to_cpu(section->offset);
3672 u32 len = be32_to_cpu(section->len);
3673
3674 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3675 return -EINVAL;
3676 if ((non_empty && len == 0) || len > fw->size - offset ||
3677 len & (alignment - 1))
3678 return -EINVAL;
3679 return 0;
3680}
3681
3682static int
3683check_mips_fw_entry(const struct firmware *fw,
3684 const struct bnx2_mips_fw_file_entry *entry)
3685{
3686 if (check_fw_section(fw, &entry->text, 4, true) ||
3687 check_fw_section(fw, &entry->data, 4, false) ||
3688 check_fw_section(fw, &entry->rodata, 4, false))
3689 return -EINVAL;
3690 return 0;
3691}
3692
3693static void bnx2_release_firmware(struct bnx2 *bp)
3694{
3695 if (bp->rv2p_firmware) {
3696 release_firmware(bp->mips_firmware);
3697 release_firmware(bp->rv2p_firmware);
3698 bp->rv2p_firmware = NULL;
3699 }
3700}
3701
3702static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3703{
3704 const char *mips_fw_file, *rv2p_fw_file;
3705 const struct bnx2_mips_fw_file *mips_fw;
3706 const struct bnx2_rv2p_fw_file *rv2p_fw;
3707 int rc;
3708
3709 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3710 mips_fw_file = FW_MIPS_FILE_09;
3711 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3712 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3713 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3714 else
3715 rv2p_fw_file = FW_RV2P_FILE_09;
3716 } else {
3717 mips_fw_file = FW_MIPS_FILE_06;
3718 rv2p_fw_file = FW_RV2P_FILE_06;
3719 }
3720
3721 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3722 if (rc) {
3723 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3724 goto out;
3725 }
3726
3727 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3728 if (rc) {
3729 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3730 goto err_release_mips_firmware;
3731 }
3732 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3733 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3734 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3735 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3736 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3737 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3738 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3739 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3740 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3741 rc = -EINVAL;
3742 goto err_release_firmware;
3743 }
3744 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3745 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3746 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3747 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3748 rc = -EINVAL;
3749 goto err_release_firmware;
3750 }
3751out:
3752 return rc;
3753
3754err_release_firmware:
3755 release_firmware(bp->rv2p_firmware);
3756 bp->rv2p_firmware = NULL;
3757err_release_mips_firmware:
3758 release_firmware(bp->mips_firmware);
3759 goto out;
3760}
3761
3762static int bnx2_request_firmware(struct bnx2 *bp)
3763{
3764 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3765}
3766
3767static u32
3768rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3769{
3770 switch (idx) {
3771 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3772 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3773 rv2p_code |= RV2P_BD_PAGE_SIZE;
3774 break;
3775 }
3776 return rv2p_code;
3777}
3778
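/* Download RV2P firmware: feed each 64-bit instruction through the
 * INSTR_HIGH/INSTR_LOW registers, apply the fixup table on top, then
 * reset the target processor (it is un-stalled later).
 */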
3779static int
3780load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3781 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3782{
3783 u32 rv2p_code_len, file_offset;
3784 __be32 *rv2p_code;
3785 int i;
3786 u32 val, cmd, addr;
3787
3788 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3789 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3790
3791 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3792
3793 if (rv2p_proc == RV2P_PROC1) {
3794 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3795 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3796 } else {
3797 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3798 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3799 }
3800
3801 for (i = 0; i < rv2p_code_len; i += 8) {
3802 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3803 rv2p_code++;
3804 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3805 rv2p_code++;
3806
3807 val = (i / 8) | cmd;
3808 BNX2_WR(bp, addr, val);
3809 }
3810
3811 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3812 for (i = 0; i < 8; i++) {
3813 u32 loc, code;
3814
3815 loc = be32_to_cpu(fw_entry->fixup[i]);
3816 if (loc && ((loc * 4) < rv2p_code_len)) {
3817 code = be32_to_cpu(*(rv2p_code + loc - 1));
3818 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3819 code = be32_to_cpu(*(rv2p_code + loc));
3820 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3821 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3822
3823 val = (loc / 2) | cmd;
3824 BNX2_WR(bp, addr, val);
3825 }
3826 }
3827
3828 /* Reset the processor; it is un-stalled later. */
3829 if (rv2p_proc == RV2P_PROC1) {
3830 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3831 }
3832 else {
3833 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3834 }
3835
3836 return 0;
3837}
3838
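/* Load one MIPS CPU image: halt the CPU, copy the text, data and
 * read-only sections into its scratchpad window, set the PC to the
 * image entry point, and release the halt.
 */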
3839static int
3840load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3841 const struct bnx2_mips_fw_file_entry *fw_entry)
3842{
3843 u32 addr, len, file_offset;
3844 __be32 *data;
3845 u32 offset;
3846 u32 val;
3847
3848 /* Halt the CPU. */
3849 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3850 val |= cpu_reg->mode_value_halt;
3851 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3852 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3853
3854 /* Load the Text area. */
3855 addr = be32_to_cpu(fw_entry->text.addr);
3856 len = be32_to_cpu(fw_entry->text.len);
3857 file_offset = be32_to_cpu(fw_entry->text.offset);
3858 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3859
3860 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3861 if (len) {
3862 int j;
3863
3864 for (j = 0; j < (len / 4); j++, offset += 4)
3865 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3866 }
3867
3868 /* Load the Data area. */
3869 addr = be32_to_cpu(fw_entry->data.addr);
3870 len = be32_to_cpu(fw_entry->data.len);
3871 file_offset = be32_to_cpu(fw_entry->data.offset);
3872 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3873
3874 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3875 if (len) {
3876 int j;
3877
3878 for (j = 0; j < (len / 4); j++, offset += 4)
3879 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3880 }
3881
3882 /* Load the Read-Only area. */
3883 addr = be32_to_cpu(fw_entry->rodata.addr);
3884 len = be32_to_cpu(fw_entry->rodata.len);
3885 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3886 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3887
3888 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3889 if (len) {
3890 int j;
3891
3892 for (j = 0; j < (len / 4); j++, offset += 4)
3893 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3894 }
3895
3896 /* Clear the pre-fetch instruction. */
3897 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3898
3899 val = be32_to_cpu(fw_entry->start_addr);
3900 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3901
3902 /* Start the CPU. */
3903 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3904 val &= ~cpu_reg->mode_value_halt;
3905 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3906 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3907
3908 return 0;
3909}
3910
3911static int
3912bnx2_init_cpus(struct bnx2 *bp)
3913{
3914 const struct bnx2_mips_fw_file *mips_fw =
3915 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3916 const struct bnx2_rv2p_fw_file *rv2p_fw =
3917 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3918 int rc;
3919
3920 /* Initialize the RV2P processor. */
3921 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3922 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3923
3924 /* Initialize the RX Processor. */
3925 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3926 if (rc)
3927 goto init_cpu_err;
3928
3929 /* Initialize the TX Processor. */
3930 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3931 if (rc)
3932 goto init_cpu_err;
3933
3934 /* Initialize the TX Patch-up Processor. */
3935 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3936 if (rc)
3937 goto init_cpu_err;
3938
3939 /* Initialize the Completion Processor. */
3940 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3941 if (rc)
3942 goto init_cpu_err;
3943
3944 /* Initialize the Command Processor. */
3945 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3946
3947init_cpu_err:
3948 return rc;
3949}
3950
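/* Prepare the device for Wake-on-LAN: restrict autoneg advertisement
 * to 10/100 on copper ports, enable magic and ACPI packet reception
 * plus all-multicast, and tell the bootcode whether to suspend with
 * or without WOL.
 */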
3951static void
3952bnx2_setup_wol(struct bnx2 *bp)
3953{
3954 int i;
3955 u32 val, wol_msg;
3956
3957 if (bp->wol) {
3958 u32 advertising;
3959 u8 autoneg;
3960
3961 autoneg = bp->autoneg;
3962 advertising = bp->advertising;
3963
3964 if (bp->phy_port == PORT_TP) {
3965 bp->autoneg = AUTONEG_SPEED;
3966 bp->advertising = ADVERTISED_10baseT_Half |
3967 ADVERTISED_10baseT_Full |
3968 ADVERTISED_100baseT_Half |
3969 ADVERTISED_100baseT_Full |
3970 ADVERTISED_Autoneg;
3971 }
3972
3973 spin_lock_bh(&bp->phy_lock);
3974 bnx2_setup_phy(bp, bp->phy_port);
3975 spin_unlock_bh(&bp->phy_lock);
3976
3977 bp->autoneg = autoneg;
3978 bp->advertising = advertising;
3979
3980 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3981
3982 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3983
3984 /* Enable port mode. */
3985 val &= ~BNX2_EMAC_MODE_PORT;
3986 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3987 BNX2_EMAC_MODE_ACPI_RCVD |
3988 BNX2_EMAC_MODE_MPKT;
3989 if (bp->phy_port == PORT_TP) {
3990 val |= BNX2_EMAC_MODE_PORT_MII;
3991 } else {
3992 val |= BNX2_EMAC_MODE_PORT_GMII;
3993 if (bp->line_speed == SPEED_2500)
3994 val |= BNX2_EMAC_MODE_25G_MODE;
3995 }
3996
3997 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3998
3999 /* receive all multicast */
4000 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
4001 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
4002 0xffffffff);
4003 }
4004 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
4005
4006 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4007 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4008 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4009 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4010
4011 /* Need to enable EMAC and RPM for WOL. */
4012 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4013 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4014 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4015 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4016
4017 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4018 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4019 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4020
4021 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4022 } else {
4023 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4024 }
4025
4026 if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4027 u32 val;
4028
4029 wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4030 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4031 bnx2_fw_sync(bp, wol_msg, 1, 0);
4032 return;
4033 }
4034 /* Tell firmware not to power down the PHY yet, otherwise
4035 * the chip will take a long time to respond to MMIO reads.
4036 */
4037 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4038 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4039 val | BNX2_PORT_FEATURE_ASF_ENABLED);
4040 bnx2_fw_sync(bp, wol_msg, 1, 0);
4041 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4042 }
4044}
4045
4046static int
4047bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4048{
4049 switch (state) {
4050 case PCI_D0: {
4051 u32 val;
4052
4053 pci_enable_wake(bp->pdev, PCI_D0, false);
4054 pci_set_power_state(bp->pdev, PCI_D0);
4055
4056 val = BNX2_RD(bp, BNX2_EMAC_MODE);
4057 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4058 val &= ~BNX2_EMAC_MODE_MPKT;
4059 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4060
4061 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4062 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4063 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4064 break;
4065 }
4066 case PCI_D3hot: {
4067 bnx2_setup_wol(bp);
4068 pci_wake_from_d3(bp->pdev, bp->wol);
4069 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4070 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4071
4072 if (bp->wol)
4073 pci_set_power_state(bp->pdev, PCI_D3hot);
4074 break;
4075
4076 }
4077 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4078 u32 val;
4079
4080 /* Tell firmware not to power down the PHY yet,
4081 * otherwise the other port may not respond to
4082 * MMIO reads.
4083 */
4084 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4085 val &= ~BNX2_CONDITION_PM_STATE_MASK;
4086 val |= BNX2_CONDITION_PM_STATE_UNPREP;
4087 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4088 }
4089 pci_set_power_state(bp->pdev, PCI_D3hot);
4090
4091 /* No more memory access after this point until
4092 * device is brought back to D0.
4093 */
4094 break;
4095 }
4096 default:
4097 return -EINVAL;
4098 }
4099 return 0;
4100}
4101
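/* The NVRAM is shared with the bootcode, so host access goes through a
 * hardware arbiter.  Setting request bit 2 and polling for the matching
 * grant below bounds the wait to NVRAM_TIMEOUT_COUNT polls of 5 us each
 * (150 ms at the historical value of 30000).
 */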
4102static int
4103bnx2_acquire_nvram_lock(struct bnx2 *bp)
4104{
4105 u32 val;
4106 int j;
4107
4108 /* Request access to the flash interface. */
4109 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4110 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4111 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4112 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4113 break;
4114
4115 udelay(5);
4116 }
4117
4118 if (j >= NVRAM_TIMEOUT_COUNT)
4119 return -EBUSY;
4120
4121 return 0;
4122}
4123
4124static int
4125bnx2_release_nvram_lock(struct bnx2 *bp)
4126{
4127 int j;
4128 u32 val;
4129
	/* Relinquish the NVRAM interface. */
4131 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4132
4133 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4134 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4135 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4136 break;
4137
4138 udelay(5);
4139 }
4140
4141 if (j >= NVRAM_TIMEOUT_COUNT)
4142 return -EBUSY;
4143
4144 return 0;
4145}
4146
4148static int
4149bnx2_enable_nvram_write(struct bnx2 *bp)
4150{
4151 u32 val;
4152
4153 val = BNX2_RD(bp, BNX2_MISC_CFG);
4154 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4155
4156 if (bp->flash_info->flags & BNX2_NV_WREN) {
4157 int j;
4158
4159 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4160 BNX2_WR(bp, BNX2_NVM_COMMAND,
4161 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4162
4163 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4164 udelay(5);
4165
4166 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4167 if (val & BNX2_NVM_COMMAND_DONE)
4168 break;
4169 }
4170
4171 if (j >= NVRAM_TIMEOUT_COUNT)
4172 return -EBUSY;
4173 }
4174 return 0;
4175}
4176
4177static void
4178bnx2_disable_nvram_write(struct bnx2 *bp)
4179{
4180 u32 val;
4181
4182 val = BNX2_RD(bp, BNX2_MISC_CFG);
4183 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4184}
4185
4186
4187static void
4188bnx2_enable_nvram_access(struct bnx2 *bp)
4189{
4190 u32 val;
4191
4192 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4193 /* Enable both bits, even on read. */
4194 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4195 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4196}
4197
4198static void
4199bnx2_disable_nvram_access(struct bnx2 *bp)
4200{
4201 u32 val;
4202
4203 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4204 /* Disable both bits, even after read. */
4205 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4206 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4207 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4208}
4209
4210static int
4211bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4212{
4213 u32 cmd;
4214 int j;
4215
4216 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4217 /* Buffered flash, no erase needed */
4218 return 0;
4219
4220 /* Build an erase command */
4221 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4222 BNX2_NVM_COMMAND_DOIT;
4223
4224 /* Need to clear DONE bit separately. */
4225 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4226
	/* Address of the NVRAM page to erase. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4229
4230 /* Issue an erase command. */
4231 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4232
4233 /* Wait for completion. */
4234 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4235 u32 val;
4236
4237 udelay(5);
4238
4239 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4240 if (val & BNX2_NVM_COMMAND_DONE)
4241 break;
4242 }
4243
4244 if (j >= NVRAM_TIMEOUT_COUNT)
4245 return -EBUSY;
4246
4247 return 0;
4248}
4249
4250static int
4251bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4252{
4253 u32 cmd;
4254 int j;
4255
4256 /* Build the command word. */
4257 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4258
	/* Translate the offset into the buffered flash's page
	 * addressing; not needed on the 5709.
	 */
4260 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4261 offset = ((offset / bp->flash_info->page_size) <<
4262 bp->flash_info->page_bits) +
4263 (offset % bp->flash_info->page_size);
4264 }
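	/* Worked example, assuming a buffered part with 264-byte pages
	 * mapped on 512-byte (page_bits = 9) boundaries: offset 1000 is
	 * page 3, byte 208, so the device address is (3 << 9) + 208 = 1744.
	 */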
4265
4266 /* Need to clear DONE bit separately. */
4267 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4268
4269 /* Address of the NVRAM to read from. */
4270 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4271
4272 /* Issue a read command. */
4273 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4274
4275 /* Wait for completion. */
4276 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4277 u32 val;
4278
4279 udelay(5);
4280
4281 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4282 if (val & BNX2_NVM_COMMAND_DONE) {
4283 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4284 memcpy(ret_val, &v, 4);
4285 break;
4286 }
4287 }
4288 if (j >= NVRAM_TIMEOUT_COUNT)
4289 return -EBUSY;
4290
4291 return 0;
4292}
4293
4294
4295static int
4296bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4297{
4298 u32 cmd;
4299 __be32 val32;
4300 int j;
4301
4302 /* Build the command word. */
4303 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4304
	/* Translate the offset into the buffered flash's page
	 * addressing; not needed on the 5709.
	 */
4306 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4307 offset = ((offset / bp->flash_info->page_size) <<
4308 bp->flash_info->page_bits) +
4309 (offset % bp->flash_info->page_size);
4310 }
4311
4312 /* Need to clear DONE bit separately. */
4313 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4314
4315 memcpy(&val32, val, 4);
4316
4317 /* Write the data. */
4318 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4319
4320 /* Address of the NVRAM to write to. */
4321 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4322
4323 /* Issue the write command. */
4324 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4325
4326 /* Wait for completion. */
4327 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4328 udelay(5);
4329
4330 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4331 break;
4332 }
4333 if (j >= NVRAM_TIMEOUT_COUNT)
4334 return -EBUSY;
4335
4336 return 0;
4337}
4338
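/* Identify the attached flash/EEPROM.  The 5709 uses a fixed part; on
 * other chips the strapping pins latched into NVM_CFG1 are matched
 * against flash_table.  If the interface was already reconfigured
 * (bit 30 set), only the backup strap bits are compared; otherwise the
 * matching entry's timing values are programmed into the controller.
 */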
4339static int
4340bnx2_init_nvram(struct bnx2 *bp)
4341{
4342 u32 val;
4343 int j, entry_count, rc = 0;
4344 const struct flash_spec *flash;
4345
4346 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4347 bp->flash_info = &flash_5709;
4348 goto get_flash_size;
4349 }
4350
4351 /* Determine the selected interface. */
4352 val = BNX2_RD(bp, BNX2_NVM_CFG1);
4353
4354 entry_count = ARRAY_SIZE(flash_table);
4355
4356 if (val & 0x40000000) {
4357
4358 /* Flash interface has been reconfigured */
4359 for (j = 0, flash = &flash_table[0]; j < entry_count;
4360 j++, flash++) {
4361 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4362 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4363 bp->flash_info = flash;
4364 break;
4365 }
4366 }
4367 }
4368 else {
4369 u32 mask;
		/* Not yet reconfigured */
4371
4372 if (val & (1 << 23))
4373 mask = FLASH_BACKUP_STRAP_MASK;
4374 else
4375 mask = FLASH_STRAP_MASK;
4376
4377 for (j = 0, flash = &flash_table[0]; j < entry_count;
4378 j++, flash++) {
4379
4380 if ((val & mask) == (flash->strapping & mask)) {
4381 bp->flash_info = flash;
4382
4383 /* Request access to the flash interface. */
4384 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4385 return rc;
4386
4387 /* Enable access to flash interface */
4388 bnx2_enable_nvram_access(bp);
4389
4390 /* Reconfigure the flash interface */
4391 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4392 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4393 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4394 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4395
4396 /* Disable access to flash interface */
4397 bnx2_disable_nvram_access(bp);
4398 bnx2_release_nvram_lock(bp);
4399
4400 break;
4401 }
4402 }
4403 } /* if (val & 0x40000000) */
4404
4405 if (j == entry_count) {
4406 bp->flash_info = NULL;
4407 pr_alert("Unknown flash/EEPROM type\n");
4408 return -ENODEV;
4409 }
4410
4411get_flash_size:
4412 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4413 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4414 if (val)
4415 bp->flash_size = val;
4416 else
4417 bp->flash_size = bp->flash_info->total_size;
4418
4419 return rc;
4420}
4421
4422static int
4423bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4424 int buf_size)
4425{
4426 int rc = 0;
4427 u32 cmd_flags, offset32, len32, extra;
4428
4429 if (buf_size == 0)
4430 return 0;
4431
4432 /* Request access to the flash interface. */
4433 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4434 return rc;
4435
4436 /* Enable access to flash interface */
4437 bnx2_enable_nvram_access(bp);
4438
4439 len32 = buf_size;
4440 offset32 = offset;
4441 extra = 0;
4442
4443 cmd_flags = 0;
4444
4445 if (offset32 & 3) {
4446 u8 buf[4];
4447 u32 pre_len;
4448
4449 offset32 &= ~3;
4450 pre_len = 4 - (offset & 3);
4451
4452 if (pre_len >= len32) {
4453 pre_len = len32;
4454 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4455 BNX2_NVM_COMMAND_LAST;
4456 }
4457 else {
4458 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4459 }
4460
4461 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4462
4463 if (rc)
4464 return rc;
4465
4466 memcpy(ret_buf, buf + (offset & 3), pre_len);
4467
4468 offset32 += 4;
4469 ret_buf += pre_len;
4470 len32 -= pre_len;
4471 }
4472 if (len32 & 3) {
4473 extra = 4 - (len32 & 3);
4474 len32 = (len32 + 4) & ~3;
4475 }
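	/* Example: a 5-byte read at offset 6 fetches the dword at 4 and
	 * copies its last 2 bytes, then rounds the remaining 3 bytes up to
	 * one dword (extra = 1) and copies 3 of the 4 bytes read at 8.
	 */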
4476
4477 if (len32 == 4) {
4478 u8 buf[4];
4479
4480 if (cmd_flags)
4481 cmd_flags = BNX2_NVM_COMMAND_LAST;
4482 else
4483 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4484 BNX2_NVM_COMMAND_LAST;
4485
4486 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4487
4488 memcpy(ret_buf, buf, 4 - extra);
4489 }
4490 else if (len32 > 0) {
4491 u8 buf[4];
4492
4493 /* Read the first word. */
4494 if (cmd_flags)
4495 cmd_flags = 0;
4496 else
4497 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4498
4499 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4500
4501 /* Advance to the next dword. */
4502 offset32 += 4;
4503 ret_buf += 4;
4504 len32 -= 4;
4505
4506 while (len32 > 4 && rc == 0) {
4507 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4508
4509 /* Advance to the next dword. */
4510 offset32 += 4;
4511 ret_buf += 4;
4512 len32 -= 4;
4513 }
4514
4515 if (rc)
4516 return rc;
4517
4518 cmd_flags = BNX2_NVM_COMMAND_LAST;
4519 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4520
4521 memcpy(ret_buf, buf, 4 - extra);
4522 }
4523
4524 /* Disable access to flash interface */
4525 bnx2_disable_nvram_access(bp);
4526
4527 bnx2_release_nvram_lock(bp);
4528
4529 return rc;
4530}
4531
4532static int
4533bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4534 int buf_size)
4535{
4536 u32 written, offset32, len32;
4537 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4538 int rc = 0;
4539 int align_start, align_end;
4540
4541 buf = data_buf;
4542 offset32 = offset;
4543 len32 = buf_size;
4544 align_start = align_end = 0;
4545
4546 if ((align_start = (offset32 & 3))) {
4547 offset32 &= ~3;
4548 len32 += align_start;
4549 if (len32 < 4)
4550 len32 = 4;
4551 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4552 return rc;
4553 }
4554
4555 if (len32 & 3) {
4556 align_end = 4 - (len32 & 3);
4557 len32 += align_end;
4558 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4559 return rc;
4560 }
4561
4562 if (align_start || align_end) {
4563 align_buf = kmalloc(len32, GFP_KERNEL);
4564 if (align_buf == NULL)
4565 return -ENOMEM;
4566 if (align_start) {
4567 memcpy(align_buf, start, 4);
4568 }
4569 if (align_end) {
4570 memcpy(align_buf + len32 - 4, end, 4);
4571 }
4572 memcpy(align_buf + align_start, data_buf, buf_size);
4573 buf = align_buf;
4574 }
4575
4576 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4577 flash_buffer = kmalloc(264, GFP_KERNEL);
4578 if (flash_buffer == NULL) {
4579 rc = -ENOMEM;
4580 goto nvram_write_end;
4581 }
4582 }
4583
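	/* Non-buffered parts are handled read-modify-write, one page per
	 * iteration: the page is read into flash_buffer, erased, and then
	 * rewritten with the old bytes outside [data_start, data_end)
	 * merged around the caller's new data.  Buffered parts skip the
	 * erase and the merge.
	 */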
4584 written = 0;
4585 while ((written < len32) && (rc == 0)) {
4586 u32 page_start, page_end, data_start, data_end;
4587 u32 addr, cmd_flags;
4588 int i;
4589
4590 /* Find the page_start addr */
4591 page_start = offset32 + written;
4592 page_start -= (page_start % bp->flash_info->page_size);
4593 /* Find the page_end addr */
4594 page_end = page_start + bp->flash_info->page_size;
4595 /* Find the data_start addr */
4596 data_start = (written == 0) ? offset32 : page_start;
4597 /* Find the data_end addr */
4598 data_end = (page_end > offset32 + len32) ?
4599 (offset32 + len32) : page_end;
4600
4601 /* Request access to the flash interface. */
4602 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4603 goto nvram_write_end;
4604
4605 /* Enable access to flash interface */
4606 bnx2_enable_nvram_access(bp);
4607
4608 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4609 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4610 int j;
4611
			/* Read the whole page into the buffer
			 * (non-buffered flash only).
			 */
4614 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4615 if (j == (bp->flash_info->page_size - 4)) {
4616 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4617 }
4618 rc = bnx2_nvram_read_dword(bp,
4619 page_start + j,
4620 &flash_buffer[j],
4621 cmd_flags);
4622
4623 if (rc)
4624 goto nvram_write_end;
4625
4626 cmd_flags = 0;
4627 }
4628 }
4629
4630 /* Enable writes to flash interface (unlock write-protect) */
4631 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4632 goto nvram_write_end;
4633
4634 /* Loop to write back the buffer data from page_start to
4635 * data_start */
4636 i = 0;
4637 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4638 /* Erase the page */
4639 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4640 goto nvram_write_end;
4641
			/* Re-enable writes for the actual programming. */
4643 bnx2_enable_nvram_write(bp);
4644
4645 for (addr = page_start; addr < data_start;
4646 addr += 4, i += 4) {
4647
4648 rc = bnx2_nvram_write_dword(bp, addr,
4649 &flash_buffer[i], cmd_flags);
4650
4651 if (rc != 0)
4652 goto nvram_write_end;
4653
4654 cmd_flags = 0;
4655 }
4656 }
4657
4658 /* Loop to write the new data from data_start to data_end */
4659 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4660 if ((addr == page_end - 4) ||
4661 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4662 (addr == data_end - 4))) {
4663
4664 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4665 }
4666 rc = bnx2_nvram_write_dword(bp, addr, buf,
4667 cmd_flags);
4668
4669 if (rc != 0)
4670 goto nvram_write_end;
4671
4672 cmd_flags = 0;
4673 buf += 4;
4674 }
4675
4676 /* Loop to write back the buffer data from data_end
4677 * to page_end */
4678 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4679 for (addr = data_end; addr < page_end;
4680 addr += 4, i += 4) {
4681
4682 if (addr == page_end-4) {
4683 cmd_flags = BNX2_NVM_COMMAND_LAST;
4684 }
4685 rc = bnx2_nvram_write_dword(bp, addr,
4686 &flash_buffer[i], cmd_flags);
4687
4688 if (rc != 0)
4689 goto nvram_write_end;
4690
4691 cmd_flags = 0;
4692 }
4693 }
4694
4695 /* Disable writes to flash interface (lock write-protect) */
4696 bnx2_disable_nvram_write(bp);
4697
4698 /* Disable access to flash interface */
4699 bnx2_disable_nvram_access(bp);
4700 bnx2_release_nvram_lock(bp);
4701
		/* Account for the bytes written in this page. */
4703 written += data_end - data_start;
4704 }
4705
4706nvram_write_end:
4707 kfree(flash_buffer);
4708 kfree(align_buf);
4709 return rc;
4710}
4711
4712static void
4713bnx2_init_fw_cap(struct bnx2 *bp)
4714{
4715 u32 val, sig = 0;
4716
4717 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4718 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4719
4720 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4721 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4722
4723 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4724 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4725 return;
4726
4727 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4728 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4729 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4730 }
4731
4732 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4733 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4734 u32 link;
4735
4736 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4737
4738 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4739 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4740 bp->phy_port = PORT_FIBRE;
4741 else
4742 bp->phy_port = PORT_TP;
4743
4744 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4745 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4746 }
4747
4748 if (netif_running(bp->dev) && sig)
4749 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4750}
4751
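/* Point the separate GRC windows 2 and 3 at the MSI-X table and PBA,
 * presumably so both stay reachable through the register BAR.
 */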
4752static void
4753bnx2_setup_msix_tbl(struct bnx2 *bp)
4754{
4755 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4756
4757 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4758 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4759}
4760
4761static void
4762bnx2_wait_dma_complete(struct bnx2 *bp)
4763{
4764 u32 val;
4765 int i;
4766
4767 /*
4768 * Wait for the current PCI transaction to complete before
4769 * issuing a reset.
4770 */
4771 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4772 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4773 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4774 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4775 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4776 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4777 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4778 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4779 udelay(5);
4780 } else { /* 5709 */
4781 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4782 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4783 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4784 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4785
4786 for (i = 0; i < 100; i++) {
4787 msleep(1);
4788 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4789 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4790 break;
4791 }
4792 }
}

4798static int
4799bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4800{
4801 u32 val;
4802 int i, rc = 0;
4803 u8 old_port;
4804
4805 /* Wait for the current PCI transaction to complete before
4806 * issuing a reset. */
4807 bnx2_wait_dma_complete(bp);
4808
4809 /* Wait for the firmware to tell us it is ok to issue a reset. */
4810 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4811
4812 /* Deposit a driver reset signature so the firmware knows that
4813 * this is a soft reset. */
4814 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4815 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4816
	/* Do a dummy read to force the chip to complete all pending
	 * transactions before we issue the reset.
	 */
4819 val = BNX2_RD(bp, BNX2_MISC_ID);
4820
4821 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4822 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4823 BNX2_RD(bp, BNX2_MISC_COMMAND);
4824 udelay(5);
4825
4826 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4827 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4828
4829 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4830
4831 } else {
4832 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4833 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4834 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4835
4836 /* Chip reset. */
4837 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4838
4839 /* Reading back any register after chip reset will hang the
4840 * bus on 5706 A0 and A1. The msleep below provides plenty
4841 * of margin for write posting.
4842 */
4843 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4844 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4845 msleep(20);
4846
		/* Reset takes approximately 30 usec */
4848 for (i = 0; i < 10; i++) {
4849 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4850 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4851 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4852 break;
4853 udelay(10);
4854 }
4855
4856 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4857 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4858 pr_err("Chip reset did not complete\n");
4859 return -EBUSY;
4860 }
4861 }
4862
4863 /* Make sure byte swapping is properly configured. */
4864 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4865 if (val != 0x01020304) {
4866 pr_err("Chip not in correct endian mode\n");
4867 return -ENODEV;
4868 }
4869
4870 /* Wait for the firmware to finish its initialization. */
4871 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4872 if (rc)
4873 return rc;
4874
4875 spin_lock_bh(&bp->phy_lock);
4876 old_port = bp->phy_port;
4877 bnx2_init_fw_cap(bp);
4878 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4879 old_port != bp->phy_port)
4880 bnx2_set_default_remote_link(bp);
4881 spin_unlock_bh(&bp->phy_lock);
4882
4883 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The
		 * default value of this register is 0x0000000e.
		 */
4886 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4887
4888 /* Remove bad rbuf memory from the free pool. */
4889 rc = bnx2_alloc_bad_rbuf(bp);
4890 }
4891
4892 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4893 bnx2_setup_msix_tbl(bp);
		/* Prevent MSI-X table reads and writes from timing out */
4895 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4896 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4897 }
4898
4899 return rc;
4900}
4901
4902static int
4903bnx2_init_chip(struct bnx2 *bp)
4904{
4905 u32 val, mtu;
4906 int rc, i;
4907
4908 /* Make sure the interrupt is not active. */
4909 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4910
4911 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4912 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4913#ifdef __BIG_ENDIAN
4914 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4915#endif
4916 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4917 DMA_READ_CHANS << 12 |
4918 DMA_WRITE_CHANS << 16;
4919
4920 val |= (0x2 << 20) | (1 << 11);
4921
4922 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4923 val |= (1 << 23);
4924
4925 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4926 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4927 !(bp->flags & BNX2_FLAG_PCIX))
4928 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4929
4930 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4931
4932 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4933 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4934 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4935 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4936 }
4937
4938 if (bp->flags & BNX2_FLAG_PCIX) {
4939 u16 val16;
4940
4941 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4942 &val16);
4943 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4944 val16 & ~PCI_X_CMD_ERO);
4945 }
4946
4947 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4948 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4949 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4950 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4951
4952 /* Initialize context mapping and zero out the quick contexts. The
4953 * context block must have already been enabled. */
4954 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4955 rc = bnx2_init_5709_context(bp);
4956 if (rc)
4957 return rc;
4958 } else
4959 bnx2_init_context(bp);
4960
4961 if ((rc = bnx2_init_cpus(bp)) != 0)
4962 return rc;
4963
4964 bnx2_init_nvram(bp);
4965
4966 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4967
4968 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4969 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4970 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4971 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4972 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4973 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4974 val |= BNX2_MQ_CONFIG_HALT_DIS;
4975 }
4976
4977 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4978
4979 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4980 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4981 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4982
4983 val = (BNX2_PAGE_BITS - 8) << 24;
4984 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4985
4986 /* Configure page size. */
4987 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4988 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4989 val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4990 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4991
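	/* Seed the EMAC backoff state from the station address, presumably
	 * so adapters sharing a half-duplex segment pick decorrelated
	 * backoff slots.
	 */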
4992 val = bp->mac_addr[0] +
4993 (bp->mac_addr[1] << 8) +
4994 (bp->mac_addr[2] << 16) +
4995 bp->mac_addr[3] +
4996 (bp->mac_addr[4] << 8) +
4997 (bp->mac_addr[5] << 16);
4998 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4999
5000 /* Program the MTU. Also include 4 bytes for CRC32. */
5001 mtu = bp->dev->mtu;
5002 val = mtu + ETH_HLEN + ETH_FCS_LEN;
5003 if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
5004 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
5005 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
5006
5007 if (mtu < ETH_DATA_LEN)
5008 mtu = ETH_DATA_LEN;
5009
5010 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5011 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5012 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5013
5014 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5015 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5016 bp->bnx2_napi[i].last_status_idx = 0;
5017
5018 bp->idle_chk_status_idx = 0xffff;
5019
5020 /* Set up how to generate a link change interrupt. */
5021 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5022
5023 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5024 (u64) bp->status_blk_mapping & 0xffffffff);
5025 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5026
5027 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5028 (u64) bp->stats_blk_mapping & 0xffffffff);
5029 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5030 (u64) bp->stats_blk_mapping >> 32);
5031
5032 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5033 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5034
5035 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5036 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5037
5038 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5039 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5040
5041 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5042
5043 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5044
5045 BNX2_WR(bp, BNX2_HC_COM_TICKS,
5046 (bp->com_ticks_int << 16) | bp->com_ticks);
5047
5048 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5049 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5050
5051 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5052 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5053 else
5054 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5055 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
5056
5057 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5058 val = BNX2_HC_CONFIG_COLLECT_STATS;
5059 else {
5060 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5061 BNX2_HC_CONFIG_COLLECT_STATS;
5062 }
5063
5064 if (bp->flags & BNX2_FLAG_USING_MSIX) {
5065 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5066 BNX2_HC_MSIX_BIT_VECTOR_VAL);
5067
5068 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5069 }
5070
5071 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5072 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5073
5074 BNX2_WR(bp, BNX2_HC_CONFIG, val);
5075
5076 if (bp->rx_ticks < 25)
5077 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5078 else
5079 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5080
5081 for (i = 1; i < bp->irq_nvecs; i++) {
5082 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5083 BNX2_HC_SB_CONFIG_1;
5084
5085 BNX2_WR(bp, base,
5086 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5087 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5088 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5089
5090 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5091 (bp->tx_quick_cons_trip_int << 16) |
5092 bp->tx_quick_cons_trip);
5093
5094 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5095 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5096
5097 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5098 (bp->rx_quick_cons_trip_int << 16) |
5099 bp->rx_quick_cons_trip);
5100
5101 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5102 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5103 }
5104
5105 /* Clear internal stats counters. */
5106 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5107
5108 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5109
5110 /* Initialize the receive filter. */
5111 bnx2_set_rx_mode(bp->dev);
5112
5113 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5114 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5115 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5116 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5117 }
5118 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5119 1, 0);
5120
5121 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5122 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5123
5124 udelay(20);
5125
5126 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5127
5128 return rc;
5129}
5130
5131static void
5132bnx2_clear_ring_states(struct bnx2 *bp)
5133{
5134 struct bnx2_napi *bnapi;
5135 struct bnx2_tx_ring_info *txr;
5136 struct bnx2_rx_ring_info *rxr;
5137 int i;
5138
5139 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5140 bnapi = &bp->bnx2_napi[i];
5141 txr = &bnapi->tx_ring;
5142 rxr = &bnapi->rx_ring;
5143
5144 txr->tx_cons = 0;
5145 txr->hw_tx_cons = 0;
5146 rxr->rx_prod_bseq = 0;
5147 rxr->rx_prod = 0;
5148 rxr->rx_cons = 0;
5149 rxr->rx_pg_prod = 0;
5150 rxr->rx_pg_cons = 0;
5151 }
5152}
5153
5154static void
5155bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5156{
5157 u32 val, offset0, offset1, offset2, offset3;
5158 u32 cid_addr = GET_CID_ADDR(cid);
5159
5160 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5161 offset0 = BNX2_L2CTX_TYPE_XI;
5162 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5163 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5164 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5165 } else {
5166 offset0 = BNX2_L2CTX_TYPE;
5167 offset1 = BNX2_L2CTX_CMD_TYPE;
5168 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5169 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5170 }
5171 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5172 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5173
5174 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5175 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5176
5177 val = (u64) txr->tx_desc_mapping >> 32;
5178 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5179
5180 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5181 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5182}
5183
5184static void
5185bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5186{
5187 struct bnx2_tx_bd *txbd;
5188 u32 cid = TX_CID;
5189 struct bnx2_napi *bnapi;
5190 struct bnx2_tx_ring_info *txr;
5191
5192 bnapi = &bp->bnx2_napi[ring_num];
5193 txr = &bnapi->tx_ring;
5194
5195 if (ring_num == 0)
5196 cid = TX_CID;
5197 else
5198 cid = TX_TSS_CID + ring_num - 1;
5199
5200 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5201
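	/* The last slot of the single-page ring is not a data descriptor;
	 * it points back at the ring's base address so the hardware wraps
	 * around, making the ring circular.
	 */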
5202 txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5203
5204 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5205 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5206
5207 txr->tx_prod = 0;
5208 txr->tx_prod_bseq = 0;
5209
5210 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5211 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5212
5213 bnx2_init_tx_context(bp, cid, txr);
5214}
5215
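/* Initialize one or more pages of RX buffer descriptors and chain them:
 * every descriptor gets the buffer size and START/END flags, and the
 * last slot of each page holds the DMA address of the next page (the
 * final page pointing back to page 0), forming one circular ring.
 */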
5216static void
5217bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5218 u32 buf_size, int num_rings)
5219{
5220 int i;
5221 struct bnx2_rx_bd *rxbd;
5222
5223 for (i = 0; i < num_rings; i++) {
5224 int j;
5225
5226 rxbd = &rx_ring[i][0];
5227 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5228 rxbd->rx_bd_len = buf_size;
5229 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5230 }
5231 if (i == (num_rings - 1))
5232 j = 0;
5233 else
5234 j = i + 1;
5235 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5236 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5237 }
5238}
5239
5240static void
5241bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5242{
5243 int i;
5244 u16 prod, ring_prod;
5245 u32 cid, rx_cid_addr, val;
5246 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5247 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5248
5249 if (ring_num == 0)
5250 cid = RX_CID;
5251 else
5252 cid = RX_RSS_CID + ring_num - 1;
5253
5254 rx_cid_addr = GET_CID_ADDR(cid);
5255
5256 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5257 bp->rx_buf_use_size, bp->rx_max_ring);
5258
5259 bnx2_init_rx_context(bp, cid);
5260
5261 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5262 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5263 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5264 }
5265
5266 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5267 if (bp->rx_pg_ring_size) {
5268 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5269 rxr->rx_pg_desc_mapping,
5270 PAGE_SIZE, bp->rx_max_pg_ring);
5271 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5272 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5273 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5274 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5275
5276 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5277 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5278
5279 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5280 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5281
5282 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5283 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5284 }
5285
5286 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5287 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5288
5289 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5290 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5291
5292 ring_prod = prod = rxr->rx_pg_prod;
5293 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5294 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5295 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5296 ring_num, i, bp->rx_pg_ring_size);
5297 break;
5298 }
5299 prod = BNX2_NEXT_RX_BD(prod);
5300 ring_prod = BNX2_RX_PG_RING_IDX(prod);
5301 }
5302 rxr->rx_pg_prod = prod;
5303
5304 ring_prod = prod = rxr->rx_prod;
5305 for (i = 0; i < bp->rx_ring_size; i++) {
5306 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5307 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5308 ring_num, i, bp->rx_ring_size);
5309 break;
5310 }
5311 prod = BNX2_NEXT_RX_BD(prod);
5312 ring_prod = BNX2_RX_RING_IDX(prod);
5313 }
5314 rxr->rx_prod = prod;
5315
5316 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5317 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5318 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5319
5320 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5321 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5322
5323 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5324}
5325
5326static void
5327bnx2_init_all_rings(struct bnx2 *bp)
5328{
5329 int i;
5330 u32 val;
5331
5332 bnx2_clear_ring_states(bp);
5333
5334 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5335 for (i = 0; i < bp->num_tx_rings; i++)
5336 bnx2_init_tx_ring(bp, i);
5337
5338 if (bp->num_tx_rings > 1)
5339 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5340 (TX_TSS_CID << 7));
5341
5342 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5343 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5344
5345 for (i = 0; i < bp->num_rx_rings; i++)
5346 bnx2_init_rx_ring(bp, i);
5347
5348 if (bp->num_rx_rings > 1) {
5349 u32 tbl_32 = 0;
5350
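		/* Indirection table entries are 4-bit ring numbers packed
		 * eight per 32-bit word; entry i gets i % (num_rx_rings - 1),
		 * and each full word is flushed with its word index (i >> 3).
		 * With three RX rings the pattern is simply 0,1,0,1,...
		 */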
5351 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5352 int shift = (i % 8) << 2;
5353
5354 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5355 if ((i % 8) == 7) {
5356 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5357 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5358 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5359 BNX2_RLUP_RSS_COMMAND_WRITE |
5360 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5361 tbl_32 = 0;
5362 }
5363 }
5364
5365 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5366 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5367
5368 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5369
5370 }
5371}
5372
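/* Return how many ring pages to use for ring_size descriptors, rounded
 * up to a power of 2; max_size is the largest supported count.  Example,
 * assuming the usual 255 usable descriptors per page: ring_size = 700
 * needs 3 pages, which is rounded up to 4.
 */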
5373static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5374{
5375 u32 max, num_rings = 1;
5376
5377 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5378 ring_size -= BNX2_MAX_RX_DESC_CNT;
5379 num_rings++;
5380 }
	/* round the number of ring pages up to the next power of 2 */
5382 max = max_size;
5383 while ((max & num_rings) == 0)
5384 max >>= 1;
5385
5386 if (num_rings != max)
5387 max <<= 1;
5388
5389 return max;
5390}
5391
5392static void
5393bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5394{
5395 u32 rx_size, rx_space, jumbo_size;
5396
5397 /* 8 for CRC and VLAN */
5398 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5399
5400 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5401 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5402
5403 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5404 bp->rx_pg_ring_size = 0;
5405 bp->rx_max_pg_ring = 0;
5406 bp->rx_max_pg_ring_idx = 0;
5407 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5408 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5409
5410 jumbo_size = size * pages;
5411 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5412 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5413
5414 bp->rx_pg_ring_size = jumbo_size;
5415 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5416 BNX2_MAX_RX_PG_RINGS);
5417 bp->rx_max_pg_ring_idx =
5418 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5419 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5420 bp->rx_copy_thresh = 0;
5421 }
5422
5423 bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead */
5425 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5426 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5427 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5428 bp->rx_ring_size = size;
5429 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5430 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5431}
5432
5433static void
5434bnx2_free_tx_skbs(struct bnx2 *bp)
5435{
5436 int i;
5437
5438 for (i = 0; i < bp->num_tx_rings; i++) {
5439 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5440 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5441 int j;
5442
5443 if (txr->tx_buf_ring == NULL)
5444 continue;
5445
5446 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5447 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5448 struct sk_buff *skb = tx_buf->skb;
5449 int k, last;
5450
5451 if (skb == NULL) {
5452 j = BNX2_NEXT_TX_BD(j);
5453 continue;
5454 }
5455
5456 dma_unmap_single(&bp->pdev->dev,
5457 dma_unmap_addr(tx_buf, mapping),
5458 skb_headlen(skb),
5459 PCI_DMA_TODEVICE);
5460
5461 tx_buf->skb = NULL;
5462
5463 last = tx_buf->nr_frags;
5464 j = BNX2_NEXT_TX_BD(j);
5465 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5466 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5467 dma_unmap_page(&bp->pdev->dev,
5468 dma_unmap_addr(tx_buf, mapping),
5469 skb_frag_size(&skb_shinfo(skb)->frags[k]),
5470 PCI_DMA_TODEVICE);
5471 }
5472 dev_kfree_skb(skb);
5473 }
5474 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5475 }
5476}
5477
5478static void
5479bnx2_free_rx_skbs(struct bnx2 *bp)
5480{
5481 int i;
5482
5483 for (i = 0; i < bp->num_rx_rings; i++) {
5484 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5485 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5486 int j;
5487
5488 if (rxr->rx_buf_ring == NULL)
5489 return;
5490
5491 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5492 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5493 u8 *data = rx_buf->data;
5494
5495 if (data == NULL)
5496 continue;
5497
5498 dma_unmap_single(&bp->pdev->dev,
5499 dma_unmap_addr(rx_buf, mapping),
5500 bp->rx_buf_use_size,
5501 PCI_DMA_FROMDEVICE);
5502
5503 rx_buf->data = NULL;
5504
5505 kfree(data);
5506 }
5507 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5508 bnx2_free_rx_page(bp, rxr, j);
5509 }
5510}
5511
5512static void
5513bnx2_free_skbs(struct bnx2 *bp)
5514{
5515 bnx2_free_tx_skbs(bp);
5516 bnx2_free_rx_skbs(bp);
5517}
5518
5519static int
5520bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5521{
5522 int rc;
5523
5524 rc = bnx2_reset_chip(bp, reset_code);
5525 bnx2_free_skbs(bp);
5526 if (rc)
5527 return rc;
5528
5529 if ((rc = bnx2_init_chip(bp)) != 0)
5530 return rc;
5531
5532 bnx2_init_all_rings(bp);
5533 return 0;
5534}
5535
5536static int
5537bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5538{
5539 int rc;
5540
5541 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5542 return rc;
5543
5544 spin_lock_bh(&bp->phy_lock);
5545 bnx2_init_phy(bp, reset_phy);
5546 bnx2_set_link(bp);
5547 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5548 bnx2_remote_phy_event(bp);
5549 spin_unlock_bh(&bp->phy_lock);
5550 return 0;
5551}
5552
5553static int
5554bnx2_shutdown_chip(struct bnx2 *bp)
5555{
5556 u32 reset_code;
5557
5558 if (bp->flags & BNX2_FLAG_NO_WOL)
5559 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5560 else if (bp->wol)
5561 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5562 else
5563 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5564
5565 return bnx2_reset_chip(bp, reset_code);
5566}
5567
5568static int
5569bnx2_test_registers(struct bnx2 *bp)
5570{
5571 int ret;
5572 int i, is_5709;
5573 static const struct {
5574 u16 offset;
5575 u16 flags;
5576#define BNX2_FL_NOT_5709 1
5577 u32 rw_mask;
5578 u32 ro_mask;
5579 } reg_tbl[] = {
5580 { 0x006c, 0, 0x00000000, 0x0000003f },
5581 { 0x0090, 0, 0xffffffff, 0x00000000 },
5582 { 0x0094, 0, 0x00000000, 0x00000000 },
5583
5584 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5585 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5586 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5587 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5588 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5589 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5590 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5591 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5592 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5593
5594 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5595 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5596 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5597 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5598 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5599 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5600
5601 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5602 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5603 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5604
5605 { 0x1000, 0, 0x00000000, 0x00000001 },
5606 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5607
5608 { 0x1408, 0, 0x01c00800, 0x00000000 },
5609 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5610 { 0x14a8, 0, 0x00000000, 0x000001ff },
5611 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5612 { 0x14b0, 0, 0x00000002, 0x00000001 },
5613 { 0x14b8, 0, 0x00000000, 0x00000000 },
5614 { 0x14c0, 0, 0x00000000, 0x00000009 },
5615 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5616 { 0x14cc, 0, 0x00000000, 0x00000001 },
5617 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5618
5619 { 0x1800, 0, 0x00000000, 0x00000001 },
5620 { 0x1804, 0, 0x00000000, 0x00000003 },
5621
5622 { 0x2800, 0, 0x00000000, 0x00000001 },
5623 { 0x2804, 0, 0x00000000, 0x00003f01 },
5624 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5625 { 0x2810, 0, 0xffff0000, 0x00000000 },
5626 { 0x2814, 0, 0xffff0000, 0x00000000 },
5627 { 0x2818, 0, 0xffff0000, 0x00000000 },
5628 { 0x281c, 0, 0xffff0000, 0x00000000 },
5629 { 0x2834, 0, 0xffffffff, 0x00000000 },
5630 { 0x2840, 0, 0x00000000, 0xffffffff },
5631 { 0x2844, 0, 0x00000000, 0xffffffff },
5632 { 0x2848, 0, 0xffffffff, 0x00000000 },
5633 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5634
5635 { 0x2c00, 0, 0x00000000, 0x00000011 },
5636 { 0x2c04, 0, 0x00000000, 0x00030007 },
5637
5638 { 0x3c00, 0, 0x00000000, 0x00000001 },
5639 { 0x3c04, 0, 0x00000000, 0x00070000 },
5640 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5641 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5642 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5643 { 0x3c14, 0, 0x00000000, 0xffffffff },
5644 { 0x3c18, 0, 0x00000000, 0xffffffff },
5645 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5646 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5647
5648 { 0x5004, 0, 0x00000000, 0x0000007f },
5649 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5650
5651 { 0x5c00, 0, 0x00000000, 0x00000001 },
5652 { 0x5c04, 0, 0x00000000, 0x0003000f },
5653 { 0x5c08, 0, 0x00000003, 0x00000000 },
5654 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5655 { 0x5c10, 0, 0x00000000, 0xffffffff },
5656 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5657 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5658 { 0x5c88, 0, 0x00000000, 0x00077373 },
5659 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5660
5661 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5662 { 0x680c, 0, 0xffffffff, 0x00000000 },
5663 { 0x6810, 0, 0xffffffff, 0x00000000 },
5664 { 0x6814, 0, 0xffffffff, 0x00000000 },
5665 { 0x6818, 0, 0xffffffff, 0x00000000 },
5666 { 0x681c, 0, 0xffffffff, 0x00000000 },
5667 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5668 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5669 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5670 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5671 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5672 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5673 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5674 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5675 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5676 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5677 { 0x684c, 0, 0xffffffff, 0x00000000 },
5678 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5679 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5680 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5681 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5682 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5683 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5684
5685 { 0xffff, 0, 0x00000000, 0x00000000 },
5686 };
5687
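	/* For each entry, rw_mask marks implemented read/write bits and
	 * ro_mask marks read-only bits.  Writing 0 must read back 0 in the
	 * RW bits, writing all-ones must read back all RW bits set, and
	 * the RO bits must keep their saved value in both cases.
	 */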
5688 ret = 0;
5689 is_5709 = 0;
5690 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5691 is_5709 = 1;
5692
5693 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5694 u32 offset, rw_mask, ro_mask, save_val, val;
5695 u16 flags = reg_tbl[i].flags;
5696
5697 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5698 continue;
5699
5700 offset = (u32) reg_tbl[i].offset;
5701 rw_mask = reg_tbl[i].rw_mask;
5702 ro_mask = reg_tbl[i].ro_mask;
5703
5704 save_val = readl(bp->regview + offset);
5705
5706 writel(0, bp->regview + offset);
5707
5708 val = readl(bp->regview + offset);
5709 if ((val & rw_mask) != 0) {
5710 goto reg_test_err;
5711 }
5712
5713 if ((val & ro_mask) != (save_val & ro_mask)) {
5714 goto reg_test_err;
5715 }
5716
5717 writel(0xffffffff, bp->regview + offset);
5718
5719 val = readl(bp->regview + offset);
5720 if ((val & rw_mask) != rw_mask) {
5721 goto reg_test_err;
5722 }
5723
5724 if ((val & ro_mask) != (save_val & ro_mask)) {
5725 goto reg_test_err;
5726 }
5727
5728 writel(save_val, bp->regview + offset);
5729 continue;
5730
5731reg_test_err:
5732 writel(save_val, bp->regview + offset);
5733 ret = -ENODEV;
5734 break;
5735 }
5736 return ret;
5737}
5738
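/* Walk all-zeros/all-ones/alternating patterns (0x55..., 0xaa...) through
 * every word of the region via the indirect register interface, reading
 * each word back immediately to catch stuck or coupled bits.
 */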
5739static int
5740bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5741{
5742 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5744 int i;
5745
	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
5747 u32 offset;
5748
5749 for (offset = 0; offset < size; offset += 4) {
5750
5751 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5752
5753 if (bnx2_reg_rd_ind(bp, start + offset) !=
5754 test_pattern[i]) {
5755 return -ENODEV;
5756 }
5757 }
5758 }
5759 return 0;
5760}
5761
5762static int
5763bnx2_test_memory(struct bnx2 *bp)
5764{
5765 int ret = 0;
5766 int i;
5767 static struct mem_entry {
5768 u32 offset;
5769 u32 len;
5770 } mem_tbl_5706[] = {
5771 { 0x60000, 0x4000 },
5772 { 0xa0000, 0x3000 },
5773 { 0xe0000, 0x4000 },
5774 { 0x120000, 0x4000 },
5775 { 0x1a0000, 0x4000 },
5776 { 0x160000, 0x4000 },
5777 { 0xffffffff, 0 },
5778 },
5779 mem_tbl_5709[] = {
5780 { 0x60000, 0x4000 },
5781 { 0xa0000, 0x3000 },
5782 { 0xe0000, 0x4000 },
5783 { 0x120000, 0x4000 },
5784 { 0x1a0000, 0x4000 },
5785 { 0xffffffff, 0 },
5786 };
5787 struct mem_entry *mem_tbl;
5788
5789 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5790 mem_tbl = mem_tbl_5709;
5791 else
5792 mem_tbl = mem_tbl_5706;
5793
5794 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5795 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5796 mem_tbl[i].len)) != 0) {
5797 return ret;
5798 }
5799 }
5800
5801 return ret;
5802}
5803
5804#define BNX2_MAC_LOOPBACK 0
5805#define BNX2_PHY_LOOPBACK 1
5806
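/* Loopback self-test: build one frame addressed to ourselves, post it as
 * a single TX descriptor, force a coalesce without an interrupt so the
 * status block updates, then verify that exactly one frame arrived with
 * no l2_fhdr errors and a byte-for-byte matching payload.
 */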
5807static int
5808bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5809{
5810 unsigned int pkt_size, num_pkts, i;
5811 struct sk_buff *skb;
5812 u8 *data;
5813 unsigned char *packet;
5814 u16 rx_start_idx, rx_idx;
5815 dma_addr_t map;
5816 struct bnx2_tx_bd *txbd;
5817 struct bnx2_sw_bd *rx_buf;
5818 struct l2_fhdr *rx_hdr;
5819 int ret = -ENODEV;
5820 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5821 struct bnx2_tx_ring_info *txr;
5822 struct bnx2_rx_ring_info *rxr;
5823
5824 tx_napi = bnapi;
5825
5826 txr = &tx_napi->tx_ring;
5827 rxr = &bnapi->rx_ring;
5828 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5829 bp->loopback = MAC_LOOPBACK;
5830 bnx2_set_mac_loopback(bp);
5831 }
5832 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5833 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5834 return 0;
5835
5836 bp->loopback = PHY_LOOPBACK;
5837 bnx2_set_phy_loopback(bp);
5838 }
5839 else
5840 return -EINVAL;
5841
5842 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5843 skb = netdev_alloc_skb(bp->dev, pkt_size);
5844 if (!skb)
5845 return -ENOMEM;
5846 packet = skb_put(skb, pkt_size);
5847 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5848 memset(packet + ETH_ALEN, 0x0, 8);
5849 for (i = 14; i < pkt_size; i++)
5850 packet[i] = (unsigned char) (i & 0xff);
5851
5852 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5853 PCI_DMA_TODEVICE);
5854 if (dma_mapping_error(&bp->pdev->dev, map)) {
5855 dev_kfree_skb(skb);
5856 return -EIO;
5857 }
5858
5859 BNX2_WR(bp, BNX2_HC_COMMAND,
5860 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5861
5862 BNX2_RD(bp, BNX2_HC_COMMAND);
5863
5864 udelay(5);
5865 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5866
5867 num_pkts = 0;
5868
5869 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5870
5871 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5872 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5873 txbd->tx_bd_mss_nbytes = pkt_size;
5874 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5875
5876 num_pkts++;
5877 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5878 txr->tx_prod_bseq += pkt_size;
5879
5880 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5881 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5882
5883 udelay(100);
5884
5885 BNX2_WR(bp, BNX2_HC_COMMAND,
5886 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5887
5888 BNX2_RD(bp, BNX2_HC_COMMAND);
5889
5890 udelay(5);
5891
5892 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5893 dev_kfree_skb(skb);
5894
5895 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5896 goto loopback_test_done;
5897
5898 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5899 if (rx_idx != rx_start_idx + num_pkts) {
5900 goto loopback_test_done;
5901 }
5902
5903 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5904 data = rx_buf->data;
5905
5906 rx_hdr = get_l2_fhdr(data);
5907 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5908
5909 dma_sync_single_for_cpu(&bp->pdev->dev,
5910 dma_unmap_addr(rx_buf, mapping),
5911 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5912
5913 if (rx_hdr->l2_fhdr_status &
5914 (L2_FHDR_ERRORS_BAD_CRC |
5915 L2_FHDR_ERRORS_PHY_DECODE |
5916 L2_FHDR_ERRORS_ALIGNMENT |
5917 L2_FHDR_ERRORS_TOO_SHORT |
5918 L2_FHDR_ERRORS_GIANT_FRAME)) {
5919
5920 goto loopback_test_done;
5921 }
5922
5923 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5924 goto loopback_test_done;
5925 }
5926
5927 for (i = 14; i < pkt_size; i++) {
5928 if (*(data + i) != (unsigned char) (i & 0xff)) {
5929 goto loopback_test_done;
5930 }
5931 }
5932
5933 ret = 0;
5934
5935loopback_test_done:
5936 bp->loopback = 0;
5937 return ret;
5938}
5939
5940#define BNX2_MAC_LOOPBACK_FAILED 1
5941#define BNX2_PHY_LOOPBACK_FAILED 2
5942#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5943 BNX2_PHY_LOOPBACK_FAILED)
5944
5945static int
5946bnx2_test_loopback(struct bnx2 *bp)
5947{
5948 int rc = 0;
5949
5950 if (!netif_running(bp->dev))
5951 return BNX2_LOOPBACK_FAILED;
5952
5953 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5954 spin_lock_bh(&bp->phy_lock);
5955 bnx2_init_phy(bp, 1);
5956 spin_unlock_bh(&bp->phy_lock);
5957 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5958 rc |= BNX2_MAC_LOOPBACK_FAILED;
5959 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5960 rc |= BNX2_PHY_LOOPBACK_FAILED;
5961 return rc;
5962}
5963
5964#define NVRAM_SIZE 0x200
5965#define CRC32_RESIDUAL 0xdebb20e3
5966
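/* Running CRC32 over a block that ends with its own stored (inverted,
 * little-endian) CRC yields this constant residual, so each 0x100-byte
 * NVRAM half can be verified without parsing out the checksum itself.
 */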
5967static int
5968bnx2_test_nvram(struct bnx2 *bp)
5969{
5970 __be32 buf[NVRAM_SIZE / 4];
5971 u8 *data = (u8 *) buf;
5972 int rc = 0;
5973 u32 magic, csum;
5974
5975 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5976 goto test_nvram_done;
5977
5978 magic = be32_to_cpu(buf[0]);
5979 if (magic != 0x669955aa) {
5980 rc = -ENODEV;
5981 goto test_nvram_done;
5982 }
5983
5984 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5985 goto test_nvram_done;
5986
5987 csum = ether_crc_le(0x100, data);
5988 if (csum != CRC32_RESIDUAL) {
5989 rc = -ENODEV;
5990 goto test_nvram_done;
5991 }
5992
5993 csum = ether_crc_le(0x100, data + 0x100);
5994 if (csum != CRC32_RESIDUAL) {
5995 rc = -ENODEV;
5996 }
5997
5998test_nvram_done:
5999 return rc;
6000}
6001
6002static int
6003bnx2_test_link(struct bnx2 *bp)
6004{
6005 u32 bmsr;
6006
6007 if (!netif_running(bp->dev))
6008 return -ENODEV;
6009
6010 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6011 if (bp->link_up)
6012 return 0;
6013 return -ENODEV;
6014 }
6015 spin_lock_bh(&bp->phy_lock);
6016 bnx2_enable_bmsr1(bp);
6017 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6018 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6019 bnx2_disable_bmsr1(bp);
6020 spin_unlock_bh(&bp->phy_lock);
6021
6022 if (bmsr & BMSR_LSTATUS) {
6023 return 0;
6024 }
6025 return -ENODEV;
6026}
6027
6028static int
6029bnx2_test_intr(struct bnx2 *bp)
6030{
6031 int i;
6032 u16 status_idx;
6033
6034 if (!netif_running(bp->dev))
6035 return -ENODEV;
6036
6037 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6038
6039 /* This register is not touched during run-time. */
6040 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6041 BNX2_RD(bp, BNX2_HC_COMMAND);
6042
6043 for (i = 0; i < 10; i++) {
6044 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6045 status_idx) {
6046
6047 break;
6048 }
6049
6050 msleep_interruptible(10);
6051 }
6052 if (i < 10)
6053 return 0;
6054
6055 return -ENODEV;
6056}
6057
/* Determine link for parallel detection. */
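/* With no autoneg partner, link is inferred from the PHY itself: signal
 * detect must be up, the receiver must be in sync with no invalid RUDI
 * code words, and no /C/ (CONFIG) ordered sets may be arriving, since
 * those would mean the partner is actually autonegotiating.
 */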
6059static int
6060bnx2_5706_serdes_has_link(struct bnx2 *bp)
6061{
6062 u32 mode_ctl, an_dbg, exp;
6063
6064 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6065 return 0;
6066
6067 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6068 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6069
6070 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6071 return 0;
6072
6073 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6074 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6075 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6076
6077 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6078 return 0;
6079
6080 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6081 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6082 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6083
6084 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6085 return 0;
6086
6087 return 1;
6088}
6089
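/* Periodic state machine for 5706 SerDes links, called from
 * bnx2_timer().  While autoneg is not completing, it falls back to a
 * forced 1Gb full-duplex link once parallel detection sees a partner;
 * if the partner later starts autonegotiating (seen through PHY
 * registers 0x17/0x15), autoneg is re-enabled.  It also forces the
 * link down, or re-evaluates it, as the AN debug register loses or
 * regains sync.
 */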
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}

static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}

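/* Main driver timer, rearmed every bp->current_interval jiffies while
 * the device is up.  It checks for missed MSIs where applicable,
 * sends the firmware heartbeat, refreshes the firmware rx drop
 * counter, kicks a statistics DMA on chips with broken periodic
 * stats, and runs the per-chip SerDes state machines.
 */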
static void
bnx2_timer(struct timer_list *t)
{
	struct bnx2 *bp = from_timer(bp, t, timer);

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* Work around occasionally corrupted counters. */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}

static void
__bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
}

static void
bnx2_free_irq(struct bnx2 *bp)
{

	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}

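/* Try to switch to MSI-X: point the MSI-X table and PBA at GRC
 * windows, then request between BNX2_MIN_MSIX_VEC and msix_vecs
 * vectors (one extra when CNIC is built in).  On success all vectors
 * use the one-shot MSI handler; on failure the flags are left alone
 * and the caller falls back to MSI or INTx.
 */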
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is set up properly.
	 */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;
#endif
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}

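/* Pick the interrupt mode and ring counts: prefer MSI-X sized from
 * the default RSS queue count and any user-requested ring counts,
 * fall back to MSI (one-shot on 5709), and finally to shared INTx.
 * The real tx/rx queue counts are then derived from the vectors
 * actually obtained.
 */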
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}

/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working.
		 * If the MSI test fails, go back to INTx mode.
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}

static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}

#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }

static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}

static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shut down gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
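/* Each packet uses one buffer descriptor (BD) for the linear part of
 * the skb plus one BD per page fragment.  The first BD carries the
 * checksum/VLAN/LSO flags computed below and TX_BD_FLAGS_START; the
 * last BD is marked TX_BD_FLAGS_END before the new producer index and
 * byte sequence are posted to the tx mailbox.
 */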
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
	}

	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	prod = BNX2_NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}

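/* Fold the hardware statistics block into temp_stats_blk before a
 * chip reset wipes it.  The first 10 counters are 64-bit, stored as
 * hi/lo u32 pairs, so a carry out of the low word is propagated by
 * hand; the rest are plain 32-bit counters.
 */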
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}

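/* Totals for get_stats64: each counter is the live hardware value
 * plus the copy saved across resets.  64-bit counters are rebuilt
 * from their _hi/_lo halves; 32-bit counters are simply added.
 */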
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

#define GET_64BIT_NET_STATS(ctr)		\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +	\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

#define GET_32BIT_NET_STATS(ctr)		\
	(unsigned long) (bp->stats_blk->ctr +	\
			 bp->temp_stats_blk->ctr)

static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->stats_blk == NULL)
		return;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

}

/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_link_ksettings(struct net_device *dev,
			struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;
	u32 supported, advertising;

	supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->base.port = bp->phy_port;
	advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else {
		cmd->base.autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = bp->line_speed;
		cmd->base.duplex = bp->duplex;
		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	}
	else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->base.phy_address = bp->phy_addr;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int
bnx2_set_link_ksettings(struct net_device *dev,
			const struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->base.port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);

		if (cmd->base.port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = cmd->base.speed;

		if (cmd->base.port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->base.duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->base.duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->base.port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}

static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}

#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

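/* Dump the chip's register space for ethtool -d.  reg_boundaries[]
 * lists (start, end) offset pairs of the readable windows; offsets
 * between windows stay zero-filled in the output buffer.
 */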
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & BNX2_FLAG_NO_WOL) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & BNX2_FLAG_NO_WOL)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

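/* Tear down and rebuild the rings with new sizes.  If the interface
 * is up, the chipset stats are saved first (the reset below clears
 * them); with reset_irq set (used by ethtool -L) the IRQ and NAPI
 * setup is rebuilt as well.  If the device cannot be brought back up,
 * it is closed.
 */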
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
				   false);
	return rc;
}

static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInFTQDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

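/* Fill the ethtool -S buffer.  Every counter is the sum of the live
 * hardware statistics block and the temp copy accumulated across
 * resets.  The per-chip length tables above mark each counter as
 * 8-byte, 4-byte, or skipped (0) due to errata.
 */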
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}

static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}

static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}

static void bnx2_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}

	channels->max_rx = max_rx_rings;
	channels->max_tx = max_tx_rings;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = bp->num_rx_rings;
	channels->tx_count = bp->num_tx_rings;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int bnx2_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 max_rx_rings = 1;
	u32 max_tx_rings = 1;
	int rc = 0;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
		max_rx_rings = RX_MAX_RINGS;
		max_tx_rings = TX_MAX_RINGS;
	}
	if (channels->rx_count > max_rx_rings ||
	    channels->tx_count > max_tx_rings)
		return -EINVAL;

	bp->num_req_rx_rings = channels->rx_count;
	bp->num_req_tx_rings = channels->tx_count;

	if (netif_running(dev))
		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
					   bp->tx_ring_size, true);

	return rc;
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif

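/* Detect whether this 5709 port is copper or SerDes.  The bond id in
 * the dual media control register decides directly for single-media
 * parts; otherwise the PHY strap (or its software override) is
 * compared against per-function lists of SerDes strap values.
 */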
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}

static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}

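/* Pull a firmware version string out of the VPD block in NVRAM.  The
 * bytes are stored word-swapped and are straightened out first; the
 * version comes from the "V0" vendor-specific keyword in the VPD
 * read-only section, and only when the manufacturer-id keyword reads
 * "1028".
 */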
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}

8106static int
8107bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8108{
8109 struct bnx2 *bp;
8110 int rc, i, j;
8111 u32 reg;
8112 u64 dma_mask, persist_dma_mask;
8113 int err;
8114
8115 SET_NETDEV_DEV(dev, &pdev->dev);
8116 bp = netdev_priv(dev);
8117
8118 bp->flags = 0;
8119 bp->phy_flags = 0;
8120
8121 bp->temp_stats_blk =
8122 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8123
8124 if (bp->temp_stats_blk == NULL) {
8125 rc = -ENOMEM;
8126 goto err_out;
8127 }
8128
8129 /* enable device (incl. PCI PM wakeup), and bus-mastering */
8130 rc = pci_enable_device(pdev);
8131 if (rc) {
8132 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8133 goto err_out;
8134 }
8135
8136 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8137 dev_err(&pdev->dev,
8138 "Cannot find PCI device base address, aborting\n");
8139 rc = -ENODEV;
8140 goto err_out_disable;
8141 }
8142
8143 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8144 if (rc) {
8145 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8146 goto err_out_disable;
8147 }
8148
8149 pci_set_master(pdev);
8150
8151 bp->pm_cap = pdev->pm_cap;
8152 if (bp->pm_cap == 0) {
8153 dev_err(&pdev->dev,
8154 "Cannot find power management capability, aborting\n");
8155 rc = -EIO;
8156 goto err_out_release;
8157 }
8158
8159 bp->dev = dev;
8160 bp->pdev = pdev;
8161
8162 spin_lock_init(&bp->phy_lock);
8163 spin_lock_init(&bp->indirect_lock);
8164#ifdef BCM_CNIC
8165 mutex_init(&bp->cnic_lock);
8166#endif
8167 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8168
8169 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8170 TX_MAX_TSS_RINGS + 1));
8171 if (!bp->regview) {
8172 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8173 rc = -ENOMEM;
8174 goto err_out_release;
8175 }
8176
8177 /* Configure byte swap and enable write to the reg_window registers.
8178 * Rely on CPU to do target byte swapping on big endian systems
8179 * The chip's target access swapping will not swap all accesses
8180 */
8181 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8182 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8183 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8184
8185 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8186
8187 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8188 if (!pci_is_pcie(pdev)) {
8189 dev_err(&pdev->dev, "Not PCIE, aborting\n");
8190 rc = -EIO;
8191 goto err_out_unmap;
8192 }
8193 bp->flags |= BNX2_FLAG_PCIE;
8194 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8195 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8196
8197 /* AER (Advanced Error Reporting) hooks */
8198 err = pci_enable_pcie_error_reporting(pdev);
8199 if (!err)
8200 bp->flags |= BNX2_FLAG_AER_ENABLED;
8201
8202 } else {
8203 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8204 if (bp->pcix_cap == 0) {
8205 dev_err(&pdev->dev,
8206 "Cannot find PCIX capability, aborting\n");
8207 rc = -EIO;
8208 goto err_out_unmap;
8209 }
8210 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8211 }
8212
8213 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8214 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8215 if (pdev->msix_cap)
8216 bp->flags |= BNX2_FLAG_MSIX_CAP;
8217 }
8218
8219 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8220 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8221 if (pdev->msi_cap)
8222 bp->flags |= BNX2_FLAG_MSI_CAP;
8223 }
8224
8225 /* 5708 cannot support DMA addresses > 40-bit. */
8226 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8227 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8228 else
8229 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8230
8231 /* Configure DMA attributes. */
8232 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8233 dev->features |= NETIF_F_HIGHDMA;
8234 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8235 if (rc) {
8236 dev_err(&pdev->dev,
8237 "pci_set_consistent_dma_mask failed, aborting\n");
8238 goto err_out_unmap;
8239 }
8240 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8241 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8242 goto err_out_unmap;
8243 }
8244
8245 if (!(bp->flags & BNX2_FLAG_PCIE))
8246 bnx2_get_pci_speed(bp);
8247
8248 /* 5706A0 may falsely detect SERR and PERR. */
8249 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8250 reg = BNX2_RD(bp, PCI_COMMAND);
8251 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8252 BNX2_WR(bp, PCI_COMMAND, reg);
	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
		   !(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;	/* was missing; a stale rc of 0 would be returned */
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
		bp->func = 1;

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = bp->func << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

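	/* The bootcode revision is packed one byte per field in the upper
	 * three bytes of BNX2_DEV_INFO_BC_REV; append it to fw_version as
	 * "bc x.y.z", suppressing leading zeros in each field.
	 */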
	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = be32_to_cpu(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

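	/* Default interrupt coalescing thresholds.  The *_int variants
	 * program the "during interrupt" fields of the host coalescing
	 * registers; the plain values apply in normal operation.
	 */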
	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* allocate stats_blk */
	rc = bnx2_alloc_stats_blk(dev);
	if (rc)
		goto err_out_unmap;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_get_5709_media(bp);
	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

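	/* Early 5708 steppings, and boards without the VAUX preset, cannot
	 * support Wake-on-LAN; disable it outright on those.
	 */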
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (bp->flags & BNX2_FLAG_NO_WOL)
		device_set_wakeup_capable(&bp->pdev->dev, false);
	else
		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	timer_setup(&bp->timer, bnx2_timer, 0);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);

#ifdef BCM_CNIC
	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		bp->cnic_eth_dev.max_iscsi_conn =
			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
	bp->cnic_probe = bnx2_cnic_probe;
#endif
	pci_save_state(pdev);

	return 0;

err_out_unmap:
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	pci_iounmap(pdev, bp->regview);
	bp->regview = NULL;

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);

err_out:
	kfree(bp->temp_stats_blk);

	return rc;
}

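/* Build a human-readable bus description, e.g. "PCI Express",
 * "PCI-X 64-bit 133MHz" or "PCI 32-bit 66MHz".  str must be large
 * enough for the longest form (the caller passes a 40-byte buffer).
 */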
static char *
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static void
bnx2_del_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		netif_napi_del(&bp->bnx2_napi[i].napi);
}

static void
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

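		/* Vector 0 polls the default status block and also picks up
		 * link and other slow-path events; the remaining MSI-X
		 * vectors only service their own TX/RX rings.
		 */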
		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from the first kernel could continue running in the
	 * kdump kernel.  A new IO page table has been created before bnx2
	 * does its reset at open time, so wait for the in-flight DMA to
	 * complete to keep it from looking up the newly created IO page
	 * table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;

	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

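	/* Illustrative boot log line (values are examples only):
	 * "eth0: Broadcom NetXtreme II BCM5709 1000Base-T (C0) PCI Express
	 * found at mem d8000000, IRQ 16, node addr 00:10:18:xx:xx:xx"
	 */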
	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}

static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
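/* System sleep flow: suspend quiesces NAPI and TX, resets the chip and
 * arms Wake-on-LAN; resume re-initializes the NIC from scratch and
 * restarts the queues.
 */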
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	bnx2_setup_wol(bp);
	return 0;
}

static int
bnx2_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}

static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return result;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}

static void bnx2_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp;

	if (!dev)
		return;

	bp = netdev_priv(dev);
	if (!bp)
		return;

	rtnl_lock();
	if (netif_running(dev))
		dev_close(bp->dev);

	if (system_state == SYSTEM_POWER_OFF)
		bnx2_set_power_state(bp, PCI_D3hot);

	rtnl_unlock();
}

static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
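
/* AER recovery invokes the hooks above in order: error_detected (detach
 * and quiesce), slot_reset (re-enable and re-initialize the NIC), then
 * resume (restart traffic).
 */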

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

module_pci_driver(bnx2_pci_driver);
1/* bnx2.c: QLogic bnx2 network driver.
2 *
3 * Copyright (c) 2004-2014 Broadcom Corporation
4 * Copyright (c) 2014-2015 QLogic Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 * Written by: Michael Chan (mchan@broadcom.com)
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17
18#include <linux/stringify.h>
19#include <linux/kernel.h>
20#include <linux/timer.h>
21#include <linux/errno.h>
22#include <linux/ioport.h>
23#include <linux/slab.h>
24#include <linux/vmalloc.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/dma-mapping.h>
31#include <linux/bitops.h>
32#include <asm/io.h>
33#include <asm/irq.h>
34#include <linux/delay.h>
35#include <asm/byteorder.h>
36#include <asm/page.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/if.h>
41#include <linux/if_vlan.h>
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
48#include <linux/cache.h>
49#include <linux/firmware.h>
50#include <linux/log2.h>
51#include <linux/crash_dump.h>
52
53#if IS_ENABLED(CONFIG_CNIC)
54#define BCM_CNIC 1
55#include "cnic_if.h"
56#endif
57#include "bnx2.h"
58#include "bnx2_fw.h"
59
60#define DRV_MODULE_NAME "bnx2"
61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
64#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
66
67#define RUN_AT(x) (jiffies + (x))
68
69/* Time in jiffies before concluding the transmitter is hung. */
70#define TX_TIMEOUT (5*HZ)
71
72MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
73MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
74MODULE_LICENSE("GPL");
75MODULE_FIRMWARE(FW_MIPS_FILE_06);
76MODULE_FIRMWARE(FW_RV2P_FILE_06);
77MODULE_FIRMWARE(FW_MIPS_FILE_09);
78MODULE_FIRMWARE(FW_RV2P_FILE_09);
79MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
80
81static int disable_msi = 0;
82
83module_param(disable_msi, int, 0444);
84MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
85
86typedef enum {
87 BCM5706 = 0,
88 NC370T,
89 NC370I,
90 BCM5706S,
91 NC370F,
92 BCM5708,
93 BCM5708S,
94 BCM5709,
95 BCM5709S,
96 BCM5716,
97 BCM5716S,
98} board_t;
99
100/* indexed by board_t, above */
101static struct {
102 char *name;
103} board_info[] = {
104 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
105 { "HP NC370T Multifunction Gigabit Server Adapter" },
106 { "HP NC370i Multifunction Gigabit Server Adapter" },
107 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
108 { "HP NC370F Multifunction Gigabit Server Adapter" },
109 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
110 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
111 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
112 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
113 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
114 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
115 };
116
117static const struct pci_device_id bnx2_pci_tbl[] = {
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
119 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
121 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
127 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
136 { PCI_VENDOR_ID_BROADCOM, 0x163b,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
138 { PCI_VENDOR_ID_BROADCOM, 0x163c,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
140 { 0, }
141};
142
143static const struct flash_spec flash_table[] =
144{
145#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
146#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
147 /* Slow EEPROM */
148 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
149 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
150 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
151 "EEPROM - slow"},
152 /* Expansion entry 0001 */
153 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
154 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
155 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156 "Entry 0001"},
157 /* Saifun SA25F010 (non-buffered flash) */
158 /* strap, cfg1, & write1 need updates */
159 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
162 "Non-buffered flash (128kB)"},
163 /* Saifun SA25F020 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
165 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
168 "Non-buffered flash (256kB)"},
169 /* Expansion entry 0100 */
170 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
171 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
172 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
173 "Entry 0100"},
174 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
175 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
176 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
177 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
178 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
179 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
180 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
181 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
182 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
183 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
184 /* Saifun SA25F005 (non-buffered flash) */
185 /* strap, cfg1, & write1 need updates */
186 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
187 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
189 "Non-buffered flash (64kB)"},
190 /* Fast EEPROM */
191 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
192 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
193 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
194 "EEPROM - fast"},
195 /* Expansion entry 1001 */
196 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
197 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
198 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
199 "Entry 1001"},
200 /* Expansion entry 1010 */
201 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
202 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204 "Entry 1010"},
205 /* ATMEL AT45DB011B (buffered flash) */
206 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
207 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
208 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
209 "Buffered flash (128kB)"},
210 /* Expansion entry 1100 */
211 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
212 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
213 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
214 "Entry 1100"},
215 /* Expansion entry 1101 */
216 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
217 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 "Entry 1101"},
220 /* Ateml Expansion entry 1110 */
221 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
222 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
223 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
224 "Entry 1110 (Atmel)"},
225 /* ATMEL AT45DB021B (buffered flash) */
226 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
227 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
229 "Buffered flash (256kB)"},
230};
231
232static const struct flash_spec flash_5709 = {
233 .flags = BNX2_NV_BUFFERED,
234 .page_bits = BCM5709_FLASH_PAGE_BITS,
235 .page_size = BCM5709_FLASH_PAGE_SIZE,
236 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
237 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
238 .name = "5709 Buffered flash (256kB)",
239};
240
241MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
242
243static void bnx2_init_napi(struct bnx2 *bp);
244static void bnx2_del_napi(struct bnx2 *bp);
245
246static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
247{
248 u32 diff;
249
250 /* The ring uses 256 indices for 255 entries, one of them
251 * needs to be skipped.
252 */
253 diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
254 if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
255 diff &= 0xffff;
256 if (diff == BNX2_TX_DESC_CNT)
257 diff = BNX2_MAX_TX_DESC_CNT;
258 }
259 return bp->tx_ring_size - diff;
260}
261
262static u32
263bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
264{
265 unsigned long flags;
266 u32 val;
267
268 spin_lock_irqsave(&bp->indirect_lock, flags);
269 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
270 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
271 spin_unlock_irqrestore(&bp->indirect_lock, flags);
272 return val;
273}
274
275static void
276bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
277{
278 unsigned long flags;
279
280 spin_lock_irqsave(&bp->indirect_lock, flags);
281 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
282 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
283 spin_unlock_irqrestore(&bp->indirect_lock, flags);
284}
285
286static void
287bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
288{
289 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
290}
291
292static u32
293bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
294{
295 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
296}
297
298static void
299bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
300{
301 unsigned long flags;
302
303 offset += cid_addr;
304 spin_lock_irqsave(&bp->indirect_lock, flags);
305 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
306 int i;
307
308 BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
309 BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
310 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
311 for (i = 0; i < 5; i++) {
312 val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
313 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
314 break;
315 udelay(5);
316 }
317 } else {
318 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
319 BNX2_WR(bp, BNX2_CTX_DATA, val);
320 }
321 spin_unlock_irqrestore(&bp->indirect_lock, flags);
322}
323
324#ifdef BCM_CNIC
325static int
326bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
327{
328 struct bnx2 *bp = netdev_priv(dev);
329 struct drv_ctl_io *io = &info->data.io;
330
331 switch (info->cmd) {
332 case DRV_CTL_IO_WR_CMD:
333 bnx2_reg_wr_ind(bp, io->offset, io->data);
334 break;
335 case DRV_CTL_IO_RD_CMD:
336 io->data = bnx2_reg_rd_ind(bp, io->offset);
337 break;
338 case DRV_CTL_CTX_WR_CMD:
339 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
340 break;
341 default:
342 return -EINVAL;
343 }
344 return 0;
345}
346
347static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
348{
349 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
350 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
351 int sb_id;
352
353 if (bp->flags & BNX2_FLAG_USING_MSIX) {
354 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
355 bnapi->cnic_present = 0;
356 sb_id = bp->irq_nvecs;
357 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
358 } else {
359 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
360 bnapi->cnic_tag = bnapi->last_status_idx;
361 bnapi->cnic_present = 1;
362 sb_id = 0;
363 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
364 }
365
366 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
367 cp->irq_arr[0].status_blk = (void *)
368 ((unsigned long) bnapi->status_blk.msi +
369 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
370 cp->irq_arr[0].status_blk_map = bp->status_blk_mapping;
371 cp->irq_arr[0].status_blk_num = sb_id;
372 cp->num_irq = 1;
373}
374
375static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
376 void *data)
377{
378 struct bnx2 *bp = netdev_priv(dev);
379 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
380
381 if (!ops)
382 return -EINVAL;
383
384 if (cp->drv_state & CNIC_DRV_STATE_REGD)
385 return -EBUSY;
386
387 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
388 return -ENODEV;
389
390 bp->cnic_data = data;
391 rcu_assign_pointer(bp->cnic_ops, ops);
392
393 cp->num_irq = 0;
394 cp->drv_state = CNIC_DRV_STATE_REGD;
395
396 bnx2_setup_cnic_irq_info(bp);
397
398 return 0;
399}
400
401static int bnx2_unregister_cnic(struct net_device *dev)
402{
403 struct bnx2 *bp = netdev_priv(dev);
404 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
405 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
406
407 mutex_lock(&bp->cnic_lock);
408 cp->drv_state = 0;
409 bnapi->cnic_present = 0;
410 RCU_INIT_POINTER(bp->cnic_ops, NULL);
411 mutex_unlock(&bp->cnic_lock);
412 synchronize_rcu();
413 return 0;
414}
415
416static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
417{
418 struct bnx2 *bp = netdev_priv(dev);
419 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
420
421 if (!cp->max_iscsi_conn)
422 return NULL;
423
424 cp->drv_owner = THIS_MODULE;
425 cp->chip_id = bp->chip_id;
426 cp->pdev = bp->pdev;
427 cp->io_base = bp->regview;
428 cp->drv_ctl = bnx2_drv_ctl;
429 cp->drv_register_cnic = bnx2_register_cnic;
430 cp->drv_unregister_cnic = bnx2_unregister_cnic;
431
432 return cp;
433}
434
435static void
436bnx2_cnic_stop(struct bnx2 *bp)
437{
438 struct cnic_ops *c_ops;
439 struct cnic_ctl_info info;
440
441 mutex_lock(&bp->cnic_lock);
442 c_ops = rcu_dereference_protected(bp->cnic_ops,
443 lockdep_is_held(&bp->cnic_lock));
444 if (c_ops) {
445 info.cmd = CNIC_CTL_STOP_CMD;
446 c_ops->cnic_ctl(bp->cnic_data, &info);
447 }
448 mutex_unlock(&bp->cnic_lock);
449}
450
451static void
452bnx2_cnic_start(struct bnx2 *bp)
453{
454 struct cnic_ops *c_ops;
455 struct cnic_ctl_info info;
456
457 mutex_lock(&bp->cnic_lock);
458 c_ops = rcu_dereference_protected(bp->cnic_ops,
459 lockdep_is_held(&bp->cnic_lock));
460 if (c_ops) {
461 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
462 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
463
464 bnapi->cnic_tag = bnapi->last_status_idx;
465 }
466 info.cmd = CNIC_CTL_START_CMD;
467 c_ops->cnic_ctl(bp->cnic_data, &info);
468 }
469 mutex_unlock(&bp->cnic_lock);
470}
471
472#else
473
474static void
475bnx2_cnic_stop(struct bnx2 *bp)
476{
477}
478
479static void
480bnx2_cnic_start(struct bnx2 *bp)
481{
482}
483
484#endif
485
486static int
487bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
488{
489 u32 val1;
490 int i, ret;
491
492 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
493 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
494 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
495
496 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
497 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
498
499 udelay(40);
500 }
501
502 val1 = (bp->phy_addr << 21) | (reg << 16) |
503 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
504 BNX2_EMAC_MDIO_COMM_START_BUSY;
505 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
506
507 for (i = 0; i < 50; i++) {
508 udelay(10);
509
510 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
511 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
512 udelay(5);
513
514 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
515 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
516
517 break;
518 }
519 }
520
521 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
522 *val = 0x0;
523 ret = -EBUSY;
524 }
525 else {
526 *val = val1;
527 ret = 0;
528 }
529
530 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
531 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
532 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
533
534 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
535 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
536
537 udelay(40);
538 }
539
540 return ret;
541}
542
543static int
544bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
545{
546 u32 val1;
547 int i, ret;
548
549 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
550 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
551 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
552
553 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
554 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
555
556 udelay(40);
557 }
558
559 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
560 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
561 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
562 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
563
564 for (i = 0; i < 50; i++) {
565 udelay(10);
566
567 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
568 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
569 udelay(5);
570 break;
571 }
572 }
573
574 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
575 ret = -EBUSY;
576 else
577 ret = 0;
578
579 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
580 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
581 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
582
583 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
584 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
585
586 udelay(40);
587 }
588
589 return ret;
590}
591
592static void
593bnx2_disable_int(struct bnx2 *bp)
594{
595 int i;
596 struct bnx2_napi *bnapi;
597
598 for (i = 0; i < bp->irq_nvecs; i++) {
599 bnapi = &bp->bnx2_napi[i];
600 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
601 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
602 }
603 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
604}
605
606static void
607bnx2_enable_int(struct bnx2 *bp)
608{
609 int i;
610 struct bnx2_napi *bnapi;
611
612 for (i = 0; i < bp->irq_nvecs; i++) {
613 bnapi = &bp->bnx2_napi[i];
614
615 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
616 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
617 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
618 bnapi->last_status_idx);
619
620 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
621 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
622 bnapi->last_status_idx);
623 }
624 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
625}
626
627static void
628bnx2_disable_int_sync(struct bnx2 *bp)
629{
630 int i;
631
632 atomic_inc(&bp->intr_sem);
633 if (!netif_running(bp->dev))
634 return;
635
636 bnx2_disable_int(bp);
637 for (i = 0; i < bp->irq_nvecs; i++)
638 synchronize_irq(bp->irq_tbl[i].vector);
639}
640
641static void
642bnx2_napi_disable(struct bnx2 *bp)
643{
644 int i;
645
646 for (i = 0; i < bp->irq_nvecs; i++)
647 napi_disable(&bp->bnx2_napi[i].napi);
648}
649
650static void
651bnx2_napi_enable(struct bnx2 *bp)
652{
653 int i;
654
655 for (i = 0; i < bp->irq_nvecs; i++)
656 napi_enable(&bp->bnx2_napi[i].napi);
657}
658
659static void
660bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
661{
662 if (stop_cnic)
663 bnx2_cnic_stop(bp);
664 if (netif_running(bp->dev)) {
665 bnx2_napi_disable(bp);
666 netif_tx_disable(bp->dev);
667 }
668 bnx2_disable_int_sync(bp);
669 netif_carrier_off(bp->dev); /* prevent tx timeout */
670}
671
672static void
673bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
674{
675 if (atomic_dec_and_test(&bp->intr_sem)) {
676 if (netif_running(bp->dev)) {
677 netif_tx_wake_all_queues(bp->dev);
678 spin_lock_bh(&bp->phy_lock);
679 if (bp->link_up)
680 netif_carrier_on(bp->dev);
681 spin_unlock_bh(&bp->phy_lock);
682 bnx2_napi_enable(bp);
683 bnx2_enable_int(bp);
684 if (start_cnic)
685 bnx2_cnic_start(bp);
686 }
687 }
688}
689
690static void
691bnx2_free_tx_mem(struct bnx2 *bp)
692{
693 int i;
694
695 for (i = 0; i < bp->num_tx_rings; i++) {
696 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
697 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
698
699 if (txr->tx_desc_ring) {
700 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
701 txr->tx_desc_ring,
702 txr->tx_desc_mapping);
703 txr->tx_desc_ring = NULL;
704 }
705 kfree(txr->tx_buf_ring);
706 txr->tx_buf_ring = NULL;
707 }
708}
709
710static void
711bnx2_free_rx_mem(struct bnx2 *bp)
712{
713 int i;
714
715 for (i = 0; i < bp->num_rx_rings; i++) {
716 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
717 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
718 int j;
719
720 for (j = 0; j < bp->rx_max_ring; j++) {
721 if (rxr->rx_desc_ring[j])
722 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
723 rxr->rx_desc_ring[j],
724 rxr->rx_desc_mapping[j]);
725 rxr->rx_desc_ring[j] = NULL;
726 }
727 vfree(rxr->rx_buf_ring);
728 rxr->rx_buf_ring = NULL;
729
730 for (j = 0; j < bp->rx_max_pg_ring; j++) {
731 if (rxr->rx_pg_desc_ring[j])
732 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
733 rxr->rx_pg_desc_ring[j],
734 rxr->rx_pg_desc_mapping[j]);
735 rxr->rx_pg_desc_ring[j] = NULL;
736 }
737 vfree(rxr->rx_pg_ring);
738 rxr->rx_pg_ring = NULL;
739 }
740}
741
742static int
743bnx2_alloc_tx_mem(struct bnx2 *bp)
744{
745 int i;
746
747 for (i = 0; i < bp->num_tx_rings; i++) {
748 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
749 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
750
751 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
752 if (!txr->tx_buf_ring)
753 return -ENOMEM;
754
755 txr->tx_desc_ring =
756 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
757 &txr->tx_desc_mapping, GFP_KERNEL);
758 if (!txr->tx_desc_ring)
759 return -ENOMEM;
760 }
761 return 0;
762}
763
764static int
765bnx2_alloc_rx_mem(struct bnx2 *bp)
766{
767 int i;
768
769 for (i = 0; i < bp->num_rx_rings; i++) {
770 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
771 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
772 int j;
773
774 rxr->rx_buf_ring =
775 vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
776 if (!rxr->rx_buf_ring)
777 return -ENOMEM;
778
779 for (j = 0; j < bp->rx_max_ring; j++) {
780 rxr->rx_desc_ring[j] =
781 dma_alloc_coherent(&bp->pdev->dev,
782 RXBD_RING_SIZE,
783 &rxr->rx_desc_mapping[j],
784 GFP_KERNEL);
785 if (!rxr->rx_desc_ring[j])
786 return -ENOMEM;
787
788 }
789
790 if (bp->rx_pg_ring_size) {
791 rxr->rx_pg_ring =
792 vzalloc(array_size(SW_RXPG_RING_SIZE,
793 bp->rx_max_pg_ring));
794 if (!rxr->rx_pg_ring)
795 return -ENOMEM;
796
797 }
798
799 for (j = 0; j < bp->rx_max_pg_ring; j++) {
800 rxr->rx_pg_desc_ring[j] =
801 dma_alloc_coherent(&bp->pdev->dev,
802 RXBD_RING_SIZE,
803 &rxr->rx_pg_desc_mapping[j],
804 GFP_KERNEL);
805 if (!rxr->rx_pg_desc_ring[j])
806 return -ENOMEM;
807
808 }
809 }
810 return 0;
811}
812
813static void
814bnx2_free_stats_blk(struct net_device *dev)
815{
816 struct bnx2 *bp = netdev_priv(dev);
817
818 if (bp->status_blk) {
819 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
820 bp->status_blk,
821 bp->status_blk_mapping);
822 bp->status_blk = NULL;
823 bp->stats_blk = NULL;
824 }
825}
826
827static int
828bnx2_alloc_stats_blk(struct net_device *dev)
829{
830 int status_blk_size;
831 void *status_blk;
832 struct bnx2 *bp = netdev_priv(dev);
833
834 /* Combine status and statistics blocks into one allocation. */
835 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
836 if (bp->flags & BNX2_FLAG_MSIX_CAP)
837 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
838 BNX2_SBLK_MSIX_ALIGN_SIZE);
839 bp->status_stats_size = status_blk_size +
840 sizeof(struct statistics_block);
841 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
842 &bp->status_blk_mapping, GFP_KERNEL);
843 if (!status_blk)
844 return -ENOMEM;
845
846 bp->status_blk = status_blk;
847 bp->stats_blk = status_blk + status_blk_size;
848 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
849
850 return 0;
851}
852
853static void
854bnx2_free_mem(struct bnx2 *bp)
855{
856 int i;
857 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
858
859 bnx2_free_tx_mem(bp);
860 bnx2_free_rx_mem(bp);
861
862 for (i = 0; i < bp->ctx_pages; i++) {
863 if (bp->ctx_blk[i]) {
864 dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
865 bp->ctx_blk[i],
866 bp->ctx_blk_mapping[i]);
867 bp->ctx_blk[i] = NULL;
868 }
869 }
870
871 if (bnapi->status_blk.msi)
872 bnapi->status_blk.msi = NULL;
873}
874
875static int
876bnx2_alloc_mem(struct bnx2 *bp)
877{
878 int i, err;
879 struct bnx2_napi *bnapi;
880
881 bnapi = &bp->bnx2_napi[0];
882 bnapi->status_blk.msi = bp->status_blk;
883 bnapi->hw_tx_cons_ptr =
884 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
885 bnapi->hw_rx_cons_ptr =
886 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
887 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
888 for (i = 1; i < bp->irq_nvecs; i++) {
889 struct status_block_msix *sblk;
890
891 bnapi = &bp->bnx2_napi[i];
892
893 sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
894 bnapi->status_blk.msix = sblk;
895 bnapi->hw_tx_cons_ptr =
896 &sblk->status_tx_quick_consumer_index;
897 bnapi->hw_rx_cons_ptr =
898 &sblk->status_rx_quick_consumer_index;
899 bnapi->int_num = i << 24;
900 }
901 }
902
903 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
904 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
905 if (bp->ctx_pages == 0)
906 bp->ctx_pages = 1;
907 for (i = 0; i < bp->ctx_pages; i++) {
908 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
909 BNX2_PAGE_SIZE,
910 &bp->ctx_blk_mapping[i],
911 GFP_KERNEL);
912 if (!bp->ctx_blk[i])
913 goto alloc_mem_err;
914 }
915 }
916
917 err = bnx2_alloc_rx_mem(bp);
918 if (err)
919 goto alloc_mem_err;
920
921 err = bnx2_alloc_tx_mem(bp);
922 if (err)
923 goto alloc_mem_err;
924
925 return 0;
926
927alloc_mem_err:
928 bnx2_free_mem(bp);
929 return -ENOMEM;
930}
931
932static void
933bnx2_report_fw_link(struct bnx2 *bp)
934{
935 u32 fw_link_status = 0;
936
937 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
938 return;
939
940 if (bp->link_up) {
941 u32 bmsr;
942
943 switch (bp->line_speed) {
944 case SPEED_10:
945 if (bp->duplex == DUPLEX_HALF)
946 fw_link_status = BNX2_LINK_STATUS_10HALF;
947 else
948 fw_link_status = BNX2_LINK_STATUS_10FULL;
949 break;
950 case SPEED_100:
951 if (bp->duplex == DUPLEX_HALF)
952 fw_link_status = BNX2_LINK_STATUS_100HALF;
953 else
954 fw_link_status = BNX2_LINK_STATUS_100FULL;
955 break;
956 case SPEED_1000:
957 if (bp->duplex == DUPLEX_HALF)
958 fw_link_status = BNX2_LINK_STATUS_1000HALF;
959 else
960 fw_link_status = BNX2_LINK_STATUS_1000FULL;
961 break;
962 case SPEED_2500:
963 if (bp->duplex == DUPLEX_HALF)
964 fw_link_status = BNX2_LINK_STATUS_2500HALF;
965 else
966 fw_link_status = BNX2_LINK_STATUS_2500FULL;
967 break;
968 }
969
970 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
971
972 if (bp->autoneg) {
973 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
974
975 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
976 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
977
978 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
979 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
980 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
981 else
982 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
983 }
984 }
985 else
986 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
987
988 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
989}
990
991static char *
992bnx2_xceiver_str(struct bnx2 *bp)
993{
994 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
995 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
996 "Copper");
997}
998
999static void
1000bnx2_report_link(struct bnx2 *bp)
1001{
1002 if (bp->link_up) {
1003 netif_carrier_on(bp->dev);
1004 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1005 bnx2_xceiver_str(bp),
1006 bp->line_speed,
1007 bp->duplex == DUPLEX_FULL ? "full" : "half");
1008
1009 if (bp->flow_ctrl) {
1010 if (bp->flow_ctrl & FLOW_CTRL_RX) {
1011 pr_cont(", receive ");
1012 if (bp->flow_ctrl & FLOW_CTRL_TX)
1013 pr_cont("& transmit ");
1014 }
1015 else {
1016 pr_cont(", transmit ");
1017 }
1018 pr_cont("flow control ON");
1019 }
1020 pr_cont("\n");
1021 } else {
1022 netif_carrier_off(bp->dev);
1023 netdev_err(bp->dev, "NIC %s Link is Down\n",
1024 bnx2_xceiver_str(bp));
1025 }
1026
1027 bnx2_report_fw_link(bp);
1028}
1029
1030static void
1031bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1032{
1033 u32 local_adv, remote_adv;
1034
1035 bp->flow_ctrl = 0;
1036 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1037 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1038
1039 if (bp->duplex == DUPLEX_FULL) {
1040 bp->flow_ctrl = bp->req_flow_ctrl;
1041 }
1042 return;
1043 }
1044
1045 if (bp->duplex != DUPLEX_FULL) {
1046 return;
1047 }
1048
1049 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1050 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1051 u32 val;
1052
1053 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1054 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1055 bp->flow_ctrl |= FLOW_CTRL_TX;
1056 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1057 bp->flow_ctrl |= FLOW_CTRL_RX;
1058 return;
1059 }
1060
1061 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1062 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1063
1064 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1065 u32 new_local_adv = 0;
1066 u32 new_remote_adv = 0;
1067
1068 if (local_adv & ADVERTISE_1000XPAUSE)
1069 new_local_adv |= ADVERTISE_PAUSE_CAP;
1070 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1071 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1072 if (remote_adv & ADVERTISE_1000XPAUSE)
1073 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1074 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1075 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1076
1077 local_adv = new_local_adv;
1078 remote_adv = new_remote_adv;
1079 }
1080
1081 /* See Table 28B-3 of 802.3ab-1999 spec. */
1082 if (local_adv & ADVERTISE_PAUSE_CAP) {
1083 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1084 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1085 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1086 }
1087 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1088 bp->flow_ctrl = FLOW_CTRL_RX;
1089 }
1090 }
1091 else {
1092 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1093 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1094 }
1095 }
1096 }
1097 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1098 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1099 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1100
1101 bp->flow_ctrl = FLOW_CTRL_TX;
1102 }
1103 }
1104}
1105
1106static int
1107bnx2_5709s_linkup(struct bnx2 *bp)
1108{
1109 u32 val, speed;
1110
1111 bp->link_up = 1;
1112
1113 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1114 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1115 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1116
1117 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1118 bp->line_speed = bp->req_line_speed;
1119 bp->duplex = bp->req_duplex;
1120 return 0;
1121 }
1122 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1123 switch (speed) {
1124 case MII_BNX2_GP_TOP_AN_SPEED_10:
1125 bp->line_speed = SPEED_10;
1126 break;
1127 case MII_BNX2_GP_TOP_AN_SPEED_100:
1128 bp->line_speed = SPEED_100;
1129 break;
1130 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1131 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1132 bp->line_speed = SPEED_1000;
1133 break;
1134 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1135 bp->line_speed = SPEED_2500;
1136 break;
1137 }
1138 if (val & MII_BNX2_GP_TOP_AN_FD)
1139 bp->duplex = DUPLEX_FULL;
1140 else
1141 bp->duplex = DUPLEX_HALF;
1142 return 0;
1143}
1144
1145static int
1146bnx2_5708s_linkup(struct bnx2 *bp)
1147{
1148 u32 val;
1149
1150 bp->link_up = 1;
1151 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1152 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1153 case BCM5708S_1000X_STAT1_SPEED_10:
1154 bp->line_speed = SPEED_10;
1155 break;
1156 case BCM5708S_1000X_STAT1_SPEED_100:
1157 bp->line_speed = SPEED_100;
1158 break;
1159 case BCM5708S_1000X_STAT1_SPEED_1G:
1160 bp->line_speed = SPEED_1000;
1161 break;
1162 case BCM5708S_1000X_STAT1_SPEED_2G5:
1163 bp->line_speed = SPEED_2500;
1164 break;
1165 }
1166 if (val & BCM5708S_1000X_STAT1_FD)
1167 bp->duplex = DUPLEX_FULL;
1168 else
1169 bp->duplex = DUPLEX_HALF;
1170
1171 return 0;
1172}
1173
1174static int
1175bnx2_5706s_linkup(struct bnx2 *bp)
1176{
1177 u32 bmcr, local_adv, remote_adv, common;
1178
1179 bp->link_up = 1;
1180 bp->line_speed = SPEED_1000;
1181
1182 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1183 if (bmcr & BMCR_FULLDPLX) {
1184 bp->duplex = DUPLEX_FULL;
1185 }
1186 else {
1187 bp->duplex = DUPLEX_HALF;
1188 }
1189
1190 if (!(bmcr & BMCR_ANENABLE)) {
1191 return 0;
1192 }
1193
1194 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1195 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1196
1197 common = local_adv & remote_adv;
1198 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1199
1200 if (common & ADVERTISE_1000XFULL) {
1201 bp->duplex = DUPLEX_FULL;
1202 }
1203 else {
1204 bp->duplex = DUPLEX_HALF;
1205 }
1206 }
1207
1208 return 0;
1209}
1210
1211static int
1212bnx2_copper_linkup(struct bnx2 *bp)
1213{
1214 u32 bmcr;
1215
1216 bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1217
1218 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1219 if (bmcr & BMCR_ANENABLE) {
1220 u32 local_adv, remote_adv, common;
1221
1222 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1223 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1224
1225 common = local_adv & (remote_adv >> 2);
1226 if (common & ADVERTISE_1000FULL) {
1227 bp->line_speed = SPEED_1000;
1228 bp->duplex = DUPLEX_FULL;
1229 }
1230 else if (common & ADVERTISE_1000HALF) {
1231 bp->line_speed = SPEED_1000;
1232 bp->duplex = DUPLEX_HALF;
1233 }
1234 else {
1235 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1236 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1237
1238 common = local_adv & remote_adv;
1239 if (common & ADVERTISE_100FULL) {
1240 bp->line_speed = SPEED_100;
1241 bp->duplex = DUPLEX_FULL;
1242 }
1243 else if (common & ADVERTISE_100HALF) {
1244 bp->line_speed = SPEED_100;
1245 bp->duplex = DUPLEX_HALF;
1246 }
1247 else if (common & ADVERTISE_10FULL) {
1248 bp->line_speed = SPEED_10;
1249 bp->duplex = DUPLEX_FULL;
1250 }
1251 else if (common & ADVERTISE_10HALF) {
1252 bp->line_speed = SPEED_10;
1253 bp->duplex = DUPLEX_HALF;
1254 }
1255 else {
1256 bp->line_speed = 0;
1257 bp->link_up = 0;
1258 }
1259 }
1260 }
1261 else {
1262 if (bmcr & BMCR_SPEED100) {
1263 bp->line_speed = SPEED_100;
1264 }
1265 else {
1266 bp->line_speed = SPEED_10;
1267 }
1268 if (bmcr & BMCR_FULLDPLX) {
1269 bp->duplex = DUPLEX_FULL;
1270 }
1271 else {
1272 bp->duplex = DUPLEX_HALF;
1273 }
1274 }
1275
1276 if (bp->link_up) {
1277 u32 ext_status;
1278
1279 bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1280 if (ext_status & EXT_STATUS_MDIX)
1281 bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1282 }
1283
1284 return 0;
1285}
1286
1287static void
1288bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1289{
1290 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1291
1292 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1293 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1294 val |= 0x02 << 8;
1295
1296 if (bp->flow_ctrl & FLOW_CTRL_TX)
1297 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1298
1299 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1300}
1301
1302static void
1303bnx2_init_all_rx_contexts(struct bnx2 *bp)
1304{
1305 int i;
1306 u32 cid;
1307
1308 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1309 if (i == 1)
1310 cid = RX_RSS_CID;
1311 bnx2_init_rx_context(bp, cid);
1312 }
1313}
1314
1315static void
1316bnx2_set_mac_link(struct bnx2 *bp)
1317{
1318 u32 val;
1319
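	/* Program the EMAC TX lengths register: the default IPG/slot-time
	 * value is used except for half-duplex gigabit, where the low byte
	 * grows from 0x20 to 0xff, consistent with the extended slot time
	 * that half-duplex gigabit operation requires.
	 */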
1320 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1321 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1322 (bp->duplex == DUPLEX_HALF)) {
1323 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1324 }
1325
1326 /* Configure the EMAC mode register. */
1327 val = BNX2_RD(bp, BNX2_EMAC_MODE);
1328
1329 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1330 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1331 BNX2_EMAC_MODE_25G_MODE);
1332
1333 if (bp->link_up) {
1334 switch (bp->line_speed) {
1335 case SPEED_10:
1336 if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1337 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1338 break;
1339 }
1340 fallthrough;
1341 case SPEED_100:
1342 val |= BNX2_EMAC_MODE_PORT_MII;
1343 break;
1344 case SPEED_2500:
1345 val |= BNX2_EMAC_MODE_25G_MODE;
1346 fallthrough;
1347 case SPEED_1000:
1348 val |= BNX2_EMAC_MODE_PORT_GMII;
1349 break;
1350 }
1351 }
1352 else {
1353 val |= BNX2_EMAC_MODE_PORT_GMII;
1354 }
1355
1356 /* Set the MAC to operate in the appropriate duplex mode. */
1357 if (bp->duplex == DUPLEX_HALF)
1358 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1359 BNX2_WR(bp, BNX2_EMAC_MODE, val);
1360
1361 /* Enable/disable rx PAUSE. */
1362 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1363
1364 if (bp->flow_ctrl & FLOW_CTRL_RX)
1365 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1366 BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1367
1368 /* Enable/disable tx PAUSE. */
1369 val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1370 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1371
1372 if (bp->flow_ctrl & FLOW_CTRL_TX)
1373 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1374 BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1375
1376 /* Acknowledge the interrupt. */
1377 BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1378
1379 bnx2_init_all_rx_contexts(bp);
1380}
1381
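/* On 5709 SerDes parts, bp->mii_bmsr1 points at MII_BNX2_GP_TOP_AN_STATUS1
 * in the GP_STATUS block, so the two helpers below switch the PHY block
 * address to GP_STATUS around the read and then restore COMBO_IEEEB0.
 * On all other chips bp->mii_bmsr1 is the plain MII_BMSR and these are
 * no-ops.
 */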
1382static void
1383bnx2_enable_bmsr1(struct bnx2 *bp)
1384{
1385 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1387 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388 MII_BNX2_BLK_ADDR_GP_STATUS);
1389}
1390
1391static void
1392bnx2_disable_bmsr1(struct bnx2 *bp)
1393{
1394 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1395 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1396 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1397 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1398}
1399
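/* bnx2_test_and_enable_2g5() returns nonzero when 2.5G advertisement was
 * already on and 0 when the UP1 register had to be changed; the disable
 * variant below returns 1 only when it had to clear the bit. Callers use
 * a "changed" result to force a link transition so the new advertisement
 * takes effect (see bnx2_setup_serdes_phy()).
 */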
1400static int
1401bnx2_test_and_enable_2g5(struct bnx2 *bp)
1402{
1403 u32 up1;
1404 int ret = 1;
1405
1406 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1407 return 0;
1408
1409 if (bp->autoneg & AUTONEG_SPEED)
1410 bp->advertising |= ADVERTISED_2500baseX_Full;
1411
1412 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1413 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1414
1415 bnx2_read_phy(bp, bp->mii_up1, &up1);
1416 if (!(up1 & BCM5708S_UP1_2G5)) {
1417 up1 |= BCM5708S_UP1_2G5;
1418 bnx2_write_phy(bp, bp->mii_up1, up1);
1419 ret = 0;
1420 }
1421
1422 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1423 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1424 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1425
1426 return ret;
1427}
1428
1429static int
1430bnx2_test_and_disable_2g5(struct bnx2 *bp)
1431{
1432 u32 up1;
1433 int ret = 0;
1434
1435 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1436 return 0;
1437
1438 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1439 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1440
1441 bnx2_read_phy(bp, bp->mii_up1, &up1);
1442 if (up1 & BCM5708S_UP1_2G5) {
1443 up1 &= ~BCM5708S_UP1_2G5;
1444 bnx2_write_phy(bp, bp->mii_up1, up1);
1445 ret = 1;
1446 }
1447
1448 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1449 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1450 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1451
1452 return ret;
1453}
1454
1455static void
1456bnx2_enable_forced_2g5(struct bnx2 *bp)
1457{
1458 u32 bmcr;
1459 int err;
1460
1461 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1462 return;
1463
1464 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1465 u32 val;
1466
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1468 MII_BNX2_BLK_ADDR_SERDES_DIG);
1469 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1470 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1471 val |= MII_BNX2_SD_MISC1_FORCE |
1472 MII_BNX2_SD_MISC1_FORCE_2_5G;
1473 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1474 }
1475
1476 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1477 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1478 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1479
1480 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1481 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1482 if (!err)
1483 bmcr |= BCM5708S_BMCR_FORCE_2500;
1484 } else {
1485 return;
1486 }
1487
1488 if (err)
1489 return;
1490
1491 if (bp->autoneg & AUTONEG_SPEED) {
1492 bmcr &= ~BMCR_ANENABLE;
1493 if (bp->req_duplex == DUPLEX_FULL)
1494 bmcr |= BMCR_FULLDPLX;
1495 }
1496 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1497}
1498
1499static void
1500bnx2_disable_forced_2g5(struct bnx2 *bp)
1501{
1502 u32 bmcr;
1503 int err;
1504
1505 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1506 return;
1507
1508 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1509 u32 val;
1510
1511 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1512 MII_BNX2_BLK_ADDR_SERDES_DIG);
1513 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1514 val &= ~MII_BNX2_SD_MISC1_FORCE;
1515 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1516 }
1517
1518 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1519 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1520 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1521
1522 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1523 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1524 if (!err)
1525 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1526 } else {
1527 return;
1528 }
1529
1530 if (err)
1531 return;
1532
1533 if (bp->autoneg & AUTONEG_SPEED)
1534 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1535 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1536}
1537
1538static void
1539bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1540{
1541 u32 val;
1542
1543 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1544 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1545 if (start)
1546 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1547 else
1548 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1549}
1550
1551static int
1552bnx2_set_link(struct bnx2 *bp)
1553{
1554 u32 bmsr;
1555 u8 link_up;
1556
1557 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1558 bp->link_up = 1;
1559 return 0;
1560 }
1561
1562 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1563 return 0;
1564
1565 link_up = bp->link_up;
1566
1567 bnx2_enable_bmsr1(bp);
1568 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1569 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1570 bnx2_disable_bmsr1(bp);
1571
1572 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1573 (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1574 u32 val, an_dbg;
1575
1576 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1577 bnx2_5706s_force_link_dn(bp, 0);
1578 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1579 }
1580 val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1581
1582 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1583 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1584 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1585
1586 if ((val & BNX2_EMAC_STATUS_LINK) &&
1587 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1588 bmsr |= BMSR_LSTATUS;
1589 else
1590 bmsr &= ~BMSR_LSTATUS;
1591 }
1592
1593 if (bmsr & BMSR_LSTATUS) {
1594 bp->link_up = 1;
1595
1596 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1597 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1598 bnx2_5706s_linkup(bp);
1599 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1600 bnx2_5708s_linkup(bp);
1601 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1602 bnx2_5709s_linkup(bp);
1603 }
1604 else {
1605 bnx2_copper_linkup(bp);
1606 }
1607 bnx2_resolve_flow_ctrl(bp);
1608 }
1609 else {
1610 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1611 (bp->autoneg & AUTONEG_SPEED))
1612 bnx2_disable_forced_2g5(bp);
1613
1614 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1615 u32 bmcr;
1616
1617 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1618 bmcr |= BMCR_ANENABLE;
1619 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1620
1621 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1622 }
1623 bp->link_up = 0;
1624 }
1625
1626 if (bp->link_up != link_up) {
1627 bnx2_report_link(bp);
1628 }
1629
1630 bnx2_set_mac_link(bp);
1631
1632 return 0;
1633}
1634
1635static int
1636bnx2_reset_phy(struct bnx2 *bp)
1637{
1638 int i;
1639 u32 reg;
1640
1641 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1642
1643#define PHY_RESET_MAX_WAIT 100
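	/* Poll for BMCR_RESET to self-clear: up to 100 iterations of
	 * udelay(10), a worst-case wait of about 1 ms.
	 */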
1644 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1645 udelay(10);
1646
 1647 		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1648 if (!(reg & BMCR_RESET)) {
1649 udelay(20);
1650 break;
1651 }
1652 }
1653 if (i == PHY_RESET_MAX_WAIT) {
1654 return -EBUSY;
1655 }
1656 return 0;
1657}
1658
1659static u32
1660bnx2_phy_get_pause_adv(struct bnx2 *bp)
1661{
1662 u32 adv = 0;
1663
1664 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1665 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1666
1667 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1668 adv = ADVERTISE_1000XPAUSE;
1669 }
1670 else {
1671 adv = ADVERTISE_PAUSE_CAP;
1672 }
1673 }
1674 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1675 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1676 adv = ADVERTISE_1000XPSE_ASYM;
1677 }
1678 else {
1679 adv = ADVERTISE_PAUSE_ASYM;
1680 }
1681 }
1682 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1683 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1684 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1685 }
1686 else {
1687 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1688 }
1689 }
1690 return adv;
1691}
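/* The mapping above, by requested flow control (SerDes / copper bits):
 *
 *	RX+TX:	1000XPAUSE		/ PAUSE_CAP
 *	TX:	1000XPSE_ASYM		/ PAUSE_ASYM
 *	RX:	1000XPAUSE|PSE_ASYM	/ PAUSE_CAP|PAUSE_ASYM
 *
 * This is the standard symmetric/asymmetric PAUSE encoding of
 * IEEE 802.3 Annex 28B.
 */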
1692
1693static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1694
1695static int
1696bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1697__releases(&bp->phy_lock)
1698__acquires(&bp->phy_lock)
1699{
1700 u32 speed_arg = 0, pause_adv;
1701
1702 pause_adv = bnx2_phy_get_pause_adv(bp);
1703
1704 if (bp->autoneg & AUTONEG_SPEED) {
1705 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1706 if (bp->advertising & ADVERTISED_10baseT_Half)
1707 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1708 if (bp->advertising & ADVERTISED_10baseT_Full)
1709 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1710 if (bp->advertising & ADVERTISED_100baseT_Half)
1711 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1712 if (bp->advertising & ADVERTISED_100baseT_Full)
1713 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1714 if (bp->advertising & ADVERTISED_1000baseT_Full)
1715 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1716 if (bp->advertising & ADVERTISED_2500baseX_Full)
1717 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1718 } else {
1719 if (bp->req_line_speed == SPEED_2500)
1720 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1721 else if (bp->req_line_speed == SPEED_1000)
1722 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1723 else if (bp->req_line_speed == SPEED_100) {
1724 if (bp->req_duplex == DUPLEX_FULL)
1725 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1726 else
1727 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1728 } else if (bp->req_line_speed == SPEED_10) {
1729 if (bp->req_duplex == DUPLEX_FULL)
1730 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1731 else
1732 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1733 }
1734 }
1735
1736 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1737 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1738 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1739 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1740
1741 if (port == PORT_TP)
1742 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1743 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1744
1745 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1746
1747 spin_unlock_bh(&bp->phy_lock);
1748 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1749 spin_lock_bh(&bp->phy_lock);
1750
1751 return 0;
1752}
1753
1754static int
1755bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1756__releases(&bp->phy_lock)
1757__acquires(&bp->phy_lock)
1758{
1759 u32 adv, bmcr;
1760 u32 new_adv = 0;
1761
1762 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1763 return bnx2_setup_remote_phy(bp, port);
1764
1765 if (!(bp->autoneg & AUTONEG_SPEED)) {
1766 u32 new_bmcr;
1767 int force_link_down = 0;
1768
1769 if (bp->req_line_speed == SPEED_2500) {
1770 if (!bnx2_test_and_enable_2g5(bp))
1771 force_link_down = 1;
1772 } else if (bp->req_line_speed == SPEED_1000) {
1773 if (bnx2_test_and_disable_2g5(bp))
1774 force_link_down = 1;
1775 }
1776 bnx2_read_phy(bp, bp->mii_adv, &adv);
1777 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1778
1779 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1780 new_bmcr = bmcr & ~BMCR_ANENABLE;
1781 new_bmcr |= BMCR_SPEED1000;
1782
1783 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1784 if (bp->req_line_speed == SPEED_2500)
1785 bnx2_enable_forced_2g5(bp);
1786 else if (bp->req_line_speed == SPEED_1000) {
1787 bnx2_disable_forced_2g5(bp);
1788 new_bmcr &= ~0x2000;
1789 }
1790
1791 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1792 if (bp->req_line_speed == SPEED_2500)
1793 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1794 else
1795 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1796 }
1797
1798 if (bp->req_duplex == DUPLEX_FULL) {
1799 adv |= ADVERTISE_1000XFULL;
1800 new_bmcr |= BMCR_FULLDPLX;
1801 }
1802 else {
1803 adv |= ADVERTISE_1000XHALF;
1804 new_bmcr &= ~BMCR_FULLDPLX;
1805 }
1806 if ((new_bmcr != bmcr) || (force_link_down)) {
 1807 			/* Force a link down that is visible to the other side */
1808 if (bp->link_up) {
1809 bnx2_write_phy(bp, bp->mii_adv, adv &
1810 ~(ADVERTISE_1000XFULL |
1811 ADVERTISE_1000XHALF));
1812 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1813 BMCR_ANRESTART | BMCR_ANENABLE);
1814
1815 bp->link_up = 0;
1816 netif_carrier_off(bp->dev);
1817 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1818 bnx2_report_link(bp);
1819 }
1820 bnx2_write_phy(bp, bp->mii_adv, adv);
1821 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1822 } else {
1823 bnx2_resolve_flow_ctrl(bp);
1824 bnx2_set_mac_link(bp);
1825 }
1826 return 0;
1827 }
1828
1829 bnx2_test_and_enable_2g5(bp);
1830
1831 if (bp->advertising & ADVERTISED_1000baseT_Full)
1832 new_adv |= ADVERTISE_1000XFULL;
1833
1834 new_adv |= bnx2_phy_get_pause_adv(bp);
1835
1836 bnx2_read_phy(bp, bp->mii_adv, &adv);
1837 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1838
1839 bp->serdes_an_pending = 0;
1840 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
 1841 		/* Force a link down that is visible to the other side */
1842 if (bp->link_up) {
1843 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1844 spin_unlock_bh(&bp->phy_lock);
1845 msleep(20);
1846 spin_lock_bh(&bp->phy_lock);
1847 }
1848
1849 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1850 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1851 BMCR_ANENABLE);
 1852 		/* Speed up link-up time when the link partner
 1853 		 * does not autonegotiate, which is very common
 1854 		 * in blade servers. Some blade servers use
 1855 		 * IPMI for keyboard input, and it's important
 1856 		 * to minimize link disruptions. Autonegotiation
 1857 		 * involves exchanging base pages plus 3 next pages
 1858 		 * and normally completes in about 120 msec.
 1859 		 */
1860 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1861 bp->serdes_an_pending = 1;
1862 mod_timer(&bp->timer, jiffies + bp->current_interval);
1863 } else {
1864 bnx2_resolve_flow_ctrl(bp);
1865 bnx2_set_mac_link(bp);
1866 }
1867
1868 return 0;
1869}
1870
1871#define ETHTOOL_ALL_FIBRE_SPEED \
1872 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1873 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1874 (ADVERTISED_1000baseT_Full)
1875
1876#define ETHTOOL_ALL_COPPER_SPEED \
1877 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1878 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1879 ADVERTISED_1000baseT_Full)
1880
1881#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1882 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1883
1884#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1885
1886static void
1887bnx2_set_default_remote_link(struct bnx2 *bp)
1888{
1889 u32 link;
1890
1891 if (bp->phy_port == PORT_TP)
1892 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1893 else
1894 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1895
1896 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1897 bp->req_line_speed = 0;
1898 bp->autoneg |= AUTONEG_SPEED;
1899 bp->advertising = ADVERTISED_Autoneg;
1900 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1901 bp->advertising |= ADVERTISED_10baseT_Half;
1902 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1903 bp->advertising |= ADVERTISED_10baseT_Full;
1904 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905 bp->advertising |= ADVERTISED_100baseT_Half;
1906 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1907 bp->advertising |= ADVERTISED_100baseT_Full;
1908 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1909 bp->advertising |= ADVERTISED_1000baseT_Full;
1910 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1911 bp->advertising |= ADVERTISED_2500baseX_Full;
1912 } else {
1913 bp->autoneg = 0;
1914 bp->advertising = 0;
1915 bp->req_duplex = DUPLEX_FULL;
1916 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1917 bp->req_line_speed = SPEED_10;
1918 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1919 bp->req_duplex = DUPLEX_HALF;
1920 }
1921 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1922 bp->req_line_speed = SPEED_100;
1923 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1924 bp->req_duplex = DUPLEX_HALF;
1925 }
1926 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1927 bp->req_line_speed = SPEED_1000;
1928 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1929 bp->req_line_speed = SPEED_2500;
1930 }
1931}
1932
1933static void
1934bnx2_set_default_link(struct bnx2 *bp)
1935{
1936 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1937 bnx2_set_default_remote_link(bp);
1938 return;
1939 }
1940
1941 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1942 bp->req_line_speed = 0;
1943 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1944 u32 reg;
1945
1946 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1947
1948 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1949 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1950 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1951 bp->autoneg = 0;
1952 bp->req_line_speed = bp->line_speed = SPEED_1000;
1953 bp->req_duplex = DUPLEX_FULL;
1954 }
1955 } else
1956 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1957}
1958
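/* Write the next driver pulse sequence number (masked with
 * BNX2_DRV_PULSE_SEQ_MASK) into the shared-memory pulse mailbox through
 * the PCICFG register window, under indirect_lock, so the bootcode can
 * tell that the driver is still alive.
 */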
1959static void
1960bnx2_send_heart_beat(struct bnx2 *bp)
1961{
1962 u32 msg;
1963 u32 addr;
1964
1965 spin_lock(&bp->indirect_lock);
1966 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1967 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1968 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1969 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1970 spin_unlock(&bp->indirect_lock);
1971}
1972
1973static void
1974bnx2_remote_phy_event(struct bnx2 *bp)
1975{
1976 u32 msg;
1977 u8 link_up = bp->link_up;
1978 u8 old_port;
1979
1980 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1981
1982 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1983 bnx2_send_heart_beat(bp);
1984
1985 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1986
1987 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1988 bp->link_up = 0;
1989 else {
1990 u32 speed;
1991
1992 bp->link_up = 1;
1993 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1994 bp->duplex = DUPLEX_FULL;
1995 switch (speed) {
1996 case BNX2_LINK_STATUS_10HALF:
1997 bp->duplex = DUPLEX_HALF;
1998 fallthrough;
1999 case BNX2_LINK_STATUS_10FULL:
2000 bp->line_speed = SPEED_10;
2001 break;
2002 case BNX2_LINK_STATUS_100HALF:
2003 bp->duplex = DUPLEX_HALF;
2004 fallthrough;
2005 case BNX2_LINK_STATUS_100BASE_T4:
2006 case BNX2_LINK_STATUS_100FULL:
2007 bp->line_speed = SPEED_100;
2008 break;
2009 case BNX2_LINK_STATUS_1000HALF:
2010 bp->duplex = DUPLEX_HALF;
2011 fallthrough;
2012 case BNX2_LINK_STATUS_1000FULL:
2013 bp->line_speed = SPEED_1000;
2014 break;
2015 case BNX2_LINK_STATUS_2500HALF:
2016 bp->duplex = DUPLEX_HALF;
2017 fallthrough;
2018 case BNX2_LINK_STATUS_2500FULL:
2019 bp->line_speed = SPEED_2500;
2020 break;
2021 default:
2022 bp->line_speed = 0;
2023 break;
2024 }
2025
2026 bp->flow_ctrl = 0;
2027 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2028 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2029 if (bp->duplex == DUPLEX_FULL)
2030 bp->flow_ctrl = bp->req_flow_ctrl;
2031 } else {
2032 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2033 bp->flow_ctrl |= FLOW_CTRL_TX;
2034 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2035 bp->flow_ctrl |= FLOW_CTRL_RX;
2036 }
2037
2038 old_port = bp->phy_port;
2039 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2040 bp->phy_port = PORT_FIBRE;
2041 else
2042 bp->phy_port = PORT_TP;
2043
2044 if (old_port != bp->phy_port)
2045 bnx2_set_default_link(bp);
2046
2047 }
2048 if (bp->link_up != link_up)
2049 bnx2_report_link(bp);
2050
2051 bnx2_set_mac_link(bp);
2052}
2053
2054static int
2055bnx2_set_remote_link(struct bnx2 *bp)
2056{
2057 u32 evt_code;
2058
2059 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2060 switch (evt_code) {
2061 case BNX2_FW_EVT_CODE_LINK_EVENT:
2062 bnx2_remote_phy_event(bp);
2063 break;
2064 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2065 default:
2066 bnx2_send_heart_beat(bp);
2067 break;
2068 }
2069 return 0;
2070}
2071
2072static int
2073bnx2_setup_copper_phy(struct bnx2 *bp)
2074__releases(&bp->phy_lock)
2075__acquires(&bp->phy_lock)
2076{
2077 u32 bmcr, adv_reg, new_adv = 0;
2078 u32 new_bmcr;
2079
2080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2081
2082 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2083 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2084 ADVERTISE_PAUSE_ASYM);
2085
2086 new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2087
2088 if (bp->autoneg & AUTONEG_SPEED) {
2089 u32 adv1000_reg;
2090 u32 new_adv1000 = 0;
2091
2092 new_adv |= bnx2_phy_get_pause_adv(bp);
2093
2094 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2095 adv1000_reg &= PHY_ALL_1000_SPEED;
2096
2097 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2098 if ((adv1000_reg != new_adv1000) ||
2099 (adv_reg != new_adv) ||
2100 ((bmcr & BMCR_ANENABLE) == 0)) {
2101
2102 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2103 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2104 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2105 BMCR_ANENABLE);
2106 }
2107 else if (bp->link_up) {
2108 /* Flow ctrl may have changed from auto to forced */
2109 /* or vice-versa. */
2110
2111 bnx2_resolve_flow_ctrl(bp);
2112 bnx2_set_mac_link(bp);
2113 }
2114 return 0;
2115 }
2116
2117 /* advertise nothing when forcing speed */
2118 if (adv_reg != new_adv)
2119 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2120
2121 new_bmcr = 0;
2122 if (bp->req_line_speed == SPEED_100) {
2123 new_bmcr |= BMCR_SPEED100;
2124 }
2125 if (bp->req_duplex == DUPLEX_FULL) {
2126 new_bmcr |= BMCR_FULLDPLX;
2127 }
2128 if (new_bmcr != bmcr) {
2129 u32 bmsr;
2130
2131 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2132 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2133
2134 if (bmsr & BMSR_LSTATUS) {
2135 /* Force link down */
2136 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2137 spin_unlock_bh(&bp->phy_lock);
2138 msleep(50);
2139 spin_lock_bh(&bp->phy_lock);
2140
2141 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2142 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2143 }
2144
2145 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2146
 2147 		/* Normally, the new speed is set up after the link has
 2148 		 * gone down and up again. In some cases, the link will not
 2149 		 * go down, so we need to set up the new speed here.
 2150 		 */
2151 if (bmsr & BMSR_LSTATUS) {
2152 bp->line_speed = bp->req_line_speed;
2153 bp->duplex = bp->req_duplex;
2154 bnx2_resolve_flow_ctrl(bp);
2155 bnx2_set_mac_link(bp);
2156 }
2157 } else {
2158 bnx2_resolve_flow_ctrl(bp);
2159 bnx2_set_mac_link(bp);
2160 }
2161 return 0;
2162}
2163
2164static int
2165bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166__releases(&bp->phy_lock)
2167__acquires(&bp->phy_lock)
2168{
2169 if (bp->loopback == MAC_LOOPBACK)
2170 return 0;
2171
2172 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173 return bnx2_setup_serdes_phy(bp, port);
2174 }
2175 else {
2176 return bnx2_setup_copper_phy(bp);
2177 }
2178}
2179
2180static int
2181bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2182{
2183 u32 val;
2184
2185 bp->mii_bmcr = MII_BMCR + 0x10;
2186 bp->mii_bmsr = MII_BMSR + 0x10;
2187 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2188 bp->mii_adv = MII_ADVERTISE + 0x10;
2189 bp->mii_lpa = MII_LPA + 0x10;
2190 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2191
2192 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2193 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2194
2195 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2196 if (reset_phy)
2197 bnx2_reset_phy(bp);
2198
2199 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2200
2201 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2202 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2203 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2204 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2205
2206 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2207 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2208 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2209 val |= BCM5708S_UP1_2G5;
2210 else
2211 val &= ~BCM5708S_UP1_2G5;
2212 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2213
2214 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2215 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2216 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2217 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2218
2219 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2220
2221 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2222 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2223 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2224
2225 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2226
2227 return 0;
2228}
2229
2230static int
2231bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2232{
2233 u32 val;
2234
2235 if (reset_phy)
2236 bnx2_reset_phy(bp);
2237
2238 bp->mii_up1 = BCM5708S_UP1;
2239
2240 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2241 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2242 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243
2244 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2245 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2246 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2247
2248 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2249 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2250 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2251
2252 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2253 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2254 val |= BCM5708S_UP1_2G5;
2255 bnx2_write_phy(bp, BCM5708S_UP1, val);
2256 }
2257
2258 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2259 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2260 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2261 /* increase tx signal amplitude */
2262 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263 BCM5708S_BLK_ADDR_TX_MISC);
2264 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2265 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2266 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2267 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2268 }
2269
2270 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2271 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2272
2273 if (val) {
2274 u32 is_backplane;
2275
2276 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2277 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2278 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2279 BCM5708S_BLK_ADDR_TX_MISC);
2280 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2281 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2282 BCM5708S_BLK_ADDR_DIG);
2283 }
2284 }
2285 return 0;
2286}
2287
2288static int
2289bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2290{
2291 if (reset_phy)
2292 bnx2_reset_phy(bp);
2293
2294 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2295
2296 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2297 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2298
2299 if (bp->dev->mtu > ETH_DATA_LEN) {
2300 u32 val;
2301
2302 /* Set extended packet length bit */
2303 bnx2_write_phy(bp, 0x18, 0x7);
2304 bnx2_read_phy(bp, 0x18, &val);
2305 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2306
2307 bnx2_write_phy(bp, 0x1c, 0x6c00);
2308 bnx2_read_phy(bp, 0x1c, &val);
2309 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2310 }
2311 else {
2312 u32 val;
2313
2314 bnx2_write_phy(bp, 0x18, 0x7);
2315 bnx2_read_phy(bp, 0x18, &val);
2316 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2317
2318 bnx2_write_phy(bp, 0x1c, 0x6c00);
2319 bnx2_read_phy(bp, 0x1c, &val);
2320 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2321 }
2322
2323 return 0;
2324}
2325
2326static int
2327bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2328{
2329 u32 val;
2330
2331 if (reset_phy)
2332 bnx2_reset_phy(bp);
2333
2334 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2335 bnx2_write_phy(bp, 0x18, 0x0c00);
2336 bnx2_write_phy(bp, 0x17, 0x000a);
2337 bnx2_write_phy(bp, 0x15, 0x310b);
2338 bnx2_write_phy(bp, 0x17, 0x201f);
2339 bnx2_write_phy(bp, 0x15, 0x9506);
2340 bnx2_write_phy(bp, 0x17, 0x401f);
2341 bnx2_write_phy(bp, 0x15, 0x14e2);
2342 bnx2_write_phy(bp, 0x18, 0x0400);
2343 }
2344
2345 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2346 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2347 MII_BNX2_DSP_EXPAND_REG | 0x8);
2348 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2349 val &= ~(1 << 8);
2350 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2351 }
2352
2353 if (bp->dev->mtu > ETH_DATA_LEN) {
2354 /* Set extended packet length bit */
2355 bnx2_write_phy(bp, 0x18, 0x7);
2356 bnx2_read_phy(bp, 0x18, &val);
2357 bnx2_write_phy(bp, 0x18, val | 0x4000);
2358
2359 bnx2_read_phy(bp, 0x10, &val);
2360 bnx2_write_phy(bp, 0x10, val | 0x1);
2361 }
2362 else {
2363 bnx2_write_phy(bp, 0x18, 0x7);
2364 bnx2_read_phy(bp, 0x18, &val);
2365 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2366
2367 bnx2_read_phy(bp, 0x10, &val);
2368 bnx2_write_phy(bp, 0x10, val & ~0x1);
2369 }
2370
2371 /* ethernet@wirespeed */
2372 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2373 bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2374 val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2375
2376 /* auto-mdix */
2377 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2378 val |= AUX_CTL_MISC_CTL_AUTOMDIX;
2379
2380 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2381 return 0;
2382}
2383
2384
2385static int
2386bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2387__releases(&bp->phy_lock)
2388__acquires(&bp->phy_lock)
2389{
2390 u32 val;
2391 int rc = 0;
2392
2393 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2394 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2395
2396 bp->mii_bmcr = MII_BMCR;
2397 bp->mii_bmsr = MII_BMSR;
2398 bp->mii_bmsr1 = MII_BMSR;
2399 bp->mii_adv = MII_ADVERTISE;
2400 bp->mii_lpa = MII_LPA;
2401
2402 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2403
2404 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2405 goto setup_phy;
2406
2407 bnx2_read_phy(bp, MII_PHYSID1, &val);
2408 bp->phy_id = val << 16;
2409 bnx2_read_phy(bp, MII_PHYSID2, &val);
2410 bp->phy_id |= val & 0xffff;
2411
2412 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2413 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2414 rc = bnx2_init_5706s_phy(bp, reset_phy);
2415 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2416 rc = bnx2_init_5708s_phy(bp, reset_phy);
2417 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2418 rc = bnx2_init_5709s_phy(bp, reset_phy);
2419 }
2420 else {
2421 rc = bnx2_init_copper_phy(bp, reset_phy);
2422 }
2423
2424setup_phy:
2425 if (!rc)
2426 rc = bnx2_setup_phy(bp, bp->phy_port);
2427
2428 return rc;
2429}
2430
2431static int
2432bnx2_set_mac_loopback(struct bnx2 *bp)
2433{
2434 u32 mac_mode;
2435
2436 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2437 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2438 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2439 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2440 bp->link_up = 1;
2441 return 0;
2442}
2443
2444static int bnx2_test_link(struct bnx2 *);
2445
2446static int
2447bnx2_set_phy_loopback(struct bnx2 *bp)
2448{
2449 u32 mac_mode;
2450 int rc, i;
2451
2452 spin_lock_bh(&bp->phy_lock);
2453 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2454 BMCR_SPEED1000);
2455 spin_unlock_bh(&bp->phy_lock);
2456 if (rc)
2457 return rc;
2458
2459 for (i = 0; i < 10; i++) {
2460 if (bnx2_test_link(bp) == 0)
2461 break;
2462 msleep(100);
2463 }
2464
2465 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2466 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2467 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2468 BNX2_EMAC_MODE_25G_MODE);
2469
2470 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2471 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2472 bp->link_up = 1;
2473 return 0;
2474}
2475
2476static void
2477bnx2_dump_mcp_state(struct bnx2 *bp)
2478{
2479 struct net_device *dev = bp->dev;
2480 u32 mcp_p0, mcp_p1;
2481
2482 netdev_err(dev, "<--- start MCP states dump --->\n");
2483 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2484 mcp_p0 = BNX2_MCP_STATE_P0;
2485 mcp_p1 = BNX2_MCP_STATE_P1;
2486 } else {
2487 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2488 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2489 }
2490 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2491 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2492 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2493 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2494 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2495 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
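	/* The program counter is dumped twice so the log shows whether the
	 * MCP is still advancing or is stuck on a single instruction.
	 */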
2496 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2497 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2498 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2499 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2500 netdev_err(dev, "DEBUG: shmem states:\n");
2501 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2502 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2503 bnx2_shmem_rd(bp, BNX2_FW_MB),
2504 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2505 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2506 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2507 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2508 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2509 pr_cont(" condition[%08x]\n",
2510 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2511 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2512 DP_SHMEM_LINE(bp, 0x3cc);
2513 DP_SHMEM_LINE(bp, 0x3dc);
2514 DP_SHMEM_LINE(bp, 0x3ec);
2515 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2516 netdev_err(dev, "<--- end MCP states dump --->\n");
2517}
2518
2519static int
2520bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2521{
2522 int i;
2523 u32 val;
2524
2525 bp->fw_wr_seq++;
2526 msg_data |= bp->fw_wr_seq;
2527 bp->fw_last_msg = msg_data;
2528
2529 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2530
2531 if (!ack)
2532 return 0;
2533
2534 /* wait for an acknowledgement. */
2535 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2536 msleep(10);
2537
2538 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2539
2540 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2541 break;
2542 }
2543 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2544 return 0;
2545
2546 /* If we timed out, inform the firmware that this is the case. */
2547 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2548 msg_data &= ~BNX2_DRV_MSG_CODE;
2549 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2550
2551 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2552 if (!silent) {
2553 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2554 bnx2_dump_mcp_state(bp);
2555 }
2556
2557 return -EBUSY;
2558 }
2559
2560 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2561 return -EIO;
2562
2563 return 0;
2564}
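/* Callers of the handshake above place a sequenced command in the DRV
 * mailbox and, when ack is set, poll the FW mailbox for up to
 * BNX2_FW_ACK_TIME_OUT_MS. For example, bnx2_setup_remote_phy() issues
 *
 *	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
 *
 * A return of -EBUSY means the bootcode never acked (and has been told so
 * via BNX2_DRV_MSG_CODE_FW_TIMEOUT); -EIO means it acked with a failure
 * status.
 */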
2565
2566static int
2567bnx2_init_5709_context(struct bnx2 *bp)
2568{
2569 int i, ret = 0;
2570 u32 val;
2571
2572 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2573 val |= (BNX2_PAGE_BITS - 8) << 16;
2574 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2575 for (i = 0; i < 10; i++) {
2576 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2577 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2578 break;
2579 udelay(2);
2580 }
2581 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2582 return -EBUSY;
2583
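	/* Hand each host context page to the chip: program its 64-bit DMA
	 * address into the page-table data registers, kick a write request
	 * for that table index, and poll until the request bit clears.
	 */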
2584 for (i = 0; i < bp->ctx_pages; i++) {
2585 int j;
2586
2587 if (bp->ctx_blk[i])
2588 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2589 else
2590 return -ENOMEM;
2591
2592 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2593 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2594 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2595 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2596 (u64) bp->ctx_blk_mapping[i] >> 32);
2597 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2598 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2599 for (j = 0; j < 10; j++) {
2600
2601 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2602 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2603 break;
2604 udelay(5);
2605 }
2606 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2607 ret = -EBUSY;
2608 break;
2609 }
2610 }
2611 return ret;
2612}
2613
2614static void
2615bnx2_init_context(struct bnx2 *bp)
2616{
2617 u32 vcid;
2618
2619 vcid = 96;
2620 while (vcid) {
2621 u32 vcid_addr, pcid_addr, offset;
2622 int i;
2623
2624 vcid--;
2625
2626 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2627 u32 new_vcid;
2628
2629 vcid_addr = GET_PCID_ADDR(vcid);
2630 if (vcid & 0x8) {
2631 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2632 }
2633 else {
2634 new_vcid = vcid;
2635 }
2636 pcid_addr = GET_PCID_ADDR(new_vcid);
2637 }
2638 else {
2639 vcid_addr = GET_CID_ADDR(vcid);
2640 pcid_addr = vcid_addr;
2641 }
2642
2643 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2644 vcid_addr += (i << PHY_CTX_SHIFT);
2645 pcid_addr += (i << PHY_CTX_SHIFT);
2646
2647 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2648 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2649
2650 /* Zero out the context. */
2651 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2652 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2653 }
2654 }
2655}
2656
2657static int
2658bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2659{
2660 u16 *good_mbuf;
2661 u32 good_mbuf_cnt;
2662 u32 val;
2663
2664 good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2665 if (!good_mbuf)
2666 return -ENOMEM;
2667
2668 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2669 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2670
2671 good_mbuf_cnt = 0;
2672
2673 /* Allocate a bunch of mbufs and save the good ones in an array. */
2674 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2675 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2676 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2677 BNX2_RBUF_COMMAND_ALLOC_REQ);
2678
2679 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2680
2681 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2682
 2683 		/* The addresses with bit 9 set are bad memory blocks. */
2684 if (!(val & (1 << 9))) {
2685 good_mbuf[good_mbuf_cnt] = (u16) val;
2686 good_mbuf_cnt++;
2687 }
2688
2689 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2690 }
2691
 2692 	/* Free the good ones back to the mbuf pool, thus discarding
 2693 	 * all the bad ones. */
2694 while (good_mbuf_cnt) {
2695 good_mbuf_cnt--;
2696
2697 val = good_mbuf[good_mbuf_cnt];
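		/* Build the FW_BUF_FREE command word: the 9-bit buffer
		 * index appears in both the shifted (<< 9) and unshifted
		 * fields, with bit 0 set.
		 */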
2698 val = (val << 9) | val | 1;
2699
2700 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2701 }
2702 kfree(good_mbuf);
2703 return 0;
2704}
2705
2706static void
2707bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
2708{
2709 u32 val;
2710
2711 val = (mac_addr[0] << 8) | mac_addr[1];
2712
2713 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2714
2715 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2716 (mac_addr[4] << 8) | mac_addr[5];
2717
2718 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2719}
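/* Example: for mac_addr 00:10:18:aa:bb:cc and pos 0, the two writes above
 * produce
 *
 *	BNX2_EMAC_MAC_MATCH0 = 0x00000010	(bytes 0-1)
 *	BNX2_EMAC_MAC_MATCH1 = 0x18aabbcc	(bytes 2-5)
 */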
2720
2721static inline int
2722bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2723{
2724 dma_addr_t mapping;
2725 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2726 struct bnx2_rx_bd *rxbd =
2727 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2728 struct page *page = alloc_page(gfp);
2729
2730 if (!page)
2731 return -ENOMEM;
2732 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2733 DMA_FROM_DEVICE);
2734 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2735 __free_page(page);
2736 return -EIO;
2737 }
2738
2739 rx_pg->page = page;
2740 dma_unmap_addr_set(rx_pg, mapping, mapping);
2741 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2742 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2743 return 0;
2744}
2745
2746static void
2747bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2748{
2749 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2750 struct page *page = rx_pg->page;
2751
2752 if (!page)
2753 return;
2754
2755 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2756 PAGE_SIZE, DMA_FROM_DEVICE);
2757
2758 __free_page(page);
2759 rx_pg->page = NULL;
2760}
2761
2762static inline int
2763bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2764{
2765 u8 *data;
2766 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2767 dma_addr_t mapping;
2768 struct bnx2_rx_bd *rxbd =
2769 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2770
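	/* The kmalloc'ed buffer holds the hardware-written struct l2_fhdr
	 * (located via get_l2_fhdr()) followed by the frame; only that
	 * region is DMA-mapped here, and the same buffer is later turned
	 * into an skb with slab_build_skb() in bnx2_rx_skb().
	 */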
2771 data = kmalloc(bp->rx_buf_size, gfp);
2772 if (!data)
2773 return -ENOMEM;
2774
2775 mapping = dma_map_single(&bp->pdev->dev,
2776 get_l2_fhdr(data),
2777 bp->rx_buf_use_size,
2778 DMA_FROM_DEVICE);
2779 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2780 kfree(data);
2781 return -EIO;
2782 }
2783
2784 rx_buf->data = data;
2785 dma_unmap_addr_set(rx_buf, mapping, mapping);
2786
2787 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2788 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2789
2790 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2791
2792 return 0;
2793}
2794
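/* An attention event is pending when its bit differs between
 * status_attn_bits and status_attn_bits_ack. The helper below detects
 * this and brings the ack bit back in line through the PCICFG status
 * bit set/clear command registers.
 */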
2795static int
2796bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2797{
2798 struct status_block *sblk = bnapi->status_blk.msi;
2799 u32 new_link_state, old_link_state;
2800 int is_set = 1;
2801
2802 new_link_state = sblk->status_attn_bits & event;
2803 old_link_state = sblk->status_attn_bits_ack & event;
2804 if (new_link_state != old_link_state) {
2805 if (new_link_state)
2806 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2807 else
2808 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2809 } else
2810 is_set = 0;
2811
2812 return is_set;
2813}
2814
2815static void
2816bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2817{
2818 spin_lock(&bp->phy_lock);
2819
2820 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2821 bnx2_set_link(bp);
2822 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2823 bnx2_set_remote_link(bp);
2824
2825 spin_unlock(&bp->phy_lock);
2826
2827}
2828
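/* The last descriptor of each ring page is a chain BD pointing to the
 * next page, so a hardware consumer index that lands on that slot is
 * bumped past it; bnx2_get_hw_rx_cons() below does the same for the
 * rx ring.
 */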
2829static inline u16
2830bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2831{
2832 u16 cons;
2833
2834 cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2835
2836 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2837 cons++;
2838 return cons;
2839}
2840
2841static int
2842bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2843{
2844 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2845 u16 hw_cons, sw_cons, sw_ring_cons;
2846 int tx_pkt = 0, index;
2847 unsigned int tx_bytes = 0;
2848 struct netdev_queue *txq;
2849
2850 index = (bnapi - bp->bnx2_napi);
2851 txq = netdev_get_tx_queue(bp->dev, index);
2852
2853 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2854 sw_cons = txr->tx_cons;
2855
2856 while (sw_cons != hw_cons) {
2857 struct bnx2_sw_tx_bd *tx_buf;
2858 struct sk_buff *skb;
2859 int i, last;
2860
2861 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2862
2863 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2864 skb = tx_buf->skb;
2865
 2866 		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2867 prefetch(&skb->end);
2868
2869 /* partial BD completions possible with TSO packets */
2870 if (tx_buf->is_gso) {
2871 u16 last_idx, last_ring_idx;
2872
2873 last_idx = sw_cons + tx_buf->nr_frags + 1;
2874 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2875 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2876 last_idx++;
2877 }
2878 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2879 break;
2880 }
2881 }
2882
2883 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2884 skb_headlen(skb), DMA_TO_DEVICE);
2885
2886 tx_buf->skb = NULL;
2887 last = tx_buf->nr_frags;
2888
2889 for (i = 0; i < last; i++) {
2890 struct bnx2_sw_tx_bd *tx_buf;
2891
2892 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2893
2894 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2895 dma_unmap_page(&bp->pdev->dev,
2896 dma_unmap_addr(tx_buf, mapping),
2897 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2898 DMA_TO_DEVICE);
2899 }
2900
2901 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2902
2903 tx_bytes += skb->len;
2904 dev_kfree_skb_any(skb);
2905 tx_pkt++;
2906 if (tx_pkt == budget)
2907 break;
2908
2909 if (hw_cons == sw_cons)
2910 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2911 }
2912
2913 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2914 txr->hw_tx_cons = hw_cons;
2915 txr->tx_cons = sw_cons;
2916
2917 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2918 * before checking for netif_tx_queue_stopped(). Without the
2919 * memory barrier, there is a small possibility that bnx2_start_xmit()
2920 * will miss it and cause the queue to be stopped forever.
2921 */
2922 smp_mb();
2923
2924 if (unlikely(netif_tx_queue_stopped(txq)) &&
2925 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2926 __netif_tx_lock(txq, smp_processor_id());
2927 if ((netif_tx_queue_stopped(txq)) &&
2928 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2929 netif_tx_wake_queue(txq);
2930 __netif_tx_unlock(txq);
2931 }
2932
2933 return tx_pkt;
2934}
2935
2936static void
2937bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2938 struct sk_buff *skb, int count)
2939{
2940 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2941 struct bnx2_rx_bd *cons_bd, *prod_bd;
2942 int i;
2943 u16 hw_prod, prod;
2944 u16 cons = rxr->rx_pg_cons;
2945
2946 cons_rx_pg = &rxr->rx_pg_ring[cons];
2947
2948 /* The caller was unable to allocate a new page to replace the
2949 * last one in the frags array, so we need to recycle that page
2950 * and then free the skb.
2951 */
2952 if (skb) {
2953 struct page *page;
2954 struct skb_shared_info *shinfo;
2955
2956 shinfo = skb_shinfo(skb);
2957 shinfo->nr_frags--;
2958 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2959
2960 cons_rx_pg->page = page;
2961 dev_kfree_skb(skb);
2962 }
2963
2964 hw_prod = rxr->rx_pg_prod;
2965
2966 for (i = 0; i < count; i++) {
2967 prod = BNX2_RX_PG_RING_IDX(hw_prod);
2968
2969 prod_rx_pg = &rxr->rx_pg_ring[prod];
2970 cons_rx_pg = &rxr->rx_pg_ring[cons];
2971 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2972 [BNX2_RX_IDX(cons)];
2973 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2974 [BNX2_RX_IDX(prod)];
2975
2976 if (prod != cons) {
2977 prod_rx_pg->page = cons_rx_pg->page;
2978 cons_rx_pg->page = NULL;
2979 dma_unmap_addr_set(prod_rx_pg, mapping,
2980 dma_unmap_addr(cons_rx_pg, mapping));
2981
2982 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2983 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2984
2985 }
2986 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2987 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2988 }
2989 rxr->rx_pg_prod = hw_prod;
2990 rxr->rx_pg_cons = cons;
2991}
2992
2993static inline void
2994bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2995 u8 *data, u16 cons, u16 prod)
2996{
2997 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2998 struct bnx2_rx_bd *cons_bd, *prod_bd;
2999
3000 cons_rx_buf = &rxr->rx_buf_ring[cons];
3001 prod_rx_buf = &rxr->rx_buf_ring[prod];
3002
3003 dma_sync_single_for_device(&bp->pdev->dev,
3004 dma_unmap_addr(cons_rx_buf, mapping),
3005 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
3006
3007 rxr->rx_prod_bseq += bp->rx_buf_use_size;
3008
3009 prod_rx_buf->data = data;
3010
3011 if (cons == prod)
3012 return;
3013
3014 dma_unmap_addr_set(prod_rx_buf, mapping,
3015 dma_unmap_addr(cons_rx_buf, mapping));
3016
3017 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3018 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3019 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3020 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3021}
3022
3023static struct sk_buff *
3024bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3025 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3026 u32 ring_idx)
3027{
3028 int err;
3029 u16 prod = ring_idx & 0xffff;
3030 struct sk_buff *skb;
3031
3032 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3033 if (unlikely(err)) {
3034 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3035error:
3036 if (hdr_len) {
3037 unsigned int raw_len = len + 4;
3038 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3039
3040 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3041 }
3042 return NULL;
3043 }
3044
3045 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3046 DMA_FROM_DEVICE);
3047 skb = slab_build_skb(data);
3048 if (!skb) {
3049 kfree(data);
3050 goto error;
3051 }
3052 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3053 if (hdr_len == 0) {
3054 skb_put(skb, len);
3055 return skb;
3056 } else {
3057 unsigned int i, frag_len, frag_size, pages;
3058 struct bnx2_sw_pg *rx_pg;
3059 u16 pg_cons = rxr->rx_pg_cons;
3060 u16 pg_prod = rxr->rx_pg_prod;
3061
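		/* The caller already subtracted the 4-byte frame CRC from
		 * len, but the rx pages hold the raw frame including it, so
		 * add it back for the page accounting and trim it off the
		 * last fragment below.
		 */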
3062 frag_size = len + 4 - hdr_len;
3063 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3064 skb_put(skb, hdr_len);
3065
3066 for (i = 0; i < pages; i++) {
3067 dma_addr_t mapping_old;
3068
3069 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3070 if (unlikely(frag_len <= 4)) {
3071 unsigned int tail = 4 - frag_len;
3072
3073 rxr->rx_pg_cons = pg_cons;
3074 rxr->rx_pg_prod = pg_prod;
3075 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3076 pages - i);
3077 skb->len -= tail;
3078 if (i == 0) {
3079 skb->tail -= tail;
3080 } else {
3081 skb_frag_t *frag =
3082 &skb_shinfo(skb)->frags[i - 1];
3083 skb_frag_size_sub(frag, tail);
3084 skb->data_len -= tail;
3085 }
3086 return skb;
3087 }
3088 rx_pg = &rxr->rx_pg_ring[pg_cons];
3089
3090 /* Don't unmap yet. If we're unable to allocate a new
3091 * page, we need to recycle the page and the DMA addr.
3092 */
3093 mapping_old = dma_unmap_addr(rx_pg, mapping);
3094 if (i == pages - 1)
3095 frag_len -= 4;
3096
3097 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3098 rx_pg->page = NULL;
3099
3100 err = bnx2_alloc_rx_page(bp, rxr,
3101 BNX2_RX_PG_RING_IDX(pg_prod),
3102 GFP_ATOMIC);
3103 if (unlikely(err)) {
3104 rxr->rx_pg_cons = pg_cons;
3105 rxr->rx_pg_prod = pg_prod;
3106 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3107 pages - i);
3108 return NULL;
3109 }
3110
3111 dma_unmap_page(&bp->pdev->dev, mapping_old,
3112 PAGE_SIZE, DMA_FROM_DEVICE);
3113
3114 frag_size -= frag_len;
3115 skb->data_len += frag_len;
3116 skb->truesize += PAGE_SIZE;
3117 skb->len += frag_len;
3118
3119 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3120 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3121 }
3122 rxr->rx_pg_prod = pg_prod;
3123 rxr->rx_pg_cons = pg_cons;
3124 }
3125 return skb;
3126}
3127
3128static inline u16
3129bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3130{
3131 u16 cons;
3132
3133 cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3134
3135 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3136 cons++;
3137 return cons;
3138}
3139
3140static int
3141bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3142{
3143 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3144 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3145 struct l2_fhdr *rx_hdr;
3146 int rx_pkt = 0, pg_ring_used = 0;
3147
3148 if (budget <= 0)
3149 return rx_pkt;
3150
3151 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3152 sw_cons = rxr->rx_cons;
3153 sw_prod = rxr->rx_prod;
3154
3155 /* Memory barrier necessary as speculative reads of the rx
3156 * buffer can be ahead of the index in the status block
3157 */
3158 rmb();
3159 while (sw_cons != hw_cons) {
3160 unsigned int len, hdr_len;
3161 u32 status;
3162 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3163 struct sk_buff *skb;
3164 dma_addr_t dma_addr;
3165 u8 *data;
3166 u16 next_ring_idx;
3167
3168 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3169 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3170
3171 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3172 data = rx_buf->data;
3173 rx_buf->data = NULL;
3174
3175 rx_hdr = get_l2_fhdr(data);
3176 prefetch(rx_hdr);
3177
3178 dma_addr = dma_unmap_addr(rx_buf, mapping);
3179
3180 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3181 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3182 DMA_FROM_DEVICE);
3183
3184 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3185 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3186 prefetch(get_l2_fhdr(next_rx_buf->data));
3187
3188 len = rx_hdr->l2_fhdr_pkt_len;
3189 status = rx_hdr->l2_fhdr_status;
3190
3191 hdr_len = 0;
3192 if (status & L2_FHDR_STATUS_SPLIT) {
3193 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3194 pg_ring_used = 1;
3195 } else if (len > bp->rx_jumbo_thresh) {
3196 hdr_len = bp->rx_jumbo_thresh;
3197 pg_ring_used = 1;
3198 }
3199
3200 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3201 L2_FHDR_ERRORS_PHY_DECODE |
3202 L2_FHDR_ERRORS_ALIGNMENT |
3203 L2_FHDR_ERRORS_TOO_SHORT |
3204 L2_FHDR_ERRORS_GIANT_FRAME))) {
3205
3206 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3207 sw_ring_prod);
3208 if (pg_ring_used) {
3209 int pages;
3210
3211 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3212
3213 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3214 }
3215 goto next_rx;
3216 }
3217
3218 len -= 4;
3219
3220 if (len <= bp->rx_copy_thresh) {
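			/* Small packet: copy it into a fresh skb and recycle
			 * the rx buffer. The copy starts 6 bytes before the
			 * frame, and those 6 bytes are then reserved, which
			 * keeps the copy aligned and puts the IP header on a
			 * 4-byte boundary (6 + 14-byte Ethernet header).
			 */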
3221 skb = netdev_alloc_skb(bp->dev, len + 6);
3222 if (!skb) {
3223 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3224 sw_ring_prod);
3225 goto next_rx;
3226 }
3227
3228 /* aligned copy */
3229 memcpy(skb->data,
3230 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3231 len + 6);
3232 skb_reserve(skb, 6);
3233 skb_put(skb, len);
3234
3235 bnx2_reuse_rx_data(bp, rxr, data,
3236 sw_ring_cons, sw_ring_prod);
3237
3238 } else {
3239 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3240 (sw_ring_cons << 16) | sw_ring_prod);
3241 if (!skb)
3242 goto next_rx;
3243 }
3244 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3245 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3246 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3247
3248 skb->protocol = eth_type_trans(skb, bp->dev);
3249
3250 if (len > (bp->dev->mtu + ETH_HLEN) &&
 3251 		    skb->protocol != htons(ETH_P_8021Q) &&
3252 skb->protocol != htons(ETH_P_8021AD)) {
3253
3254 dev_kfree_skb(skb);
3255 goto next_rx;
3256
3257 }
3258
3259 skb_checksum_none_assert(skb);
3260 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3261 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3262 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3263
3264 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3265 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3266 skb->ip_summed = CHECKSUM_UNNECESSARY;
3267 }
3268 if ((bp->dev->features & NETIF_F_RXHASH) &&
3269 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3270 L2_FHDR_STATUS_USE_RXHASH))
3271 skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3272 PKT_HASH_TYPE_L3);
3273
3274 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3275 napi_gro_receive(&bnapi->napi, skb);
3276 rx_pkt++;
3277
3278next_rx:
3279 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3280 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3281
3282 if (rx_pkt == budget)
3283 break;
3284
3285 /* Refresh hw_cons to see if there is new work */
3286 if (sw_cons == hw_cons) {
3287 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3288 rmb();
3289 }
3290 }
3291 rxr->rx_cons = sw_cons;
3292 rxr->rx_prod = sw_prod;
3293
3294 if (pg_ring_used)
3295 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3296
3297 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3298
3299 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3300
3301 return rx_pkt;
3302
3303}
3304
3305/* MSI ISR - The only difference between this and the INTx ISR
3306 * is that the MSI interrupt is always serviced.
3307 */
3308static irqreturn_t
3309bnx2_msi(int irq, void *dev_instance)
3310{
3311 struct bnx2_napi *bnapi = dev_instance;
3312 struct bnx2 *bp = bnapi->bp;
3313
3314 prefetch(bnapi->status_blk.msi);
3315 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3316 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3317 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3318
3319 /* Return here if interrupt is disabled. */
3320 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3321 return IRQ_HANDLED;
3322
3323 napi_schedule(&bnapi->napi);
3324
3325 return IRQ_HANDLED;
3326}
3327
3328static irqreturn_t
3329bnx2_msi_1shot(int irq, void *dev_instance)
3330{
3331 struct bnx2_napi *bnapi = dev_instance;
3332 struct bnx2 *bp = bnapi->bp;
3333
3334 prefetch(bnapi->status_blk.msi);
3335
3336 /* Return here if interrupt is disabled. */
3337 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3338 return IRQ_HANDLED;
3339
3340 napi_schedule(&bnapi->napi);
3341
3342 return IRQ_HANDLED;
3343}
3344
3345static irqreturn_t
3346bnx2_interrupt(int irq, void *dev_instance)
3347{
3348 struct bnx2_napi *bnapi = dev_instance;
3349 struct bnx2 *bp = bnapi->bp;
3350 struct status_block *sblk = bnapi->status_blk.msi;
3351
3352 /* When using INTx, it is possible for the interrupt to arrive
3353 * at the CPU before the status block posted prior to the
3354 * interrupt. Reading a register will flush the status block.
3355 * When using MSI, the MSI message will always complete after
3356 * the status block write.
3357 */
3358 if ((sblk->status_idx == bnapi->last_status_idx) &&
3359 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3360 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3361 return IRQ_NONE;
3362
3363 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3364 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3365 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3366
3367 /* Read back to deassert IRQ immediately to avoid too many
3368 * spurious interrupts.
3369 */
3370 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3371
3372 /* Return here if interrupt is shared and is disabled. */
3373 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3374 return IRQ_HANDLED;
3375
3376 if (napi_schedule_prep(&bnapi->napi)) {
3377 bnapi->last_status_idx = sblk->status_idx;
3378 __napi_schedule(&bnapi->napi);
3379 }
3380
3381 return IRQ_HANDLED;
3382}
3383
3384static inline int
3385bnx2_has_fast_work(struct bnx2_napi *bnapi)
3386{
3387 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3388 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3389
3390 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3391 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3392 return 1;
3393 return 0;
3394}
3395
3396#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3397 STATUS_ATTN_BITS_TIMER_ABORT)
3398
3399static inline int
3400bnx2_has_work(struct bnx2_napi *bnapi)
3401{
3402 struct status_block *sblk = bnapi->status_blk.msi;
3403
3404 if (bnx2_has_fast_work(bnapi))
3405 return 1;
3406
3407#ifdef BCM_CNIC
3408 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3409 return 1;
3410#endif
3411
3412 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3413 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3414 return 1;
3415
3416 return 0;
3417}
3418
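/* Workaround for lost MSIs: if work is pending but the status index has
 * not moved since the last idle check, assume an MSI was dropped and
 * kick the handler by hand after pulsing the MSI enable bit.
 */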
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}

#ifdef BCM_CNIC
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif

static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}

static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}

static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete_done(napi, work_done);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}

static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete_done(napi, work_done);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}

/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

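		/* Hash each address into the 256-bit multicast filter:
		 * take the low byte of the little-endian CRC32 of the MAC
		 * address; bits 7:5 pick one of the 8 hash registers and
		 * bits 4:0 the bit within it.  E.g. a CRC low byte of 0x6b
		 * sets bit 11 of register 3.
		 */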
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}

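/* Validate one section header of a firmware file: the offset must lie
 * within the file on a 4-byte boundary, and the length must fit in the
 * remainder of the file and honor the caller's alignment.  This guards
 * against loading a truncated or corrupt firmware image.
 */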
static int
check_fw_section(const struct firmware *fw,
		 const struct bnx2_fw_file_section *section,
		 u32 alignment, bool non_empty)
{
	u32 offset = be32_to_cpu(section->offset);
	u32 len = be32_to_cpu(section->len);

	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
		return -EINVAL;
	if ((non_empty && len == 0) || len > fw->size - offset ||
	    len & (alignment - 1))
		return -EINVAL;
	return 0;
}

static int
check_mips_fw_entry(const struct firmware *fw,
		    const struct bnx2_mips_fw_file_entry *entry)
{
	if (check_fw_section(fw, &entry->text, 4, true) ||
	    check_fw_section(fw, &entry->data, 4, false) ||
	    check_fw_section(fw, &entry->rodata, 4, false))
		return -EINVAL;
	return 0;
}

static void bnx2_release_firmware(struct bnx2 *bp)
{
	if (bp->rv2p_firmware) {
		release_firmware(bp->mips_firmware);
		release_firmware(bp->rv2p_firmware);
		bp->rv2p_firmware = NULL;
	}
}

static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}

static int bnx2_request_firmware(struct bnx2 *bp)
{
	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
}

static u32
rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
{
	switch (idx) {
	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
		rv2p_code |= RV2P_BD_PAGE_SIZE;
		break;
	}
	return rv2p_code;
}

static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

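	/* Each RV2P instruction is 64 bits wide: write the two 32-bit
	 * halves to INSTR_HIGH/INSTR_LOW, then commit them by writing the
	 * instruction word index (i / 8) plus the RDWR command to the
	 * processor's address/command register.
	 */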
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

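	/* Apply the load-time fixup table: each of the up to 8 entries
	 * names an instruction location to patch (0 means unused).  The
	 * 32-bit words at loc - 1 and loc are rewritten, with the low half
	 * run through rv2p_fw_fixup() (e.g. to splice in the host's BD
	 * page size), and committed at instruction index loc / 2.
	 */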
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}

static void
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
	    const struct bnx2_mips_fw_file_entry *fw_entry)
{
	u32 addr, len, file_offset;
	__be32 *data;
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	addr = be32_to_cpu(fw_entry->text.addr);
	len = be32_to_cpu(fw_entry->text.len);
	file_offset = be32_to_cpu(fw_entry->text.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Load the Data area. */
	addr = be32_to_cpu(fw_entry->data.addr);
	len = be32_to_cpu(fw_entry->data.len);
	file_offset = be32_to_cpu(fw_entry->data.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Load the Read-Only area. */
	addr = be32_to_cpu(fw_entry->rodata.addr);
	len = be32_to_cpu(fw_entry->rodata.len);
	file_offset = be32_to_cpu(fw_entry->rodata.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);

	val = be32_to_cpu(fw_entry->start_addr);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
}

static void
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);

	/* Initialize the TX Processor. */
	load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);

	/* Initialize the TX Patch-up Processor. */
	load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);

	/* Initialize the Completion Processor. */
	load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);

	/* Initialize the Command Processor. */
	load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
}

static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* Receive all multicasts. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}
}

static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

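/* The flash interface is shared, so software must first win the SW
 * arbitration: set request bit 2, then poll for the matching ARB2
 * grant, giving up after NVRAM_TIMEOUT_COUNT polls of 5 us each.
 */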
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_MISC_CFG);
	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		BNX2_WR(bp, BNX2_NVM_COMMAND,
			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_MISC_CFG);
	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
			BNX2_NVM_ACCESS_ENABLE_WR_EN));
}

static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

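	/* Buffered (page-addressed) parts do not use a flat byte address:
	 * the page number is shifted into the high bits and the byte
	 * offset within the page kept in the low bits.  For example, with
	 * 264-byte pages addressed on 512-byte (9 page-bit) boundaries,
	 * byte offset 600 is page 2, byte 72, and becomes
	 * (2 << 9) + 72 = 1096.
	 */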
	/* Calculate the offset within a buffered flash; not needed for
	 * the 5709.
	 */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset within a buffered flash; not needed for
	 * the 5709.
	 */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

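	/* On the older chips the attached flash part is identified from
	 * NVM_CFG1: if bit 30 is set, the firmware has already
	 * reconfigured the interface and the table is matched on the
	 * backup-strap field; otherwise the part is matched on its
	 * strapping pins and the interface (CFG1-CFG3, WRITE1) is
	 * programmed from the table entry.
	 */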
	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}

static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

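	/* The flash is only dword-addressable, so an arbitrary read is
	 * done in up to three phases: a leading partial dword (buffered
	 * locally and copied out byte-wise), the aligned body (read
	 * straight into ret_buf), and a trailing partial dword whose
	 * `extra' pad bytes are discarded.  FIRST/LAST command flags
	 * bracket the whole access.
	 */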
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}

static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		 int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (!align_buf)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (!flash_buffer) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

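	/* Write one flash page per iteration.  Non-buffered parts can
	 * only be erased a page at a time, so each page is handled as a
	 * read-modify-write: read the whole page into flash_buffer, erase
	 * it, then rewrite the preserved head, the new data, and the
	 * preserved tail.
	 */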
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			   (offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffered flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
							   page_start + j,
							   &flash_buffer[j],
							   cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
			     addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
							    &flash_buffer[i],
							    cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
			     (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
						    cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
			     addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
							    &flash_buffer[i],
							    cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}

static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}

static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}

static void
bnx2_wait_dma_complete(struct bnx2 *bp)
{
	u32 val;
	int i;

	/*
	 * Wait for the current PCI transaction to complete before
	 * issuing a reset.
	 */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}
}

static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	bnx2_wait_dma_complete(bp);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximately 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and writes from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}

static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
	    !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	BNX2_WR(bp, BNX2_DMA_CONFIG, val);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	bnx2_init_cpus(bp);

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	BNX2_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BNX2_PAGE_BITS - 8) << 24;
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	if (mtu < ETH_DATA_LEN)
		mtu = ETH_DATA_LEN;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	/* Set up how to generate a link change interrupt. */
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);

	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	BNX2_WR(bp, BNX2_HC_CONFIG, val);

	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		BNX2_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			bp->tx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);

	return rc;
}

static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		txr = &bnapi->tx_ring;
		rxr = &bnapi->rx_ring;

		txr->tx_cons = 0;
		txr->hw_tx_cons = 0;
		rxr->rx_prod_bseq = 0;
		rxr->rx_prod = 0;
		rxr->rx_cons = 0;
		rxr->rx_pg_prod = 0;
		rxr->rx_pg_cons = 0;
	}
}

static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}

static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct bnx2_tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}

static void
bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
		     u32 buf_size, int num_rings)
{
	int i;
	struct bnx2_rx_bd *rxbd;

	for (i = 0; i < num_rings; i++) {
		int j;

		rxbd = &rx_ring[i][0];
		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = buf_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
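		/* The last descriptor of each ring page is used as a link
		 * to the next page (wrapping back to page 0 at the end),
		 * which is why only BNX2_MAX_RX_DESC_CNT entries above are
		 * filled with buffer descriptors.
		 */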
		if (i == (num_rings - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
	}
}

static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}

static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

5328 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5329 int shift = (i % 8) << 2;
5330
5331 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5332 if ((i % 8) == 7) {
5333 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5334 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5335 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5336 BNX2_RLUP_RSS_COMMAND_WRITE |
5337 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5338 tbl_32 = 0;
5339 }
5340 }
5341
5342 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5343 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5344
5345 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5346
5347 }
5348}
5349
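/* Convert a descriptor count into a number of BD pages, rounded up to
 * the next power of 2.  max_size is assumed to be a power-of-2 upper
 * bound already large enough for the result.  E.g. with 4 KiB pages
 * (255 usable rx BDs per page), a requested ring of 600 descriptors
 * needs 3 pages and is rounded up to 4.
 */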
5350static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5351{
5352 u32 max, num_rings = 1;
5353
5354 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5355 ring_size -= BNX2_MAX_RX_DESC_CNT;
5356 num_rings++;
5357 }
5358 /* round the page count up to the next power of 2 */
5359 max = max_size;
5360 while ((max & num_rings) == 0)
5361 max >>= 1;
5362
5363 if (num_rings != max)
5364 max <<= 1;
5365
5366 return max;
5367}
5368
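/* Size the rx buffers for the current MTU.  If a maximum frame plus
 * overhead no longer fits in one page and BNX2_FLAG_JUMBO_BROKEN is
 * not set, switch to split placement: the first rx_jumbo_thresh bytes
 * of a frame go into the normal rx ring and the remainder is
 * scattered into the page ring.
 */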
5369static void
5370bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5371{
5372 u32 rx_size, rx_space, jumbo_size;
5373
5374 /* 8 for CRC and VLAN */
5375 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5376
5377 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5378 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5379
5380 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5381 bp->rx_pg_ring_size = 0;
5382 bp->rx_max_pg_ring = 0;
5383 bp->rx_max_pg_ring_idx = 0;
5384 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5385 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; /* 40 bytes ~ min IP + TCP headers */
5386
5387 jumbo_size = size * pages;
5388 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5389 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5390
5391 bp->rx_pg_ring_size = jumbo_size;
5392 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5393 BNX2_MAX_RX_PG_RINGS);
5394 bp->rx_max_pg_ring_idx =
5395 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5396 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5397 bp->rx_copy_thresh = 0;
5398 }
5399
5400 bp->rx_buf_use_size = rx_size;
5401 /* hw alignment + build_skb() overhead */
5402 bp->rx_buf_size = kmalloc_size_roundup(
5403 SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5404 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
5405 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5406 bp->rx_ring_size = size;
5407 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5408 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5409}
5410
5411static void
5412bnx2_free_tx_skbs(struct bnx2 *bp)
5413{
5414 int i;
5415
5416 for (i = 0; i < bp->num_tx_rings; i++) {
5417 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5418 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5419 int j;
5420
5421 if (!txr->tx_buf_ring)
5422 continue;
5423
5424 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5425 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5426 struct sk_buff *skb = tx_buf->skb;
5427 int k, last;
5428
5429 if (!skb) {
5430 j = BNX2_NEXT_TX_BD(j);
5431 continue;
5432 }
5433
5434 dma_unmap_single(&bp->pdev->dev,
5435 dma_unmap_addr(tx_buf, mapping),
5436 skb_headlen(skb),
5437 DMA_TO_DEVICE);
5438
5439 tx_buf->skb = NULL;
5440
5441 last = tx_buf->nr_frags;
5442 j = BNX2_NEXT_TX_BD(j);
5443 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5444 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5445 dma_unmap_page(&bp->pdev->dev,
5446 dma_unmap_addr(tx_buf, mapping),
5447 skb_frag_size(&skb_shinfo(skb)->frags[k]),
5448 DMA_TO_DEVICE);
5449 }
5450 dev_kfree_skb(skb);
5451 }
5452 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5453 }
5454}
5455
5456static void
5457bnx2_free_rx_skbs(struct bnx2 *bp)
5458{
5459 int i;
5460
5461 for (i = 0; i < bp->num_rx_rings; i++) {
5462 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5463 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5464 int j;
5465
5466 if (!rxr->rx_buf_ring)
5467 return;
5468
5469 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5470 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5471 u8 *data = rx_buf->data;
5472
5473 if (!data)
5474 continue;
5475
5476 dma_unmap_single(&bp->pdev->dev,
5477 dma_unmap_addr(rx_buf, mapping),
5478 bp->rx_buf_use_size,
5479 DMA_FROM_DEVICE);
5480
5481 rx_buf->data = NULL;
5482
5483 kfree(data);
5484 }
5485 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5486 bnx2_free_rx_page(bp, rxr, j);
5487 }
5488}
5489
5490static void
5491bnx2_free_skbs(struct bnx2 *bp)
5492{
5493 bnx2_free_tx_skbs(bp);
5494 bnx2_free_rx_skbs(bp);
5495}
5496
5497static int
5498bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5499{
5500 int rc;
5501
5502 rc = bnx2_reset_chip(bp, reset_code);
5503 bnx2_free_skbs(bp);
5504 if (rc)
5505 return rc;
5506
5507 if ((rc = bnx2_init_chip(bp)) != 0)
5508 return rc;
5509
5510 bnx2_init_all_rings(bp);
5511 return 0;
5512}
5513
5514static int
5515bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5516{
5517 int rc;
5518
5519 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5520 return rc;
5521
5522 spin_lock_bh(&bp->phy_lock);
5523 bnx2_init_phy(bp, reset_phy);
5524 bnx2_set_link(bp);
5525 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5526 bnx2_remote_phy_event(bp);
5527 spin_unlock_bh(&bp->phy_lock);
5528 return 0;
5529}
5530
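/* Choose the unload/suspend reset code that matches the current WoL
 * configuration and hand the chip back to the firmware.
 */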
5531static int
5532bnx2_shutdown_chip(struct bnx2 *bp)
5533{
5534 u32 reset_code;
5535
5536 if (bp->flags & BNX2_FLAG_NO_WOL)
5537 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5538 else if (bp->wol)
5539 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5540 else
5541 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5542
5543 return bnx2_reset_chip(bp, reset_code);
5544}
5545
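/* Register self-test: for each table entry, write 0 and then all
 * ones, checking that the writable bits (rw_mask) take the written
 * value while the read-only bits (ro_mask) keep their original value.
 * The saved contents are restored afterwards; entries flagged
 * BNX2_FL_NOT_5709 are skipped on the 5709.
 */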
5546static int
5547bnx2_test_registers(struct bnx2 *bp)
5548{
5549 int ret;
5550 int i, is_5709;
5551 static const struct {
5552 u16 offset;
5553 u16 flags;
5554#define BNX2_FL_NOT_5709 1
5555 u32 rw_mask;
5556 u32 ro_mask;
5557 } reg_tbl[] = {
5558 { 0x006c, 0, 0x00000000, 0x0000003f },
5559 { 0x0090, 0, 0xffffffff, 0x00000000 },
5560 { 0x0094, 0, 0x00000000, 0x00000000 },
5561
5562 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5563 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5564 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5565 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5566 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5567 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5568 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5569 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5570 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5571
5572 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5573 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5574 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5575 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5576 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5577 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5578
5579 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5580 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5581 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5582
5583 { 0x1000, 0, 0x00000000, 0x00000001 },
5584 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5585
5586 { 0x1408, 0, 0x01c00800, 0x00000000 },
5587 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5588 { 0x14a8, 0, 0x00000000, 0x000001ff },
5589 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5590 { 0x14b0, 0, 0x00000002, 0x00000001 },
5591 { 0x14b8, 0, 0x00000000, 0x00000000 },
5592 { 0x14c0, 0, 0x00000000, 0x00000009 },
5593 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5594 { 0x14cc, 0, 0x00000000, 0x00000001 },
5595 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5596
5597 { 0x1800, 0, 0x00000000, 0x00000001 },
5598 { 0x1804, 0, 0x00000000, 0x00000003 },
5599
5600 { 0x2800, 0, 0x00000000, 0x00000001 },
5601 { 0x2804, 0, 0x00000000, 0x00003f01 },
5602 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5603 { 0x2810, 0, 0xffff0000, 0x00000000 },
5604 { 0x2814, 0, 0xffff0000, 0x00000000 },
5605 { 0x2818, 0, 0xffff0000, 0x00000000 },
5606 { 0x281c, 0, 0xffff0000, 0x00000000 },
5607 { 0x2834, 0, 0xffffffff, 0x00000000 },
5608 { 0x2840, 0, 0x00000000, 0xffffffff },
5609 { 0x2844, 0, 0x00000000, 0xffffffff },
5610 { 0x2848, 0, 0xffffffff, 0x00000000 },
5611 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5612
5613 { 0x2c00, 0, 0x00000000, 0x00000011 },
5614 { 0x2c04, 0, 0x00000000, 0x00030007 },
5615
5616 { 0x3c00, 0, 0x00000000, 0x00000001 },
5617 { 0x3c04, 0, 0x00000000, 0x00070000 },
5618 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5619 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5620 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5621 { 0x3c14, 0, 0x00000000, 0xffffffff },
5622 { 0x3c18, 0, 0x00000000, 0xffffffff },
5623 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5624 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5625
5626 { 0x5004, 0, 0x00000000, 0x0000007f },
5627 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5628
5629 { 0x5c00, 0, 0x00000000, 0x00000001 },
5630 { 0x5c04, 0, 0x00000000, 0x0003000f },
5631 { 0x5c08, 0, 0x00000003, 0x00000000 },
5632 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5633 { 0x5c10, 0, 0x00000000, 0xffffffff },
5634 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5635 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5636 { 0x5c88, 0, 0x00000000, 0x00077373 },
5637 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5638
5639 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5640 { 0x680c, 0, 0xffffffff, 0x00000000 },
5641 { 0x6810, 0, 0xffffffff, 0x00000000 },
5642 { 0x6814, 0, 0xffffffff, 0x00000000 },
5643 { 0x6818, 0, 0xffffffff, 0x00000000 },
5644 { 0x681c, 0, 0xffffffff, 0x00000000 },
5645 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5646 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5647 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5648 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5649 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5650 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5651 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5652 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5653 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5654 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5655 { 0x684c, 0, 0xffffffff, 0x00000000 },
5656 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5657 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5658 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5659 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5660 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5661 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5662
5663 { 0xffff, 0, 0x00000000, 0x00000000 },
5664 };
5665
5666 ret = 0;
5667 is_5709 = 0;
5668 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5669 is_5709 = 1;
5670
5671 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5672 u32 offset, rw_mask, ro_mask, save_val, val;
5673 u16 flags = reg_tbl[i].flags;
5674
5675 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5676 continue;
5677
5678 offset = (u32) reg_tbl[i].offset;
5679 rw_mask = reg_tbl[i].rw_mask;
5680 ro_mask = reg_tbl[i].ro_mask;
5681
5682 save_val = readl(bp->regview + offset);
5683
5684 writel(0, bp->regview + offset);
5685
5686 val = readl(bp->regview + offset);
5687 if ((val & rw_mask) != 0) {
5688 goto reg_test_err;
5689 }
5690
5691 if ((val & ro_mask) != (save_val & ro_mask)) {
5692 goto reg_test_err;
5693 }
5694
5695 writel(0xffffffff, bp->regview + offset);
5696
5697 val = readl(bp->regview + offset);
5698 if ((val & rw_mask) != rw_mask) {
5699 goto reg_test_err;
5700 }
5701
5702 if ((val & ro_mask) != (save_val & ro_mask)) {
5703 goto reg_test_err;
5704 }
5705
5706 writel(save_val, bp->regview + offset);
5707 continue;
5708
5709reg_test_err:
5710 writel(save_val, bp->regview + offset);
5711 ret = -ENODEV;
5712 break;
5713 }
5714 return ret;
5715}
5716
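/* Write each test pattern to every word of the region through the
 * indirect register interface and read it back.
 */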
5717static int
5718bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5719{
5720 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5721 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5722 int i;
5723
5724 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
5725 u32 offset;
5726
5727 for (offset = 0; offset < size; offset += 4) {
5728
5729 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5730
5731 if (bnx2_reg_rd_ind(bp, start + offset) !=
5732 test_pattern[i]) {
5733 return -ENODEV;
5734 }
5735 }
5736 }
5737 return 0;
5738}
5739
5740static int
5741bnx2_test_memory(struct bnx2 *bp)
5742{
5743 int ret = 0;
5744 int i;
5745 static struct mem_entry {
5746 u32 offset;
5747 u32 len;
5748 } mem_tbl_5706[] = {
5749 { 0x60000, 0x4000 },
5750 { 0xa0000, 0x3000 },
5751 { 0xe0000, 0x4000 },
5752 { 0x120000, 0x4000 },
5753 { 0x1a0000, 0x4000 },
5754 { 0x160000, 0x4000 },
5755 { 0xffffffff, 0 },
5756 },
5757 mem_tbl_5709[] = {
5758 { 0x60000, 0x4000 },
5759 { 0xa0000, 0x3000 },
5760 { 0xe0000, 0x4000 },
5761 { 0x120000, 0x4000 },
5762 { 0x1a0000, 0x4000 },
5763 { 0xffffffff, 0 },
5764 };
5765 struct mem_entry *mem_tbl;
5766
5767 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5768 mem_tbl = mem_tbl_5709;
5769 else
5770 mem_tbl = mem_tbl_5706;
5771
5772 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5773 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5774 mem_tbl[i].len)) != 0) {
5775 return ret;
5776 }
5777 }
5778
5779 return ret;
5780}
5781
5782#define BNX2_MAC_LOOPBACK 0
5783#define BNX2_PHY_LOOPBACK 1
5784
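/* Internal loopback test: queue one self-addressed frame on a single
 * tx BD, force a host-coalescing event so the hardware indices
 * update, and check that the frame reappears on the rx ring with a
 * clean l2_fhdr status, the expected length and an intact payload
 * pattern.
 */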
5785static int
5786bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5787{
5788 unsigned int pkt_size, num_pkts, i;
5789 struct sk_buff *skb;
5790 u8 *data;
5791 unsigned char *packet;
5792 u16 rx_start_idx, rx_idx;
5793 dma_addr_t map;
5794 struct bnx2_tx_bd *txbd;
5795 struct bnx2_sw_bd *rx_buf;
5796 struct l2_fhdr *rx_hdr;
5797 int ret = -ENODEV;
5798 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5799 struct bnx2_tx_ring_info *txr;
5800 struct bnx2_rx_ring_info *rxr;
5801
5802 tx_napi = bnapi;
5803
5804 txr = &tx_napi->tx_ring;
5805 rxr = &bnapi->rx_ring;
5806 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5807 bp->loopback = MAC_LOOPBACK;
5808 bnx2_set_mac_loopback(bp);
5809 }
5810 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5811 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5812 return 0;
5813
5814 bp->loopback = PHY_LOOPBACK;
5815 bnx2_set_phy_loopback(bp);
5816 }
5817 else
5818 return -EINVAL;
5819
5820 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5821 skb = netdev_alloc_skb(bp->dev, pkt_size);
5822 if (!skb)
5823 return -ENOMEM;
5824 packet = skb_put(skb, pkt_size);
5825 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5826 memset(packet + ETH_ALEN, 0x0, 8);
5827 for (i = 14; i < pkt_size; i++)
5828 packet[i] = (unsigned char) (i & 0xff);
5829
5830 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5831 DMA_TO_DEVICE);
5832 if (dma_mapping_error(&bp->pdev->dev, map)) {
5833 dev_kfree_skb(skb);
5834 return -EIO;
5835 }
5836
5837 BNX2_WR(bp, BNX2_HC_COMMAND,
5838 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5839
5840 BNX2_RD(bp, BNX2_HC_COMMAND);
5841
5842 udelay(5);
5843 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5844
5845 num_pkts = 0;
5846
5847 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5848
5849 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5850 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5851 txbd->tx_bd_mss_nbytes = pkt_size;
5852 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5853
5854 num_pkts++;
5855 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5856 txr->tx_prod_bseq += pkt_size;
5857
5858 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5859 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5860
5861 udelay(100);
5862
5863 BNX2_WR(bp, BNX2_HC_COMMAND,
5864 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5865
5866 BNX2_RD(bp, BNX2_HC_COMMAND);
5867
5868 udelay(5);
5869
5870 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5871 dev_kfree_skb(skb);
5872
5873 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5874 goto loopback_test_done;
5875
5876 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5877 if (rx_idx != rx_start_idx + num_pkts) {
5878 goto loopback_test_done;
5879 }
5880
5881 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5882 data = rx_buf->data;
5883
5884 rx_hdr = get_l2_fhdr(data);
5885 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5886
5887 dma_sync_single_for_cpu(&bp->pdev->dev,
5888 dma_unmap_addr(rx_buf, mapping),
5889 bp->rx_buf_use_size, DMA_FROM_DEVICE);
5890
5891 if (rx_hdr->l2_fhdr_status &
5892 (L2_FHDR_ERRORS_BAD_CRC |
5893 L2_FHDR_ERRORS_PHY_DECODE |
5894 L2_FHDR_ERRORS_ALIGNMENT |
5895 L2_FHDR_ERRORS_TOO_SHORT |
5896 L2_FHDR_ERRORS_GIANT_FRAME)) {
5897
5898 goto loopback_test_done;
5899 }
5900
5901 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) { /* l2_fhdr_pkt_len includes the 4-byte CRC */
5902 goto loopback_test_done;
5903 }
5904
5905 for (i = 14; i < pkt_size; i++) {
5906 if (*(data + i) != (unsigned char) (i & 0xff)) {
5907 goto loopback_test_done;
5908 }
5909 }
5910
5911 ret = 0;
5912
5913loopback_test_done:
5914 bp->loopback = 0;
5915 return ret;
5916}
5917
5918#define BNX2_MAC_LOOPBACK_FAILED 1
5919#define BNX2_PHY_LOOPBACK_FAILED 2
5920#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5921 BNX2_PHY_LOOPBACK_FAILED)
5922
5923static int
5924bnx2_test_loopback(struct bnx2 *bp)
5925{
5926 int rc = 0;
5927
5928 if (!netif_running(bp->dev))
5929 return BNX2_LOOPBACK_FAILED;
5930
5931 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5932 spin_lock_bh(&bp->phy_lock);
5933 bnx2_init_phy(bp, 1);
5934 spin_unlock_bh(&bp->phy_lock);
5935 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5936 rc |= BNX2_MAC_LOOPBACK_FAILED;
5937 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5938 rc |= BNX2_PHY_LOOPBACK_FAILED;
5939 return rc;
5940}
5941
5942#define NVRAM_SIZE 0x200
5943#define CRC32_RESIDUAL 0xdebb20e3
5944
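/* Each 0x100-byte NVRAM block checked below carries its own CRC32, so
 * running ether_crc_le() over the whole block (data plus stored CRC)
 * must produce the constant CRC32 residual 0xdebb20e3 when the block
 * is intact.
 */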
5945static int
5946bnx2_test_nvram(struct bnx2 *bp)
5947{
5948 __be32 buf[NVRAM_SIZE / 4];
5949 u8 *data = (u8 *) buf;
5950 int rc = 0;
5951 u32 magic, csum;
5952
5953 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5954 goto test_nvram_done;
5955
5956 magic = be32_to_cpu(buf[0]);
5957 if (magic != 0x669955aa) {
5958 rc = -ENODEV;
5959 goto test_nvram_done;
5960 }
5961
5962 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5963 goto test_nvram_done;
5964
5965 csum = ether_crc_le(0x100, data);
5966 if (csum != CRC32_RESIDUAL) {
5967 rc = -ENODEV;
5968 goto test_nvram_done;
5969 }
5970
5971 csum = ether_crc_le(0x100, data + 0x100);
5972 if (csum != CRC32_RESIDUAL) {
5973 rc = -ENODEV;
5974 }
5975
5976test_nvram_done:
5977 return rc;
5978}
5979
5980static int
5981bnx2_test_link(struct bnx2 *bp)
5982{
5983 u32 bmsr;
5984
5985 if (!netif_running(bp->dev))
5986 return -ENODEV;
5987
5988 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5989 if (bp->link_up)
5990 return 0;
5991 return -ENODEV;
5992 }
5993 spin_lock_bh(&bp->phy_lock);
5994 bnx2_enable_bmsr1(bp);
5995 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5996 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5997 bnx2_disable_bmsr1(bp);
5998 spin_unlock_bh(&bp->phy_lock);
5999
6000 if (bmsr & BMSR_LSTATUS) {
6001 return 0;
6002 }
6003 return -ENODEV;
6004}
6005
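/* Check that the chip can actually raise an interrupt: force a
 * coalescing event and wait up to roughly 100 ms for the status index
 * in BNX2_PCICFG_INT_ACK_CMD to move.
 */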
6006static int
6007bnx2_test_intr(struct bnx2 *bp)
6008{
6009 int i;
6010 u16 status_idx;
6011
6012 if (!netif_running(bp->dev))
6013 return -ENODEV;
6014
6015 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6016
6017 /* This register is not touched at run time. */
6018 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6019 BNX2_RD(bp, BNX2_HC_COMMAND);
6020
6021 for (i = 0; i < 10; i++) {
6022 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6023 status_idx) {
6024
6025 break;
6026 }
6027
6028 msleep_interruptible(10);
6029 }
6030 if (i < 10)
6031 return 0;
6032
6033 return -ENODEV;
6034}
6035
6036/* Determine whether the link is up, for parallel detection. */
6037static int
6038bnx2_5706_serdes_has_link(struct bnx2 *bp)
6039{
6040 u32 mode_ctl, an_dbg, exp;
6041
6042 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6043 return 0;
6044
6045 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6046 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6047
6048 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6049 return 0;
6050
6051 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6052 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6053 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6054
6055 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6056 return 0;
6057
6058 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6059 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6060 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6061
6062 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6063 return 0;
6064
6065 return 1;
6066}
6067
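/* 5706 SerDes link workaround.  If autoneg has not brought the link
 * up but bnx2_5706_serdes_has_link() sees a partner, force 1 Gb/s
 * full duplex (parallel detection); if the partner later appears to
 * autonegotiate (bit 0x20 in the 0x15/0x17 expansion read below),
 * re-enable autoneg.  Link is also forced down on loss of sync.
 */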
6068static void
6069bnx2_5706_serdes_timer(struct bnx2 *bp)
6070{
6071 int check_link = 1;
6072
6073 spin_lock(&bp->phy_lock);
6074 if (bp->serdes_an_pending) {
6075 bp->serdes_an_pending--;
6076 check_link = 0;
6077 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6078 u32 bmcr;
6079
6080 bp->current_interval = BNX2_TIMER_INTERVAL;
6081
6082 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6083
6084 if (bmcr & BMCR_ANENABLE) {
6085 if (bnx2_5706_serdes_has_link(bp)) {
6086 bmcr &= ~BMCR_ANENABLE;
6087 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6088 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6089 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6090 }
6091 }
6092 }
6093 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6094 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6095 u32 phy2;
6096
6097 bnx2_write_phy(bp, 0x17, 0x0f01);
6098 bnx2_read_phy(bp, 0x15, &phy2);
6099 if (phy2 & 0x20) {
6100 u32 bmcr;
6101
6102 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6103 bmcr |= BMCR_ANENABLE;
6104 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6105
6106 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6107 }
6108 } else
6109 bp->current_interval = BNX2_TIMER_INTERVAL;
6110
6111 if (check_link) {
6112 u32 val;
6113
6114 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6115 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6116 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6117
6118 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6119 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6120 bnx2_5706s_force_link_dn(bp, 1);
6121 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6122 } else
6123 bnx2_set_link(bp);
6124 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6125 bnx2_set_link(bp);
6126 }
6127 spin_unlock(&bp->phy_lock);
6128}
6129
6130static void
6131bnx2_5708_serdes_timer(struct bnx2 *bp)
6132{
6133 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6134 return;
6135
6136 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6137 bp->serdes_an_pending = 0;
6138 return;
6139 }
6140
6141 spin_lock(&bp->phy_lock);
6142 if (bp->serdes_an_pending)
6143 bp->serdes_an_pending--;
6144 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6145 u32 bmcr;
6146
6147 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6148 if (bmcr & BMCR_ANENABLE) {
6149 bnx2_enable_forced_2g5(bp);
6150 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6151 } else {
6152 bnx2_disable_forced_2g5(bp);
6153 bp->serdes_an_pending = 2;
6154 bp->current_interval = BNX2_TIMER_INTERVAL;
6155 }
6156
6157 } else
6158 bp->current_interval = BNX2_TIMER_INTERVAL;
6159
6160 spin_unlock(&bp->phy_lock);
6161}
6162
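/* Periodic driver timer: check for missed events in plain (non
 * one-shot) MSI mode, send the management firmware heartbeat, pick up
 * the firmware rx drop counter and run the SerDes workarounds.  Work
 * is skipped while interrupts are disabled (intr_sem != 0).
 */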
6163static void
6164bnx2_timer(struct timer_list *t)
6165{
6166 struct bnx2 *bp = from_timer(bp, t, timer);
6167
6168 if (!netif_running(bp->dev))
6169 return;
6170
6171 if (atomic_read(&bp->intr_sem) != 0)
6172 goto bnx2_restart_timer;
6173
6174 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6175 BNX2_FLAG_USING_MSI)
6176 bnx2_chk_missed_msi(bp);
6177
6178 bnx2_send_heart_beat(bp);
6179
6180 bp->stats_blk->stat_FwRxDrop =
6181 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6182
6183 /* work around occasional counter corruption */
6184 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6185 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6186 BNX2_HC_COMMAND_STATS_NOW);
6187
6188 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6189 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6190 bnx2_5706_serdes_timer(bp);
6191 else
6192 bnx2_5708_serdes_timer(bp);
6193 }
6194
6195bnx2_restart_timer:
6196 mod_timer(&bp->timer, jiffies + bp->current_interval);
6197}
6198
6199static int
6200bnx2_request_irq(struct bnx2 *bp)
6201{
6202 unsigned long flags;
6203 struct bnx2_irq *irq;
6204 int rc = 0, i;
6205
6206 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6207 flags = 0;
6208 else
6209 flags = IRQF_SHARED;
6210
6211 for (i = 0; i < bp->irq_nvecs; i++) {
6212 irq = &bp->irq_tbl[i];
6213 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6214 &bp->bnx2_napi[i]);
6215 if (rc)
6216 break;
6217 irq->requested = 1;
6218 }
6219 return rc;
6220}
6221
6222static void
6223__bnx2_free_irq(struct bnx2 *bp)
6224{
6225 struct bnx2_irq *irq;
6226 int i;
6227
6228 for (i = 0; i < bp->irq_nvecs; i++) {
6229 irq = &bp->irq_tbl[i];
6230 if (irq->requested)
6231 free_irq(irq->vector, &bp->bnx2_napi[i]);
6232 irq->requested = 0;
6233 }
6234}
6235
6236static void
6237bnx2_free_irq(struct bnx2 *bp)
6238{
6240 __bnx2_free_irq(bp);
6241 if (bp->flags & BNX2_FLAG_USING_MSI)
6242 pci_disable_msi(bp->pdev);
6243 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6244 pci_disable_msix(bp->pdev);
6245
6246 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6247}
6248
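/* Map the MSI-X table and PBA through the GRC windows and request up
 * to msix_vecs vectors from the PCI core (one extra vector is
 * reserved for cnic when BCM_CNIC is built in).
 */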
6249static void
6250bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6251{
6252 int i, total_vecs;
6253 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6254 struct net_device *dev = bp->dev;
6255 const int len = sizeof(bp->irq_tbl[0].name);
6256
6257 bnx2_setup_msix_tbl(bp);
6258 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6259 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6260 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6261
6262 /* Need to flush the previous three writes to ensure MSI-X
6263 * is set up properly */
6264 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6265
6266 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6267 msix_ent[i].entry = i;
6268 msix_ent[i].vector = 0;
6269 }
6270
6271 total_vecs = msix_vecs;
6272#ifdef BCM_CNIC
6273 total_vecs++;
6274#endif
6275 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6276 BNX2_MIN_MSIX_VEC, total_vecs);
6277 if (total_vecs < 0)
6278 return;
6279
6280 msix_vecs = total_vecs;
6281#ifdef BCM_CNIC
6282 msix_vecs--;
6283#endif
6284 bp->irq_nvecs = msix_vecs;
6285 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6286 for (i = 0; i < total_vecs; i++) {
6287 bp->irq_tbl[i].vector = msix_ent[i].vector;
6288 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6289 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6290 }
6291}
6292
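/* Select the interrupt mode in order of preference: MSI-X with one
 * vector per ring, then MSI, then legacy INTx, and size the tx/rx
 * ring counts to the number of vectors actually obtained.
 */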
6293static int
6294bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6295{
6296 int cpus = netif_get_num_default_rss_queues();
6297 int msix_vecs;
6298
6299 if (!bp->num_req_rx_rings)
6300 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6301 else if (!bp->num_req_tx_rings)
6302 msix_vecs = max(cpus, bp->num_req_rx_rings);
6303 else
6304 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6305
6306 msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6307
6308 bp->irq_tbl[0].handler = bnx2_interrupt;
6309 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6310 bp->irq_nvecs = 1;
6311 bp->irq_tbl[0].vector = bp->pdev->irq;
6312
6313 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6314 bnx2_enable_msix(bp, msix_vecs);
6315
6316 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6317 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6318 if (pci_enable_msi(bp->pdev) == 0) {
6319 bp->flags |= BNX2_FLAG_USING_MSI;
6320 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6321 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6322 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6323 } else
6324 bp->irq_tbl[0].handler = bnx2_msi;
6325
6326 bp->irq_tbl[0].vector = bp->pdev->irq;
6327 }
6328 }
6329
6330 if (!bp->num_req_tx_rings)
6331 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6332 else
6333 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6334
6335 if (!bp->num_req_rx_rings)
6336 bp->num_rx_rings = bp->irq_nvecs;
6337 else
6338 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6339
6340 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6341
6342 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6343}
6344
6345/* Called with rtnl_lock */
6346static int
6347bnx2_open(struct net_device *dev)
6348{
6349 struct bnx2 *bp = netdev_priv(dev);
6350 int rc;
6351
6352 rc = bnx2_request_firmware(bp);
6353 if (rc < 0)
6354 goto out;
6355
6356 netif_carrier_off(dev);
6357
6358 bnx2_disable_int(bp);
6359
6360 rc = bnx2_setup_int_mode(bp, disable_msi);
6361 if (rc)
6362 goto open_err;
6363 bnx2_init_napi(bp);
6364 bnx2_napi_enable(bp);
6365 rc = bnx2_alloc_mem(bp);
6366 if (rc)
6367 goto open_err;
6368
6369 rc = bnx2_request_irq(bp);
6370 if (rc)
6371 goto open_err;
6372
6373 rc = bnx2_init_nic(bp, 1);
6374 if (rc)
6375 goto open_err;
6376
6377 mod_timer(&bp->timer, jiffies + bp->current_interval);
6378
6379 atomic_set(&bp->intr_sem, 0);
6380
6381 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6382
6383 bnx2_enable_int(bp);
6384
6385 if (bp->flags & BNX2_FLAG_USING_MSI) {
6386 /* Test MSI to make sure it is working.
6387 * If the MSI test fails, go back to INTx mode.
6388 */
6389 if (bnx2_test_intr(bp) != 0) {
6390 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6391
6392 bnx2_disable_int(bp);
6393 bnx2_free_irq(bp);
6394
6395 bnx2_setup_int_mode(bp, 1);
6396
6397 rc = bnx2_init_nic(bp, 0);
6398
6399 if (!rc)
6400 rc = bnx2_request_irq(bp);
6401
6402 if (rc) {
6403 del_timer_sync(&bp->timer);
6404 goto open_err;
6405 }
6406 bnx2_enable_int(bp);
6407 }
6408 }
6409 if (bp->flags & BNX2_FLAG_USING_MSI)
6410 netdev_info(dev, "using MSI\n");
6411 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6412 netdev_info(dev, "using MSIX\n");
6413
6414 netif_tx_start_all_queues(dev);
6415out:
6416 return rc;
6417
6418open_err:
6419 bnx2_napi_disable(bp);
6420 bnx2_free_skbs(bp);
6421 bnx2_free_irq(bp);
6422 bnx2_free_mem(bp);
6423 bnx2_del_napi(bp);
6424 bnx2_release_firmware(bp);
6425 goto out;
6426}
6427
6428static void
6429bnx2_reset_task(struct work_struct *work)
6430{
6431 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6432 int rc;
6433 u16 pcicmd;
6434
6435 rtnl_lock();
6436 if (!netif_running(bp->dev)) {
6437 rtnl_unlock();
6438 return;
6439 }
6440
6441 bnx2_netif_stop(bp, true);
6442
6443 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6444 if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6445 /* in case the PCI block has been reset */
6446 pci_restore_state(bp->pdev);
6447 pci_save_state(bp->pdev);
6448 }
6449 rc = bnx2_init_nic(bp, 1);
6450 if (rc) {
6451 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6452 bnx2_napi_enable(bp);
6453 dev_close(bp->dev);
6454 rtnl_unlock();
6455 return;
6456 }
6457
6458 atomic_set(&bp->intr_sem, 1);
6459 bnx2_netif_start(bp, true);
6460 rtnl_unlock();
6461}
6462
6463#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6464
6465static void
6466bnx2_dump_ftq(struct bnx2 *bp)
6467{
6468 int i;
6469 u32 reg, bdidx, cid, valid;
6470 struct net_device *dev = bp->dev;
6471 static const struct ftq_reg {
6472 char *name;
6473 u32 off;
6474 } ftq_arr[] = {
6475 BNX2_FTQ_ENTRY(RV2P_P),
6476 BNX2_FTQ_ENTRY(RV2P_T),
6477 BNX2_FTQ_ENTRY(RV2P_M),
6478 BNX2_FTQ_ENTRY(TBDR_),
6479 BNX2_FTQ_ENTRY(TDMA_),
6480 BNX2_FTQ_ENTRY(TXP_),
6482 BNX2_FTQ_ENTRY(TPAT_),
6483 BNX2_FTQ_ENTRY(RXP_C),
6484 BNX2_FTQ_ENTRY(RXP_),
6485 BNX2_FTQ_ENTRY(COM_COMXQ_),
6486 BNX2_FTQ_ENTRY(COM_COMTQ_),
6487 BNX2_FTQ_ENTRY(COM_COMQ_),
6488 BNX2_FTQ_ENTRY(CP_CPQ_),
6489 };
6490
6491 netdev_err(dev, "<--- start FTQ dump --->\n");
6492 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6493 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6494 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6495
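/* Dump each on-chip CPU; the pc is printed twice so that a changing
 * value would show the CPU is still executing.
 */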
6496 netdev_err(dev, "CPU states:\n");
6497 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6498 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6499 reg, bnx2_reg_rd_ind(bp, reg),
6500 bnx2_reg_rd_ind(bp, reg + 4),
6501 bnx2_reg_rd_ind(bp, reg + 8),
6502 bnx2_reg_rd_ind(bp, reg + 0x1c),
6503 bnx2_reg_rd_ind(bp, reg + 0x1c),
6504 bnx2_reg_rd_ind(bp, reg + 0x20));
6505
6506 netdev_err(dev, "<--- end FTQ dump --->\n");
6507 netdev_err(dev, "<--- start TBDC dump --->\n");
6508 netdev_err(dev, "TBDC free cnt: %ld\n",
6509 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6510 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6511 for (i = 0; i < 0x20; i++) {
6512 int j = 0;
6513
6514 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6515 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6516 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6517 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6518 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6519 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6520 j++;
6521
6522 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6523 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6524 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6525 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6526 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6527 bdidx >> 24, (valid >> 8) & 0x0ff);
6528 }
6529 netdev_err(dev, "<--- end TBDC dump --->\n");
6530}
6531
6532static void
6533bnx2_dump_state(struct bnx2 *bp)
6534{
6535 struct net_device *dev = bp->dev;
6536 u32 val1, val2;
6537
6538 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6539 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6540 atomic_read(&bp->intr_sem), val1);
6541 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6542 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6543 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6544 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6545 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6546 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6547 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6548 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6549 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6550 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6551 if (bp->flags & BNX2_FLAG_USING_MSIX)
6552 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6553 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6554}
6555
6556static void
6557bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6558{
6559 struct bnx2 *bp = netdev_priv(dev);
6560
6561 bnx2_dump_ftq(bp);
6562 bnx2_dump_state(bp);
6563 bnx2_dump_mcp_state(bp);
6564
6565 /* This allows the netif to be shut down gracefully before resetting */
6566 schedule_work(&bp->reset_task);
6567}
6568
6569/* Called with netif_tx_lock.
6570 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6571 * netif_wake_queue().
6572 */
6573static netdev_tx_t
6574bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6575{
6576 struct bnx2 *bp = netdev_priv(dev);
6577 dma_addr_t mapping;
6578 struct bnx2_tx_bd *txbd;
6579 struct bnx2_sw_tx_bd *tx_buf;
6580 u32 len, vlan_tag_flags, last_frag, mss;
6581 u16 prod, ring_prod;
6582 int i;
6583 struct bnx2_napi *bnapi;
6584 struct bnx2_tx_ring_info *txr;
6585 struct netdev_queue *txq;
6586
6587 /* Determine which tx ring this skb will be placed on */
6588 i = skb_get_queue_mapping(skb);
6589 bnapi = &bp->bnx2_napi[i];
6590 txr = &bnapi->tx_ring;
6591 txq = netdev_get_tx_queue(dev, i);
6592
6593 if (unlikely(bnx2_tx_avail(bp, txr) <
6594 (skb_shinfo(skb)->nr_frags + 1))) {
6595 netif_tx_stop_queue(txq);
6596 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6597
6598 return NETDEV_TX_BUSY;
6599 }
6600 len = skb_headlen(skb);
6601 prod = txr->tx_prod;
6602 ring_prod = BNX2_TX_RING_IDX(prod);
6603
6604 vlan_tag_flags = 0;
6605 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6606 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6607 }
6608
6609 if (skb_vlan_tag_present(skb)) {
6610 vlan_tag_flags |=
6611 (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6612 }
6613
6614 if ((mss = skb_shinfo(skb)->gso_size)) {
6615 u32 tcp_opt_len;
6616 struct iphdr *iph;
6617
6618 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6619
6620 tcp_opt_len = tcp_optlen(skb);
6621
6622 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6623 u32 tcp_off = skb_transport_offset(skb) -
6624 sizeof(struct ipv6hdr) - ETH_HLEN;
6625
6626 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6627 TX_BD_FLAGS_SW_FLAGS;
6628 if (likely(tcp_off == 0))
6629 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6630 else {
6631 tcp_off >>= 3;
6632 vlan_tag_flags |= ((tcp_off & 0x3) <<
6633 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6634 ((tcp_off & 0x10) <<
6635 TX_BD_FLAGS_TCP6_OFF4_SHL);
6636 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6637 }
6638 } else {
6639 iph = ip_hdr(skb);
6640 if (tcp_opt_len || (iph->ihl > 5)) {
6641 vlan_tag_flags |= ((iph->ihl - 5) +
6642 (tcp_opt_len >> 2)) << 8;
6643 }
6644 }
6645 } else
6646 mss = 0;
6647
6648 mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
6649 DMA_TO_DEVICE);
6650 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6651 dev_kfree_skb_any(skb);
6652 return NETDEV_TX_OK;
6653 }
6654
6655 tx_buf = &txr->tx_buf_ring[ring_prod];
6656 tx_buf->skb = skb;
6657 dma_unmap_addr_set(tx_buf, mapping, mapping);
6658
6659 txbd = &txr->tx_desc_ring[ring_prod];
6660
6661 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6662 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6663 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6664 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6665
6666 last_frag = skb_shinfo(skb)->nr_frags;
6667 tx_buf->nr_frags = last_frag;
6668 tx_buf->is_gso = skb_is_gso(skb);
6669
6670 for (i = 0; i < last_frag; i++) {
6671 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6672
6673 prod = BNX2_NEXT_TX_BD(prod);
6674 ring_prod = BNX2_TX_RING_IDX(prod);
6675 txbd = &txr->tx_desc_ring[ring_prod];
6676
6677 len = skb_frag_size(frag);
6678 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6679 DMA_TO_DEVICE);
6680 if (dma_mapping_error(&bp->pdev->dev, mapping))
6681 goto dma_error;
6682 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6683 mapping);
6684
6685 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6686 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6687 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6688 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6690 }
6691 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6692
6693 /* Sync BD data before updating TX mailbox */
6694 wmb();
6695
6696 netdev_tx_sent_queue(txq, skb->len);
6697
6698 prod = BNX2_NEXT_TX_BD(prod);
6699 txr->tx_prod_bseq += skb->len;
6700
6701 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6702 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6703
6704 txr->tx_prod = prod;
6705
6706 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6707 netif_tx_stop_queue(txq);
6708
6709 /* netif_tx_stop_queue() must be done before checking
6710 * tx index in bnx2_tx_avail() below, because in
6711 * bnx2_tx_int(), we update tx index before checking for
6712 * netif_tx_queue_stopped().
6713 */
6714 smp_mb();
6715 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6716 netif_tx_wake_queue(txq);
6717 }
6718
6719 return NETDEV_TX_OK;
6720dma_error:
6721 /* save value of frag that failed */
6722 last_frag = i;
6723
6724 /* start back at beginning and unmap skb */
6725 prod = txr->tx_prod;
6726 ring_prod = BNX2_TX_RING_IDX(prod);
6727 tx_buf = &txr->tx_buf_ring[ring_prod];
6728 tx_buf->skb = NULL;
6729 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6730 skb_headlen(skb), DMA_TO_DEVICE);
6731
6732 /* unmap remaining mapped pages */
6733 for (i = 0; i < last_frag; i++) {
6734 prod = BNX2_NEXT_TX_BD(prod);
6735 ring_prod = BNX2_TX_RING_IDX(prod);
6736 tx_buf = &txr->tx_buf_ring[ring_prod];
6737 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6738 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6739 DMA_TO_DEVICE);
6740 }
6741
6742 dev_kfree_skb_any(skb);
6743 return NETDEV_TX_OK;
6744}
6745
6746/* Called with rtnl_lock */
6747static int
6748bnx2_close(struct net_device *dev)
6749{
6750 struct bnx2 *bp = netdev_priv(dev);
6751
6752 bnx2_disable_int_sync(bp);
6753 bnx2_napi_disable(bp);
6754 netif_tx_disable(dev);
6755 del_timer_sync(&bp->timer);
6756 bnx2_shutdown_chip(bp);
6757 bnx2_free_irq(bp);
6758 bnx2_free_skbs(bp);
6759 bnx2_free_mem(bp);
6760 bnx2_del_napi(bp);
6761 bp->link_up = 0;
6762 netif_carrier_off(bp->dev);
6763 return 0;
6764}
6765
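/* Fold the live hardware statistics block into temp_stats_blk, which
 * preserves counts across chip resets.
 */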
6766static void
6767bnx2_save_stats(struct bnx2 *bp)
6768{
6769 u32 *hw_stats = (u32 *) bp->stats_blk;
6770 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6771 int i;
6772
6773 /* The first 10 counters are 64-bit, stored as hi/lo u32 pairs (20 words) */
6774 for (i = 0; i < 20; i += 2) {
6775 u32 hi;
6776 u64 lo;
6777
6778 hi = temp_stats[i] + hw_stats[i];
6779 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6780 if (lo > 0xffffffff)
6781 hi++;
6782 temp_stats[i] = hi;
6783 temp_stats[i + 1] = lo & 0xffffffff;
6784 }
6785
6786 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6787 temp_stats[i] += hw_stats[i];
6788}
6789
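/* The 64-bit counters are exported by the chip as hi/lo u32 pairs;
 * each reported value is the live hardware counter plus the total
 * accumulated in temp_stats_blk by bnx2_save_stats() across resets.
 */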
6790#define GET_64BIT_NET_STATS64(ctr) \
6791 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6792
6793#define GET_64BIT_NET_STATS(ctr) \
6794 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6795 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6796
6797#define GET_32BIT_NET_STATS(ctr) \
6798 (unsigned long) (bp->stats_blk->ctr + \
6799 bp->temp_stats_blk->ctr)
6800
6801static void
6802bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6803{
6804 struct bnx2 *bp = netdev_priv(dev);
6805
6806 if (!bp->stats_blk)
6807 return;
6808
6809 net_stats->rx_packets =
6810 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6811 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6812 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6813
6814 net_stats->tx_packets =
6815 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6816 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6817 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6818
6819 net_stats->rx_bytes =
6820 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6821
6822 net_stats->tx_bytes =
6823 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6824
6825 net_stats->multicast =
6826 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6827
6828 net_stats->collisions =
6829 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6830
6831 net_stats->rx_length_errors =
6832 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6833 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6834
6835 net_stats->rx_over_errors =
6836 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6837 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6838
6839 net_stats->rx_frame_errors =
6840 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6841
6842 net_stats->rx_crc_errors =
6843 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6844
6845 net_stats->rx_errors = net_stats->rx_length_errors +
6846 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6847 net_stats->rx_crc_errors;
6848
6849 net_stats->tx_aborted_errors =
6850 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6851 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6852
6853 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6854 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6855 net_stats->tx_carrier_errors = 0;
6856 else {
6857 net_stats->tx_carrier_errors =
6858 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6859 }
6860
6861 net_stats->tx_errors =
6862 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6863 net_stats->tx_aborted_errors +
6864 net_stats->tx_carrier_errors;
6865
6866 net_stats->rx_missed_errors =
6867 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6868 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6869 GET_32BIT_NET_STATS(stat_FwRxDrop);
6871}
6872
6873/* All ethtool functions called with rtnl_lock */
6874
6875static int
6876bnx2_get_link_ksettings(struct net_device *dev,
6877 struct ethtool_link_ksettings *cmd)
6878{
6879 struct bnx2 *bp = netdev_priv(dev);
6880 int support_serdes = 0, support_copper = 0;
6881 u32 supported, advertising;
6882
6883 supported = SUPPORTED_Autoneg;
6884 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6885 support_serdes = 1;
6886 support_copper = 1;
6887 } else if (bp->phy_port == PORT_FIBRE)
6888 support_serdes = 1;
6889 else
6890 support_copper = 1;
6891
6892 if (support_serdes) {
6893 supported |= SUPPORTED_1000baseT_Full |
6894 SUPPORTED_FIBRE;
6895 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6896 supported |= SUPPORTED_2500baseX_Full;
6897 }
6898 if (support_copper) {
6899 supported |= SUPPORTED_10baseT_Half |
6900 SUPPORTED_10baseT_Full |
6901 SUPPORTED_100baseT_Half |
6902 SUPPORTED_100baseT_Full |
6903 SUPPORTED_1000baseT_Full |
6904 SUPPORTED_TP;
6905 }
6906
6907 spin_lock_bh(&bp->phy_lock);
6908 cmd->base.port = bp->phy_port;
6909 advertising = bp->advertising;
6910
6911 if (bp->autoneg & AUTONEG_SPEED) {
6912 cmd->base.autoneg = AUTONEG_ENABLE;
6913 } else {
6914 cmd->base.autoneg = AUTONEG_DISABLE;
6915 }
6916
6917 if (netif_carrier_ok(dev)) {
6918 cmd->base.speed = bp->line_speed;
6919 cmd->base.duplex = bp->duplex;
6920 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6921 if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6922 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6923 else
6924 cmd->base.eth_tp_mdix = ETH_TP_MDI;
6925 }
6926 }
6927 else {
6928 cmd->base.speed = SPEED_UNKNOWN;
6929 cmd->base.duplex = DUPLEX_UNKNOWN;
6930 }
6931 spin_unlock_bh(&bp->phy_lock);
6932
6933 cmd->base.phy_address = bp->phy_addr;
6934
6935 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6936 supported);
6937 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6938 advertising);
6939
6940 return 0;
6941}
6942
6943static int
6944bnx2_set_link_ksettings(struct net_device *dev,
6945 const struct ethtool_link_ksettings *cmd)
6946{
6947 struct bnx2 *bp = netdev_priv(dev);
6948 u8 autoneg = bp->autoneg;
6949 u8 req_duplex = bp->req_duplex;
6950 u16 req_line_speed = bp->req_line_speed;
6951 u32 advertising = bp->advertising;
6952 int err = -EINVAL;
6953
6954 spin_lock_bh(&bp->phy_lock);
6955
6956 if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6957 goto err_out_unlock;
6958
6959 if (cmd->base.port != bp->phy_port &&
6960 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6961 goto err_out_unlock;
6962
6963 /* If device is down, we can store the settings only if the user
6964 * is setting the currently active port.
6965 */
6966 if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6967 goto err_out_unlock;
6968
6969 if (cmd->base.autoneg == AUTONEG_ENABLE) {
6970 autoneg |= AUTONEG_SPEED;
6971
6972 ethtool_convert_link_mode_to_legacy_u32(
6973 &advertising, cmd->link_modes.advertising);
6974
6975 if (cmd->base.port == PORT_TP) {
6976 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6977 if (!advertising)
6978 advertising = ETHTOOL_ALL_COPPER_SPEED;
6979 } else {
6980 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6981 if (!advertising)
6982 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6983 }
6984 advertising |= ADVERTISED_Autoneg;
6985 }
6986 else {
6987 u32 speed = cmd->base.speed;
6988
6989 if (cmd->base.port == PORT_FIBRE) {
6990 if ((speed != SPEED_1000 &&
6991 speed != SPEED_2500) ||
6992 (cmd->base.duplex != DUPLEX_FULL))
6993 goto err_out_unlock;
6994
6995 if (speed == SPEED_2500 &&
6996 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6997 goto err_out_unlock;
6998 } else if (speed == SPEED_1000 || speed == SPEED_2500)
6999 goto err_out_unlock;
7000
7001 autoneg &= ~AUTONEG_SPEED;
7002 req_line_speed = speed;
7003 req_duplex = cmd->base.duplex;
7004 advertising = 0;
7005 }
7006
7007 bp->autoneg = autoneg;
7008 bp->advertising = advertising;
7009 bp->req_line_speed = req_line_speed;
7010 bp->req_duplex = req_duplex;
7011
7012 err = 0;
7013 /* If device is down, the new settings will be picked up when it is
7014 * brought up.
7015 */
7016 if (netif_running(dev))
7017 err = bnx2_setup_phy(bp, cmd->base.port);
7018
7019err_out_unlock:
7020 spin_unlock_bh(&bp->phy_lock);
7021
7022 return err;
7023}
7024
7025static void
7026bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7027{
7028 struct bnx2 *bp = netdev_priv(dev);
7029
7030 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7031 strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7032 strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7033}
7034
7035#define BNX2_REGDUMP_LEN (32 * 1024)
7036
7037static int
7038bnx2_get_regs_len(struct net_device *dev)
7039{
7040 return BNX2_REGDUMP_LEN;
7041}
7042
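/* Fill the ethtool register dump.  reg_boundaries holds alternating
 * start/end offsets of the readable ranges; offsets between ranges
 * are not read and stay zero in the dump.
 */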
7043static void
7044bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7045{
7046 u32 *p = _p, i, offset;
7047 u8 *orig_p = _p;
7048 struct bnx2 *bp = netdev_priv(dev);
7049 static const u32 reg_boundaries[] = {
7050 0x0000, 0x0098, 0x0400, 0x045c,
7051 0x0800, 0x0880, 0x0c00, 0x0c10,
7052 0x0c30, 0x0d08, 0x1000, 0x101c,
7053 0x1040, 0x1048, 0x1080, 0x10a4,
7054 0x1400, 0x1490, 0x1498, 0x14f0,
7055 0x1500, 0x155c, 0x1580, 0x15dc,
7056 0x1600, 0x1658, 0x1680, 0x16d8,
7057 0x1800, 0x1820, 0x1840, 0x1854,
7058 0x1880, 0x1894, 0x1900, 0x1984,
7059 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7060 0x1c80, 0x1c94, 0x1d00, 0x1d84,
7061 0x2000, 0x2030, 0x23c0, 0x2400,
7062 0x2800, 0x2820, 0x2830, 0x2850,
7063 0x2b40, 0x2c10, 0x2fc0, 0x3058,
7064 0x3c00, 0x3c94, 0x4000, 0x4010,
7065 0x4080, 0x4090, 0x43c0, 0x4458,
7066 0x4c00, 0x4c18, 0x4c40, 0x4c54,
7067 0x4fc0, 0x5010, 0x53c0, 0x5444,
7068 0x5c00, 0x5c18, 0x5c80, 0x5c90,
7069 0x5fc0, 0x6000, 0x6400, 0x6428,
7070 0x6800, 0x6848, 0x684c, 0x6860,
7071 0x6888, 0x6910, 0x8000
7072 };
7073
7074 regs->version = 0;
7075
7076 memset(p, 0, BNX2_REGDUMP_LEN);
7077
7078 if (!netif_running(bp->dev))
7079 return;
7080
7081 i = 0;
7082 offset = reg_boundaries[0];
7083 p += offset;
7084 while (offset < BNX2_REGDUMP_LEN) {
7085 *p++ = BNX2_RD(bp, offset);
7086 offset += 4;
7087 if (offset == reg_boundaries[i + 1]) {
7088 offset = reg_boundaries[i + 2];
7089 p = (u32 *) (orig_p + offset);
7090 i += 2;
7091 }
7092 }
7093}
7094
7095static void
7096bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7097{
7098 struct bnx2 *bp = netdev_priv(dev);
7099
7100 if (bp->flags & BNX2_FLAG_NO_WOL) {
7101 wol->supported = 0;
7102 wol->wolopts = 0;
7103 }
7104 else {
7105 wol->supported = WAKE_MAGIC;
7106 if (bp->wol)
7107 wol->wolopts = WAKE_MAGIC;
7108 else
7109 wol->wolopts = 0;
7110 }
7111 memset(&wol->sopass, 0, sizeof(wol->sopass));
7112}
7113
7114static int
7115bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7116{
7117 struct bnx2 *bp = netdev_priv(dev);
7118
7119 if (wol->wolopts & ~WAKE_MAGIC)
7120 return -EINVAL;
7121
7122 if (wol->wolopts & WAKE_MAGIC) {
7123 if (bp->flags & BNX2_FLAG_NO_WOL)
7124 return -EINVAL;
7125
7126 bp->wol = 1;
7127 }
7128 else {
7129 bp->wol = 0;
7130 }
7131
7132 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7133
7134 return 0;
7135}
7136
7137static int
7138bnx2_nway_reset(struct net_device *dev)
7139{
7140 struct bnx2 *bp = netdev_priv(dev);
7141 u32 bmcr;
7142
7143 if (!netif_running(dev))
7144 return -EAGAIN;
7145
7146 if (!(bp->autoneg & AUTONEG_SPEED)) {
7147 return -EINVAL;
7148 }
7149
7150 spin_lock_bh(&bp->phy_lock);
7151
7152 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7153 int rc;
7154
7155 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7156 spin_unlock_bh(&bp->phy_lock);
7157 return rc;
7158 }
7159
7160 /* Force the link down in a way that is visible to the other side */
7161 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7162 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7163 spin_unlock_bh(&bp->phy_lock);
7164
7165 msleep(20);
7166
7167 spin_lock_bh(&bp->phy_lock);
7168
7169 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7170 bp->serdes_an_pending = 1;
7171 mod_timer(&bp->timer, jiffies + bp->current_interval);
7172 }
7173
7174 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7175 bmcr &= ~BMCR_LOOPBACK;
7176 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7177
7178 spin_unlock_bh(&bp->phy_lock);
7179
7180 return 0;
7181}
7182
7183static u32
7184bnx2_get_link(struct net_device *dev)
7185{
7186 struct bnx2 *bp = netdev_priv(dev);
7187
7188 return bp->link_up;
7189}
7190
7191static int
7192bnx2_get_eeprom_len(struct net_device *dev)
7193{
7194 struct bnx2 *bp = netdev_priv(dev);
7195
7196 if (!bp->flash_info)
7197 return 0;
7198
7199 return (int) bp->flash_size;
7200}
7201
7202static int
7203bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7204 u8 *eebuf)
7205{
7206 struct bnx2 *bp = netdev_priv(dev);
7207 int rc;
7208
7209 /* parameters already validated in ethtool_get_eeprom */
7210
7211 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7212
7213 return rc;
7214}
7215
7216static int
7217bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7218 u8 *eebuf)
7219{
7220 struct bnx2 *bp = netdev_priv(dev);
7221 int rc;
7222
7223 /* parameters already validated in ethtool_set_eeprom */
7224
7225 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7226
7227 return rc;
7228}
7229
7230static int bnx2_get_coalesce(struct net_device *dev,
7231 struct ethtool_coalesce *coal,
7232 struct kernel_ethtool_coalesce *kernel_coal,
7233 struct netlink_ext_ack *extack)
7234{
7235 struct bnx2 *bp = netdev_priv(dev);
7236
7237 memset(coal, 0, sizeof(struct ethtool_coalesce));
7238
7239 coal->rx_coalesce_usecs = bp->rx_ticks;
7240 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7241 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7242 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7243
7244 coal->tx_coalesce_usecs = bp->tx_ticks;
7245 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7246 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7247 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7248
7249 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7250
7251 return 0;
7252}
7253
7254static int bnx2_set_coalesce(struct net_device *dev,
7255 struct ethtool_coalesce *coal,
7256 struct kernel_ethtool_coalesce *kernel_coal,
7257 struct netlink_ext_ack *extack)
7258{
7259 struct bnx2 *bp = netdev_priv(dev);
7260
7261 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7262 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7263
7264 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7265 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7266
7267 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7268 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7269
7270 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7271 if (bp->rx_quick_cons_trip_int > 0xff)
7272 bp->rx_quick_cons_trip_int = 0xff;
7273
7274 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7275 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7276
7277 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7278 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7279
7280 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7281 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7282
7283 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7284 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7285 0xff;
7286
7287 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7288 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7289 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7290 bp->stats_ticks = USEC_PER_SEC;
7291 }
7292 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7293 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7294 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7295
7296 if (netif_running(bp->dev)) {
7297 bnx2_netif_stop(bp, true);
7298 bnx2_init_nic(bp, 0);
7299 bnx2_netif_start(bp, true);
7300 }
7301
7302 return 0;
7303}
7304
7305static void
7306bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7307 struct kernel_ethtool_ringparam *kernel_ering,
7308 struct netlink_ext_ack *extack)
7309{
7310 struct bnx2 *bp = netdev_priv(dev);
7311
7312 ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7313 ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7314
7315 ering->rx_pending = bp->rx_ring_size;
7316 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7317
7318 ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7319 ering->tx_pending = bp->tx_ring_size;
7320}
7321
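/* Resize the rings. If the device is running, save the chip statistics,
 * reset the chip, release memory and (optionally) IRQs, then reallocate
 * and re-init with the new sizes; on failure the device is closed
 * rather than left half-configured.
 */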
7322static int
7323bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7324{
7325 if (netif_running(bp->dev)) {
7326 /* Reset will erase chipset stats; save them */
7327 bnx2_save_stats(bp);
7328
7329 bnx2_netif_stop(bp, true);
7330 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7331 if (reset_irq) {
7332 bnx2_free_irq(bp);
7333 bnx2_del_napi(bp);
7334 } else {
7335 __bnx2_free_irq(bp);
7336 }
7337 bnx2_free_skbs(bp);
7338 bnx2_free_mem(bp);
7339 }
7340
7341 bnx2_set_rx_ring_size(bp, rx);
7342 bp->tx_ring_size = tx;
7343
7344 if (netif_running(bp->dev)) {
7345 int rc = 0;
7346
7347 if (reset_irq) {
7348 rc = bnx2_setup_int_mode(bp, disable_msi);
7349 bnx2_init_napi(bp);
7350 }
7351
7352 if (!rc)
7353 rc = bnx2_alloc_mem(bp);
7354
7355 if (!rc)
7356 rc = bnx2_request_irq(bp);
7357
7358 if (!rc)
7359 rc = bnx2_init_nic(bp, 0);
7360
7361 if (rc) {
7362 bnx2_napi_enable(bp);
7363 dev_close(bp->dev);
7364 return rc;
7365 }
7366#ifdef BCM_CNIC
7367 mutex_lock(&bp->cnic_lock);
7368 /* Let cnic know about the new status block. */
7369 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7370 bnx2_setup_cnic_irq_info(bp);
7371 mutex_unlock(&bp->cnic_lock);
7372#endif
7373 bnx2_netif_start(bp, true);
7374 }
7375 return 0;
7376}
7377
7378static int
7379bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7380 struct kernel_ethtool_ringparam *kernel_ering,
7381 struct netlink_ext_ack *extack)
7382{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	return bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
				     false);
7395}
7396
7397static void
7398bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7399{
7400 struct bnx2 *bp = netdev_priv(dev);
7401
7402 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7403 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7404 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7405}
7406
7407static int
7408bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7409{
7410 struct bnx2 *bp = netdev_priv(dev);
7411
7412 bp->req_flow_ctrl = 0;
7413 if (epause->rx_pause)
7414 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7415 if (epause->tx_pause)
7416 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7417
	if (epause->autoneg)
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	else
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7424
7425 if (netif_running(dev)) {
7426 spin_lock_bh(&bp->phy_lock);
7427 bnx2_setup_phy(bp, bp->phy_port);
7428 spin_unlock_bh(&bp->phy_lock);
7429 }
7430
7431 return 0;
7432}
7433
7434static struct {
7435 char string[ETH_GSTRING_LEN];
7436} bnx2_stats_str_arr[] = {
7437 { "rx_bytes" },
7438 { "rx_error_bytes" },
7439 { "tx_bytes" },
7440 { "tx_error_bytes" },
7441 { "rx_ucast_packets" },
7442 { "rx_mcast_packets" },
7443 { "rx_bcast_packets" },
7444 { "tx_ucast_packets" },
7445 { "tx_mcast_packets" },
7446 { "tx_bcast_packets" },
7447 { "tx_mac_errors" },
7448 { "tx_carrier_errors" },
7449 { "rx_crc_errors" },
7450 { "rx_align_errors" },
7451 { "tx_single_collisions" },
7452 { "tx_multi_collisions" },
7453 { "tx_deferred" },
7454 { "tx_excess_collisions" },
7455 { "tx_late_collisions" },
7456 { "tx_total_collisions" },
7457 { "rx_fragments" },
7458 { "rx_jabbers" },
7459 { "rx_undersize_packets" },
7460 { "rx_oversize_packets" },
7461 { "rx_64_byte_packets" },
7462 { "rx_65_to_127_byte_packets" },
7463 { "rx_128_to_255_byte_packets" },
7464 { "rx_256_to_511_byte_packets" },
7465 { "rx_512_to_1023_byte_packets" },
7466 { "rx_1024_to_1522_byte_packets" },
7467 { "rx_1523_to_9022_byte_packets" },
7468 { "tx_64_byte_packets" },
7469 { "tx_65_to_127_byte_packets" },
7470 { "tx_128_to_255_byte_packets" },
7471 { "tx_256_to_511_byte_packets" },
7472 { "tx_512_to_1023_byte_packets" },
7473 { "tx_1024_to_1522_byte_packets" },
7474 { "tx_1523_to_9022_byte_packets" },
7475 { "rx_xon_frames" },
7476 { "rx_xoff_frames" },
7477 { "tx_xon_frames" },
7478 { "tx_xoff_frames" },
7479 { "rx_mac_ctrl_frames" },
7480 { "rx_filtered_packets" },
7481 { "rx_ftq_discards" },
7482 { "rx_discards" },
7483 { "rx_fw_discards" },
7484};
7485
7486#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7487
7488#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7489
7490static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7491 STATS_OFFSET32(stat_IfHCInOctets_hi),
7492 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7493 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7494 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7495 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7496 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7497 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7498 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7499 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7500 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7501 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7502 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7503 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7504 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7505 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7506 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7507 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7508 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7509 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7510 STATS_OFFSET32(stat_EtherStatsCollisions),
7511 STATS_OFFSET32(stat_EtherStatsFragments),
7512 STATS_OFFSET32(stat_EtherStatsJabbers),
7513 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7514 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7515 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7516 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7517 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7518 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7519 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7520 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7521 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7522 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7523 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7524 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7525 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7526 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7527 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7528 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7529 STATS_OFFSET32(stat_XonPauseFramesReceived),
7530 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7531 STATS_OFFSET32(stat_OutXonSent),
7532 STATS_OFFSET32(stat_OutXoffSent),
7533 STATS_OFFSET32(stat_MacControlFramesReceived),
7534 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7535 STATS_OFFSET32(stat_IfInFTQDiscards),
7536 STATS_OFFSET32(stat_IfInMBUFDiscards),
7537 STATS_OFFSET32(stat_FwRxDrop),
7538};
7539
7540/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7541 * skipped because of errata.
7542 */
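/* Each entry below is the width in bytes of the matching counter in
 * bnx2_stats_offset_arr: 8 for a 64-bit counter, 4 for a 32-bit
 * counter, and 0 for a counter that must be skipped on that chip.
 */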
7543static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7544 8,0,8,8,8,8,8,8,8,8,
7545 4,0,4,4,4,4,4,4,4,4,
7546 4,4,4,4,4,4,4,4,4,4,
7547 4,4,4,4,4,4,4,4,4,4,
7548 4,4,4,4,4,4,4,
7549};
7550
7551static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7552 8,0,8,8,8,8,8,8,8,8,
7553 4,4,4,4,4,4,4,4,4,4,
7554 4,4,4,4,4,4,4,4,4,4,
7555 4,4,4,4,4,4,4,4,4,4,
7556 4,4,4,4,4,4,4,
7557};
7558
7559#define BNX2_NUM_TESTS 6
7560
7561static struct {
7562 char string[ETH_GSTRING_LEN];
7563} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7564 { "register_test (offline)" },
7565 { "memory_test (offline)" },
7566 { "loopback_test (offline)" },
7567 { "nvram_test (online)" },
7568 { "interrupt_test (online)" },
7569 { "link_test (online)" },
7570};
7571
7572static int
7573bnx2_get_sset_count(struct net_device *dev, int sset)
7574{
7575 switch (sset) {
7576 case ETH_SS_TEST:
7577 return BNX2_NUM_TESTS;
7578 case ETH_SS_STATS:
7579 return BNX2_NUM_STATS;
7580 default:
7581 return -EOPNOTSUPP;
7582 }
7583}
7584
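/* ethtool self-test. The offline tests (register, memory, loopback)
 * reset the chip into diagnostic mode and need the device quiesced;
 * the online tests (nvram, interrupt, link) run against the live
 * device. buf[] gets one result slot per test, non-zero on failure.
 */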
7585static void
7586bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7587{
7588 struct bnx2 *bp = netdev_priv(dev);
7589
7590 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7591 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7592 int i;
7593
7594 bnx2_netif_stop(bp, true);
7595 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7596 bnx2_free_skbs(bp);
7597
7598 if (bnx2_test_registers(bp) != 0) {
7599 buf[0] = 1;
7600 etest->flags |= ETH_TEST_FL_FAILED;
7601 }
7602 if (bnx2_test_memory(bp) != 0) {
7603 buf[1] = 1;
7604 etest->flags |= ETH_TEST_FL_FAILED;
7605 }
7606 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7607 etest->flags |= ETH_TEST_FL_FAILED;
7608
		if (!netif_running(bp->dev)) {
			bnx2_shutdown_chip(bp);
		} else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}
7615
7616 /* wait for link up */
7617 for (i = 0; i < 7; i++) {
7618 if (bp->link_up)
7619 break;
7620 msleep_interruptible(1000);
7621 }
7622 }
7623
7624 if (bnx2_test_nvram(bp) != 0) {
7625 buf[3] = 1;
7626 etest->flags |= ETH_TEST_FL_FAILED;
7627 }
7628 if (bnx2_test_intr(bp) != 0) {
7629 buf[4] = 1;
7630 etest->flags |= ETH_TEST_FL_FAILED;
7631 }
7632
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
7638}
7639
7640static void
7641bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7642{
7643 switch (stringset) {
7644 case ETH_SS_STATS:
7645 memcpy(buf, bnx2_stats_str_arr,
7646 sizeof(bnx2_stats_str_arr));
7647 break;
7648 case ETH_SS_TEST:
7649 memcpy(buf, bnx2_tests_str_arr,
7650 sizeof(bnx2_tests_str_arr));
7651 break;
7652 }
7653}
7654
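/* Each reported counter is the live hardware counter plus the value
 * accumulated in temp_stats_blk, where statistics are saved across
 * chip resets (see bnx2_save_stats()).
 */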
7655static void
7656bnx2_get_ethtool_stats(struct net_device *dev,
7657 struct ethtool_stats *stats, u64 *buf)
7658{
7659 struct bnx2 *bp = netdev_priv(dev);
7660 int i;
7661 u32 *hw_stats = (u32 *) bp->stats_blk;
7662 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7663 u8 *stats_len_arr = NULL;
7664
7665 if (!hw_stats) {
7666 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7667 return;
7668 }
7669
7670 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7671 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7672 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7673 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7674 stats_len_arr = bnx2_5706_stats_len_arr;
7675 else
7676 stats_len_arr = bnx2_5708_stats_len_arr;
7677
7678 for (i = 0; i < BNX2_NUM_STATS; i++) {
7679 unsigned long offset;
7680
7681 if (stats_len_arr[i] == 0) {
7682 /* skip this counter */
7683 buf[i] = 0;
7684 continue;
7685 }
7686
7687 offset = bnx2_stats_offset_arr[i];
7688 if (stats_len_arr[i] == 4) {
7689 /* 4-byte counter */
7690 buf[i] = (u64) *(hw_stats + offset) +
7691 *(temp_stats + offset);
7692 continue;
7693 }
7694 /* 8-byte counter */
7695 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7696 *(hw_stats + offset + 1) +
7697 (((u64) *(temp_stats + offset)) << 32) +
7698 *(temp_stats + offset + 1);
7699 }
7700}
7701
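/* ethtool LED identify callback. ETHTOOL_ID_ACTIVE saves the LED
 * configuration and returns 1 so the core toggles the LED once per
 * second via ETHTOOL_ID_ON/ETHTOOL_ID_OFF; ETHTOOL_ID_INACTIVE
 * restores the saved configuration.
 */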
7702static int
7703bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7704{
7705 struct bnx2 *bp = netdev_priv(dev);
7706
7707 switch (state) {
7708 case ETHTOOL_ID_ACTIVE:
7709 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7710 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7711 return 1; /* cycle on/off once per second */
7712
7713 case ETHTOOL_ID_ON:
7714 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7715 BNX2_EMAC_LED_1000MB_OVERRIDE |
7716 BNX2_EMAC_LED_100MB_OVERRIDE |
7717 BNX2_EMAC_LED_10MB_OVERRIDE |
7718 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7719 BNX2_EMAC_LED_TRAFFIC);
7720 break;
7721
7722 case ETHTOOL_ID_OFF:
7723 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7724 break;
7725
7726 case ETHTOOL_ID_INACTIVE:
7727 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7728 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7729 break;
7730 }
7731
7732 return 0;
7733}
7734
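/* Toggling RX VLAN acceleration changes whether the chip strips the
 * tag, so the rx mode must be reprogrammed and the firmware notified;
 * returning 1 signals that dev->features has already been updated.
 */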
7735static int
7736bnx2_set_features(struct net_device *dev, netdev_features_t features)
7737{
7738 struct bnx2 *bp = netdev_priv(dev);
7739
7740 /* TSO with VLAN tag won't work with current firmware */
7741 if (features & NETIF_F_HW_VLAN_CTAG_TX)
7742 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7743 else
7744 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7745
7746 if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7747 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7748 netif_running(dev)) {
7749 bnx2_netif_stop(bp, false);
7750 dev->features = features;
7751 bnx2_set_rx_mode(dev);
7752 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7753 bnx2_netif_start(bp, false);
7754 return 1;
7755 }
7756
7757 return 0;
7758}
7759
7760static void bnx2_get_channels(struct net_device *dev,
7761 struct ethtool_channels *channels)
7762{
7763 struct bnx2 *bp = netdev_priv(dev);
7764 u32 max_rx_rings = 1;
7765 u32 max_tx_rings = 1;
7766
7767 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7768 max_rx_rings = RX_MAX_RINGS;
7769 max_tx_rings = TX_MAX_RINGS;
7770 }
7771
7772 channels->max_rx = max_rx_rings;
7773 channels->max_tx = max_tx_rings;
7774 channels->max_other = 0;
7775 channels->max_combined = 0;
7776 channels->rx_count = bp->num_rx_rings;
7777 channels->tx_count = bp->num_tx_rings;
7778 channels->other_count = 0;
7779 channels->combined_count = 0;
7780}
7781
7782static int bnx2_set_channels(struct net_device *dev,
7783 struct ethtool_channels *channels)
7784{
7785 struct bnx2 *bp = netdev_priv(dev);
7786 u32 max_rx_rings = 1;
7787 u32 max_tx_rings = 1;
7788 int rc = 0;
7789
7790 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7791 max_rx_rings = RX_MAX_RINGS;
7792 max_tx_rings = TX_MAX_RINGS;
7793 }
7794 if (channels->rx_count > max_rx_rings ||
7795 channels->tx_count > max_tx_rings)
7796 return -EINVAL;
7797
7798 bp->num_req_rx_rings = channels->rx_count;
7799 bp->num_req_tx_rings = channels->tx_count;
7800
7801 if (netif_running(dev))
7802 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7803 bp->tx_ring_size, true);
7804
7805 return rc;
7806}
7807
7808static const struct ethtool_ops bnx2_ethtool_ops = {
7809 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
7810 ETHTOOL_COALESCE_MAX_FRAMES |
7811 ETHTOOL_COALESCE_USECS_IRQ |
7812 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
7813 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
7814 .get_drvinfo = bnx2_get_drvinfo,
7815 .get_regs_len = bnx2_get_regs_len,
7816 .get_regs = bnx2_get_regs,
7817 .get_wol = bnx2_get_wol,
7818 .set_wol = bnx2_set_wol,
7819 .nway_reset = bnx2_nway_reset,
7820 .get_link = bnx2_get_link,
7821 .get_eeprom_len = bnx2_get_eeprom_len,
7822 .get_eeprom = bnx2_get_eeprom,
7823 .set_eeprom = bnx2_set_eeprom,
7824 .get_coalesce = bnx2_get_coalesce,
7825 .set_coalesce = bnx2_set_coalesce,
7826 .get_ringparam = bnx2_get_ringparam,
7827 .set_ringparam = bnx2_set_ringparam,
7828 .get_pauseparam = bnx2_get_pauseparam,
7829 .set_pauseparam = bnx2_set_pauseparam,
7830 .self_test = bnx2_self_test,
7831 .get_strings = bnx2_get_strings,
7832 .set_phys_id = bnx2_set_phys_id,
7833 .get_ethtool_stats = bnx2_get_ethtool_stats,
7834 .get_sset_count = bnx2_get_sset_count,
7835 .get_channels = bnx2_get_channels,
7836 .set_channels = bnx2_set_channels,
7837 .get_link_ksettings = bnx2_get_link_ksettings,
7838 .set_link_ksettings = bnx2_set_link_ksettings,
7839};
7840
7841/* Called with rtnl_lock */
7842static int
7843bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7844{
7845 struct mii_ioctl_data *data = if_mii(ifr);
7846 struct bnx2 *bp = netdev_priv(dev);
7847 int err;
7848
	switch (cmd) {
7850 case SIOCGMIIPHY:
7851 data->phy_id = bp->phy_addr;
7852
7853 fallthrough;
7854 case SIOCGMIIREG: {
7855 u32 mii_regval;
7856
7857 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7858 return -EOPNOTSUPP;
7859
7860 if (!netif_running(dev))
7861 return -EAGAIN;
7862
7863 spin_lock_bh(&bp->phy_lock);
7864 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7865 spin_unlock_bh(&bp->phy_lock);
7866
7867 data->val_out = mii_regval;
7868
7869 return err;
7870 }
7871
7872 case SIOCSMIIREG:
7873 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7874 return -EOPNOTSUPP;
7875
7876 if (!netif_running(dev))
7877 return -EAGAIN;
7878
7879 spin_lock_bh(&bp->phy_lock);
7880 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7881 spin_unlock_bh(&bp->phy_lock);
7882
7883 return err;
7884
7885 default:
7886 /* do nothing */
7887 break;
7888 }
7889 return -EOPNOTSUPP;
7890}
7891
7892/* Called with rtnl_lock */
7893static int
7894bnx2_change_mac_addr(struct net_device *dev, void *p)
7895{
7896 struct sockaddr *addr = p;
7897 struct bnx2 *bp = netdev_priv(dev);
7898
7899 if (!is_valid_ether_addr(addr->sa_data))
7900 return -EADDRNOTAVAIL;
7901
7902 eth_hw_addr_set(dev, addr->sa_data);
7903 if (netif_running(dev))
7904 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7905
7906 return 0;
7907}
7908
7909/* Called with rtnl_lock */
7910static int
7911bnx2_change_mtu(struct net_device *dev, int new_mtu)
7912{
7913 struct bnx2 *bp = netdev_priv(dev);
7914
7915 WRITE_ONCE(dev->mtu, new_mtu);
7916 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7917 false);
7918}
7919
7920#ifdef CONFIG_NET_POLL_CONTROLLER
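/* netpoll entry point: invoke each interrupt handler directly with its
 * vector masked so the rings are serviced even when interrupts cannot
 * be taken (e.g. netconsole).
 */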
7921static void
7922poll_bnx2(struct net_device *dev)
7923{
7924 struct bnx2 *bp = netdev_priv(dev);
7925 int i;
7926
7927 for (i = 0; i < bp->irq_nvecs; i++) {
7928 struct bnx2_irq *irq = &bp->irq_tbl[i];
7929
7930 disable_irq(irq->vector);
7931 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7932 enable_irq(irq->vector);
7933 }
7934}
7935#endif
7936
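/* Determine copper vs SerDes on the dual-media 5709 from the bond ID,
 * falling back to the strap value (overridden or latched), which maps
 * differently for function 0 and function 1.
 */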
7937static void
7938bnx2_get_5709_media(struct bnx2 *bp)
7939{
7940 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7941 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7942 u32 strap;
7943
7944 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7945 return;
7946 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7947 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7948 return;
7949 }
7950
7951 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7952 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7953 else
7954 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7955
7956 if (bp->func == 0) {
7957 switch (strap) {
7958 case 0x4:
7959 case 0x5:
7960 case 0x6:
7961 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7962 return;
7963 }
7964 } else {
7965 switch (strap) {
7966 case 0x1:
7967 case 0x2:
7968 case 0x4:
7969 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7970 return;
7971 }
7972 }
7973}
7974
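/* Work out whether the device sits on a PCI or PCI-X bus, and at what
 * speed, from the chip's own view in BNX2_PCICFG_MISC_STATUS and the
 * clock control register.
 */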
7975static void
7976bnx2_get_pci_speed(struct bnx2 *bp)
7977{
7978 u32 reg;
7979
7980 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7981 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7982 u32 clkreg;
7983
7984 bp->flags |= BNX2_FLAG_PCIX;
7985
7986 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7987
7988 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7989 switch (clkreg) {
7990 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7991 bp->bus_speed_mhz = 133;
7992 break;
7993
7994 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7995 bp->bus_speed_mhz = 100;
7996 break;
7997
7998 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7999 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
8000 bp->bus_speed_mhz = 66;
8001 break;
8002
8003 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8004 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8005 bp->bus_speed_mhz = 50;
8006 break;
8007
8008 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8009 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8010 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8011 bp->bus_speed_mhz = 33;
8012 break;
8013 }
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
8026
8027static void
8028bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8029{
8030 unsigned int len;
8031 int rc, i, j;
8032 u8 *data;
8033
8034#define BNX2_VPD_NVRAM_OFFSET 0x300
8035#define BNX2_VPD_LEN 128
8036#define BNX2_MAX_VER_SLEN 30
8037
8038 data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL);
8039 if (!data)
8040 return;
8041
8042 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
8043 if (rc)
8044 goto vpd_done;
8045
8046 for (i = 0; i < BNX2_VPD_LEN; i += 4)
8047 swab32s((u32 *)&data[i]);
8048
8049 j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
8050 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
8051 if (j < 0)
8052 goto vpd_done;
8053
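	/* "1028" is Dell's PCI vendor ID; the VPD version string is only
	 * trusted on Dell-branded boards.
	 */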
8054 if (len != 4 || memcmp(&data[j], "1028", 4))
8055 goto vpd_done;
8056
8057 j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
8058 PCI_VPD_RO_KEYWORD_VENDOR0,
8059 &len);
8060 if (j < 0)
8061 goto vpd_done;
8062
8063 if (len > BNX2_MAX_VER_SLEN)
8064 goto vpd_done;
8065
8066 memcpy(bp->fw_version, &data[j], len);
8067 bp->fw_version[len] = ' ';
8068
8069vpd_done:
8070 kfree(data);
8071}
8072
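/* One-time board setup at probe time: enable and map the PCI device,
 * work out the chip, bus and DMA capabilities, read the bootcode and
 * management firmware versions and the permanent MAC address from
 * shared memory, and set default coalescing and ring parameters.
 */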
8073static int
8074bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8075{
8076 struct bnx2 *bp;
8077 int rc, i, j;
8078 u32 reg;
8079 u64 dma_mask, persist_dma_mask;
8080
8081 SET_NETDEV_DEV(dev, &pdev->dev);
8082 bp = netdev_priv(dev);
8083
8084 bp->flags = 0;
8085 bp->phy_flags = 0;
8086
8087 bp->temp_stats_blk =
8088 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8089
8090 if (!bp->temp_stats_blk) {
8091 rc = -ENOMEM;
8092 goto err_out;
8093 }
8094
8095 /* enable device (incl. PCI PM wakeup), and bus-mastering */
8096 rc = pci_enable_device(pdev);
8097 if (rc) {
8098 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8099 goto err_out;
8100 }
8101
8102 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8103 dev_err(&pdev->dev,
8104 "Cannot find PCI device base address, aborting\n");
8105 rc = -ENODEV;
8106 goto err_out_disable;
8107 }
8108
8109 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8110 if (rc) {
8111 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8112 goto err_out_disable;
8113 }
8114
8115 pci_set_master(pdev);
8116
8117 bp->pm_cap = pdev->pm_cap;
8118 if (bp->pm_cap == 0) {
8119 dev_err(&pdev->dev,
8120 "Cannot find power management capability, aborting\n");
8121 rc = -EIO;
8122 goto err_out_release;
8123 }
8124
8125 bp->dev = dev;
8126 bp->pdev = pdev;
8127
8128 spin_lock_init(&bp->phy_lock);
8129 spin_lock_init(&bp->indirect_lock);
8130#ifdef BCM_CNIC
8131 mutex_init(&bp->cnic_lock);
8132#endif
8133 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8134
8135 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8136 TX_MAX_TSS_RINGS + 1));
8137 if (!bp->regview) {
8138 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8139 rc = -ENOMEM;
8140 goto err_out_release;
8141 }
8142
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
8147 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8148 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8149 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8150
8151 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8152
8153 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8154 if (!pci_is_pcie(pdev)) {
8155 dev_err(&pdev->dev, "Not PCIE, aborting\n");
8156 rc = -EIO;
8157 goto err_out_unmap;
8158 }
8159 bp->flags |= BNX2_FLAG_PCIE;
8160 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8161 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8162 } else {
8163 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8164 if (bp->pcix_cap == 0) {
8165 dev_err(&pdev->dev,
8166 "Cannot find PCIX capability, aborting\n");
8167 rc = -EIO;
8168 goto err_out_unmap;
8169 }
8170 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8171 }
8172
8173 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8174 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8175 if (pdev->msix_cap)
8176 bp->flags |= BNX2_FLAG_MSIX_CAP;
8177 }
8178
8179 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8180 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8181 if (pdev->msi_cap)
8182 bp->flags |= BNX2_FLAG_MSI_CAP;
8183 }
8184
8185 /* 5708 cannot support DMA addresses > 40-bit. */
8186 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8187 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8188 else
8189 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8190
8191 /* Configure DMA attributes. */
8192 if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
8193 dev->features |= NETIF_F_HIGHDMA;
8194 rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
8195 if (rc) {
8196 dev_err(&pdev->dev,
8197 "dma_set_coherent_mask failed, aborting\n");
8198 goto err_out_unmap;
8199 }
8200 } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
8201 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8202 goto err_out_unmap;
8203 }
8204
8205 if (!(bp->flags & BNX2_FLAG_PCIE))
8206 bnx2_get_pci_speed(bp);
8207
8208 /* 5706A0 may falsely detect SERR and PERR. */
8209 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8210 reg = BNX2_RD(bp, PCI_COMMAND);
8211 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8212 BNX2_WR(bp, PCI_COMMAND, reg);
8213 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8214 !(bp->flags & BNX2_FLAG_PCIX)) {
8215 dev_err(&pdev->dev,
8216 "5706 A1 can only be used in a PCIX bus, aborting\n");
8217 rc = -EPERM;
8218 goto err_out_unmap;
8219 }
8220
8221 bnx2_init_nvram(bp);
8222
8223 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8224
8225 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8226 bp->func = 1;
8227
8228 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8229 BNX2_SHM_HDR_SIGNATURE_SIG) {
8230 u32 off = bp->func << 2;
8231
8232 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8233 } else
8234 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8235
8236 /* Get the permanent MAC address. First we need to make sure the
8237 * firmware is actually running.
8238 */
8239 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8240
8241 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8242 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8243 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8244 rc = -ENODEV;
8245 goto err_out_unmap;
8246 }
8247
8248 bnx2_read_vpd_fw_ver(bp);
8249
8250 j = strlen(bp->fw_version);
8251 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
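	/* Append "bc x.y.z", decoding the three bootcode version bytes
	 * packed into the upper bytes of BNX2_DEV_INFO_BC_REV as decimal
	 * with leading zeros suppressed.
	 */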
8252 for (i = 0; i < 3 && j < 24; i++) {
8253 u8 num, k, skip0;
8254
8255 if (i == 0) {
8256 bp->fw_version[j++] = 'b';
8257 bp->fw_version[j++] = 'c';
8258 bp->fw_version[j++] = ' ';
8259 }
8260 num = (u8) (reg >> (24 - (i * 8)));
8261 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8262 if (num >= k || !skip0 || k == 1) {
8263 bp->fw_version[j++] = (num / k) + '0';
8264 skip0 = 0;
8265 }
8266 }
8267 if (i != 2)
8268 bp->fw_version[j++] = '.';
8269 }
8270 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8271 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8272 bp->wol = 1;
8273
8274 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8275 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8276
8277 for (i = 0; i < 30; i++) {
8278 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8279 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8280 break;
8281 msleep(10);
8282 }
8283 }
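	/* If management firmware is running, append its version string
	 * (up to 12 bytes, read via the shared-memory pointer) after
	 * the bootcode version.
	 */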
8284 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8285 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8286 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8287 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8288 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8289
8290 if (j < 32)
8291 bp->fw_version[j++] = ' ';
8292 for (i = 0; i < 3 && j < 28; i++) {
8293 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8294 reg = be32_to_cpu(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
8296 j += 4;
8297 }
8298 }
8299
8300 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8301 bp->mac_addr[0] = (u8) (reg >> 8);
8302 bp->mac_addr[1] = (u8) reg;
8303
8304 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8305 bp->mac_addr[2] = (u8) (reg >> 24);
8306 bp->mac_addr[3] = (u8) (reg >> 16);
8307 bp->mac_addr[4] = (u8) (reg >> 8);
8308 bp->mac_addr[5] = (u8) reg;
8309
8310 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8311 bnx2_set_rx_ring_size(bp, 255);
8312
8313 bp->tx_quick_cons_trip_int = 2;
8314 bp->tx_quick_cons_trip = 20;
8315 bp->tx_ticks_int = 18;
8316 bp->tx_ticks = 80;
8317
8318 bp->rx_quick_cons_trip_int = 2;
8319 bp->rx_quick_cons_trip = 12;
8320 bp->rx_ticks_int = 18;
8321 bp->rx_ticks = 18;
8322
8323 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8324
8325 bp->current_interval = BNX2_TIMER_INTERVAL;
8326
8327 bp->phy_addr = 1;
8328
8329 /* allocate stats_blk */
8330 rc = bnx2_alloc_stats_blk(dev);
8331 if (rc)
8332 goto err_out_unmap;
8333
8334 /* Disable WOL support if we are running on a SERDES chip. */
8335 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8336 bnx2_get_5709_media(bp);
8337 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8338 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8339
8340 bp->phy_port = PORT_TP;
8341 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8342 bp->phy_port = PORT_FIBRE;
8343 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8344 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8345 bp->flags |= BNX2_FLAG_NO_WOL;
8346 bp->wol = 0;
8347 }
8348 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8349 /* Don't do parallel detect on this board because of
8350 * some board problems. The link will not go down
8351 * if we do parallel detect.
8352 */
8353 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8354 pdev->subsystem_device == 0x310c)
8355 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8356 } else {
8357 bp->phy_addr = 2;
8358 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8359 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8360 }
8361 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8362 BNX2_CHIP(bp) == BNX2_CHIP_5708)
8363 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8364 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8365 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8366 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8367 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8368
8369 bnx2_init_fw_cap(bp);
8370
8371 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8372 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8373 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8374 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8375 bp->flags |= BNX2_FLAG_NO_WOL;
8376 bp->wol = 0;
8377 }
8378
8379 if (bp->flags & BNX2_FLAG_NO_WOL)
8380 device_set_wakeup_capable(&bp->pdev->dev, false);
8381 else
8382 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8383
8384 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8385 bp->tx_quick_cons_trip_int =
8386 bp->tx_quick_cons_trip;
8387 bp->tx_ticks_int = bp->tx_ticks;
8388 bp->rx_quick_cons_trip_int =
8389 bp->rx_quick_cons_trip;
8390 bp->rx_ticks_int = bp->rx_ticks;
8391 bp->comp_prod_trip_int = bp->comp_prod_trip;
8392 bp->com_ticks_int = bp->com_ticks;
8393 bp->cmd_ticks_int = bp->cmd_ticks;
8394 }
8395
8396 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8397 *
8398 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8399 * with byte enables disabled on the unused 32-bit word. This is legal
8400 * but causes problems on the AMD 8132 which will eventually stop
8401 * responding after a while.
8402 *
8403 * AMD believes this incompatibility is unique to the 5706, and
8404 * prefers to locally disable MSI rather than globally disabling it.
8405 */
8406 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8407 struct pci_dev *amd_8132 = NULL;
8408
8409 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8410 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8411 amd_8132))) {
8412
8413 if (amd_8132->revision >= 0x10 &&
8414 amd_8132->revision <= 0x13) {
8415 disable_msi = 1;
8416 pci_dev_put(amd_8132);
8417 break;
8418 }
8419 }
8420 }
8421
8422 bnx2_set_default_link(bp);
8423 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8424
8425 timer_setup(&bp->timer, bnx2_timer, 0);
8426 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8427
8428#ifdef BCM_CNIC
8429 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8430 bp->cnic_eth_dev.max_iscsi_conn =
8431 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8432 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8433 bp->cnic_probe = bnx2_cnic_probe;
8434#endif
8435 pci_save_state(pdev);
8436
8437 return 0;
8438
8439err_out_unmap:
8440 pci_iounmap(pdev, bp->regview);
8441 bp->regview = NULL;
8442
8443err_out_release:
8444 pci_release_regions(pdev);
8445
8446err_out_disable:
8447 pci_disable_device(pdev);
8448
8449err_out:
8450 kfree(bp->temp_stats_blk);
8451
8452 return rc;
8453}
8454
8455static char *
8456bnx2_bus_string(struct bnx2 *bp, char *str)
8457{
8458 char *s = str;
8459
8460 if (bp->flags & BNX2_FLAG_PCIE) {
8461 s += sprintf(s, "PCI Express");
8462 } else {
8463 s += sprintf(s, "PCI");
8464 if (bp->flags & BNX2_FLAG_PCIX)
8465 s += sprintf(s, "-X");
8466 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8467 s += sprintf(s, " 32-bit");
8468 else
8469 s += sprintf(s, " 64-bit");
8470 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8471 }
8472 return str;
8473}
8474
8475static void
8476bnx2_del_napi(struct bnx2 *bp)
8477{
8478 int i;
8479
8480 for (i = 0; i < bp->irq_nvecs; i++)
8481 netif_napi_del(&bp->bnx2_napi[i].napi);
8482}
8483
8484static void
8485bnx2_init_napi(struct bnx2 *bp)
8486{
8487 int i;
8488
8489 for (i = 0; i < bp->irq_nvecs; i++) {
8490 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8491 int (*poll)(struct napi_struct *, int);
8492
8493 if (i == 0)
8494 poll = bnx2_poll;
8495 else
8496 poll = bnx2_poll_msix;
8497
8498 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
8499 bnapi->bp = bp;
8500 }
8501}
8502
8503static const struct net_device_ops bnx2_netdev_ops = {
8504 .ndo_open = bnx2_open,
8505 .ndo_start_xmit = bnx2_start_xmit,
8506 .ndo_stop = bnx2_close,
8507 .ndo_get_stats64 = bnx2_get_stats64,
8508 .ndo_set_rx_mode = bnx2_set_rx_mode,
8509 .ndo_eth_ioctl = bnx2_ioctl,
8510 .ndo_validate_addr = eth_validate_addr,
8511 .ndo_set_mac_address = bnx2_change_mac_addr,
8512 .ndo_change_mtu = bnx2_change_mtu,
8513 .ndo_set_features = bnx2_set_features,
8514 .ndo_tx_timeout = bnx2_tx_timeout,
8515#ifdef CONFIG_NET_POLL_CONTROLLER
8516 .ndo_poll_controller = poll_bnx2,
8517#endif
8518};
8519
8520static int
8521bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8522{
8523 struct net_device *dev;
8524 struct bnx2 *bp;
8525 int rc;
8526 char str[40];
8527
	/* dev is zeroed in alloc_etherdev_mq */
8529 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8530 if (!dev)
8531 return -ENOMEM;
8532
8533 rc = bnx2_init_board(pdev, dev);
8534 if (rc < 0)
8535 goto err_free;
8536
8537 dev->netdev_ops = &bnx2_netdev_ops;
8538 dev->watchdog_timeo = TX_TIMEOUT;
8539 dev->ethtool_ops = &bnx2_ethtool_ops;
8540
8541 bp = netdev_priv(dev);
8542
8543 pci_set_drvdata(pdev, dev);
8544
	/*
	 * In-flight DMA from the first kernel could continue going in the
	 * kdump kernel. A new io-page table has been created before bnx2
	 * does its reset at open time, so we have to wait for the in-flight
	 * DMA to complete to keep it from looking up the newly created
	 * io-page table.
	 */
8551 if (is_kdump_kernel())
8552 bnx2_wait_dma_complete(bp);
8553
8554 eth_hw_addr_set(dev, bp->mac_addr);
8555
8556 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8557 NETIF_F_TSO | NETIF_F_TSO_ECN |
8558 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8559
8560 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8561 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8562
8563 dev->vlan_features = dev->hw_features;
8564 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8565 dev->features |= dev->hw_features;
8566 dev->priv_flags |= IFF_UNICAST_FLT;
8567 dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8568 dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8569
8570 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8571 dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8572
8573 if ((rc = register_netdev(dev))) {
8574 dev_err(&pdev->dev, "Cannot register net device\n");
8575 goto error;
8576 }
8577
8578 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8579 "node addr %pM\n", board_info[ent->driver_data].name,
8580 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8581 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8582 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8583 pdev->irq, dev->dev_addr);
8584
8585 return 0;
8586
8587error:
8588 pci_iounmap(pdev, bp->regview);
8589 pci_release_regions(pdev);
8590 pci_disable_device(pdev);
8591err_free:
8592 bnx2_free_stats_blk(dev);
8593 free_netdev(dev);
8594 return rc;
8595}
8596
8597static void
8598bnx2_remove_one(struct pci_dev *pdev)
8599{
8600 struct net_device *dev = pci_get_drvdata(pdev);
8601 struct bnx2 *bp = netdev_priv(dev);
8602
8603 unregister_netdev(dev);
8604
8605 del_timer_sync(&bp->timer);
8606 cancel_work_sync(&bp->reset_task);
8607
8608 pci_iounmap(bp->pdev, bp->regview);
8609
8610 bnx2_free_stats_blk(dev);
8611 kfree(bp->temp_stats_blk);
8612
8613 bnx2_release_firmware(bp);
8614
8615 free_netdev(dev);
8616
8617 pci_release_regions(pdev);
8618 pci_disable_device(pdev);
8619}
8620
8621#ifdef CONFIG_PM_SLEEP
8622static int
8623bnx2_suspend(struct device *device)
8624{
8625 struct net_device *dev = dev_get_drvdata(device);
8626 struct bnx2 *bp = netdev_priv(dev);
8627
8628 if (netif_running(dev)) {
8629 cancel_work_sync(&bp->reset_task);
8630 bnx2_netif_stop(bp, true);
8631 netif_device_detach(dev);
8632 del_timer_sync(&bp->timer);
8633 bnx2_shutdown_chip(bp);
8634 __bnx2_free_irq(bp);
8635 bnx2_free_skbs(bp);
8636 }
8637 bnx2_setup_wol(bp);
8638 return 0;
8639}
8640
8641static int
8642bnx2_resume(struct device *device)
8643{
8644 struct net_device *dev = dev_get_drvdata(device);
8645 struct bnx2 *bp = netdev_priv(dev);
8646
8647 if (!netif_running(dev))
8648 return 0;
8649
8650 bnx2_set_power_state(bp, PCI_D0);
8651 netif_device_attach(dev);
8652 bnx2_request_irq(bp);
8653 bnx2_init_nic(bp, 1);
8654 bnx2_netif_start(bp, true);
8655 return 0;
8656}
8657
8658static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8659#define BNX2_PM_OPS (&bnx2_pm_ops)
8660
8661#else
8662
8663#define BNX2_PM_OPS NULL
8664
8665#endif /* CONFIG_PM_SLEEP */
8666/**
8667 * bnx2_io_error_detected - called when PCI error is detected
8668 * @pdev: Pointer to PCI device
8669 * @state: The current pci connection state
8670 *
8671 * This function is called after a PCI bus error affecting
8672 * this device has been detected.
8673 */
8674static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8675 pci_channel_state_t state)
8676{
8677 struct net_device *dev = pci_get_drvdata(pdev);
8678 struct bnx2 *bp = netdev_priv(dev);
8679
8680 rtnl_lock();
8681 netif_device_detach(dev);
8682
8683 if (state == pci_channel_io_perm_failure) {
8684 rtnl_unlock();
8685 return PCI_ERS_RESULT_DISCONNECT;
8686 }
8687
8688 if (netif_running(dev)) {
8689 bnx2_netif_stop(bp, true);
8690 del_timer_sync(&bp->timer);
8691 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8692 }
8693
8694 pci_disable_device(pdev);
8695 rtnl_unlock();
8696
	/* Request a slot reset. */
8698 return PCI_ERS_RESULT_NEED_RESET;
8699}
8700
8701/**
8702 * bnx2_io_slot_reset - called after the pci bus has been reset.
8703 * @pdev: Pointer to PCI device
8704 *
8705 * Restart the card from scratch, as if from a cold-boot.
8706 */
8707static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8708{
8709 struct net_device *dev = pci_get_drvdata(pdev);
8710 struct bnx2 *bp = netdev_priv(dev);
8711 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8712 int err = 0;
8713
8714 rtnl_lock();
8715 if (pci_enable_device(pdev)) {
8716 dev_err(&pdev->dev,
8717 "Cannot re-enable PCI device after reset\n");
8718 } else {
8719 pci_set_master(pdev);
8720 pci_restore_state(pdev);
8721 pci_save_state(pdev);
8722
8723 if (netif_running(dev))
8724 err = bnx2_init_nic(bp, 1);
8725
8726 if (!err)
8727 result = PCI_ERS_RESULT_RECOVERED;
8728 }
8729
8730 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8731 bnx2_napi_enable(bp);
8732 dev_close(dev);
8733 }
8734 rtnl_unlock();
8735
8736 return result;
8737}
8738
8739/**
8740 * bnx2_io_resume - called when traffic can start flowing again.
8741 * @pdev: Pointer to PCI device
8742 *
8743 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
8745 */
8746static void bnx2_io_resume(struct pci_dev *pdev)
8747{
8748 struct net_device *dev = pci_get_drvdata(pdev);
8749 struct bnx2 *bp = netdev_priv(dev);
8750
8751 rtnl_lock();
8752 if (netif_running(dev))
8753 bnx2_netif_start(bp, true);
8754
8755 netif_device_attach(dev);
8756 rtnl_unlock();
8757}
8758
8759static void bnx2_shutdown(struct pci_dev *pdev)
8760{
8761 struct net_device *dev = pci_get_drvdata(pdev);
8762 struct bnx2 *bp;
8763
8764 if (!dev)
8765 return;
8766
8767 bp = netdev_priv(dev);
8768 if (!bp)
8769 return;
8770
8771 rtnl_lock();
8772 if (netif_running(dev))
8773 dev_close(bp->dev);
8774
8775 if (system_state == SYSTEM_POWER_OFF)
8776 bnx2_set_power_state(bp, PCI_D3hot);
8777
8778 rtnl_unlock();
8779}
8780
8781static const struct pci_error_handlers bnx2_err_handler = {
8782 .error_detected = bnx2_io_error_detected,
8783 .slot_reset = bnx2_io_slot_reset,
8784 .resume = bnx2_io_resume,
8785};
8786
8787static struct pci_driver bnx2_pci_driver = {
8788 .name = DRV_MODULE_NAME,
8789 .id_table = bnx2_pci_tbl,
8790 .probe = bnx2_init_one,
8791 .remove = bnx2_remove_one,
8792 .driver.pm = BNX2_PM_OPS,
8793 .err_handler = &bnx2_err_handler,
8794 .shutdown = bnx2_shutdown,
8795};
8796
8797module_pci_driver(bnx2_pci_driver);