1/*
2 * File Name:
3 * defxx.c
4 *
5 * Copyright Information:
6 * Copyright Digital Equipment Corporation 1996.
7 *
8 * This software may be used and distributed according to the terms of
9 * the GNU General Public License, incorporated herein by reference.
10 *
11 * Abstract:
12 * A Linux device driver supporting the Digital Equipment Corporation
13 * FDDI TURBOchannel, EISA and PCI controller families. Supported
14 * adapters include:
15 *
16 * DEC FDDIcontroller/TURBOchannel (DEFTA)
17 * DEC FDDIcontroller/EISA (DEFEA)
18 * DEC FDDIcontroller/PCI (DEFPA)
19 *
20 * The original author:
21 * LVS Lawrence V. Stefani <lstefani@yahoo.com>
22 *
23 * Maintainers:
24 * macro Maciej W. Rozycki <macro@linux-mips.org>
25 *
26 * Credits:
27 * I'd like to thank Patricia Cross for helping me get started with
28 * Linux, David Davies for a lot of help upgrading and configuring
29 * my development system and for answering many OS and driver
30 * development questions, and Alan Cox for recommendations and
31 * integration help on getting FDDI support into Linux. LVS
32 *
33 * Driver Architecture:
34 * The driver architecture is largely based on previous driver work
35 * for other operating systems. The upper edge interface and
36 * functions were largely taken from existing Linux device drivers
37 * such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
38 * driver.
39 *
40 * Adapter Probe -
41 * The driver scans for supported EISA adapters by reading the
42 * SLOT ID register for each EISA slot and making a match
43 * against the expected value.
44 *
45 * Bus-Specific Initialization -
46 * This driver currently supports both EISA and PCI controller
47 * families. While the custom DMA chip and FDDI logic are similar
48 * or identical, the bus logic is very different. After
49 * initialization, the only bus-specific difference is in how the
50 * driver enables and disables interrupts. Other than that, the
51 * run-time critical code behaves the same on both families.
52 * It's important to note that both adapter families are configured
53 * to I/O map, rather than memory map, the adapter registers.
54 *
55 * Driver Open/Close -
56 * In the driver open routine, the driver ISR (interrupt service
57 * routine) is registered and the adapter is brought to an
58 * operational state. In the driver close routine, the opposite
59 * occurs; the driver ISR is deregistered and the adapter is
60 * brought to a safe, but closed state. Users may use consecutive
61 * commands to bring the adapter up and down as in the following
62 * example:
63 * ifconfig fddi0 up
64 * ifconfig fddi0 down
65 * ifconfig fddi0 up
66 *
67 * Driver Shutdown -
68 * Apparently, there is no shutdown or halt routine support under
69 * Linux. This routine would be called during "reboot" or
70 * "shutdown" to allow the driver to place the adapter in a safe
71 * state before a warm reboot occurs. To be really safe, the user
72 * should close the adapter before shutdown (eg. ifconfig fddi0 down)
73 * to ensure that the adapter DMA engine is taken off-line. However,
74 * the current driver code anticipates this problem and always issues
75 * a soft reset of the adapter at the beginning of driver initialization.
76 * A future driver enhancement in this area may occur in 2.1.X where
77 * Alan indicated that a shutdown handler may be implemented.
78 *
79 * Interrupt Service Routine -
80 * The driver supports shared interrupts, so the ISR is registered for
81 * each board with the appropriate flag and the pointer to that board's
82 * device structure. This provides the context during interrupt
83 * processing to support shared interrupts and multiple boards.
84 *
85 * Interrupt enabling/disabling can occur at many levels. At the host
86 * end, you can disable system interrupts, or disable interrupts at the
87 * PIC (on Intel systems). Across the bus, both EISA and PCI adapters
88 * have a bus-logic chip interrupt enable/disable as well as a DMA
89 * controller interrupt enable/disable.
90 *
91 * The driver currently enables and disables adapter interrupts at the
92 * bus-logic chip and assumes that Linux will take care of clearing or
93 * acknowledging any host-based interrupt chips.
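 *
 *		For reference, the registration described above amounts to a
 *		single request_irq() call in dfx_open() (with the matching
 *		free_irq() in dfx_close()):
 *
 *			ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
 *					  dev->name, dev);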
94 *
95 * Control Functions -
96 * Control functions are those used to support functions such as adding
97 * or deleting multicast addresses, enabling or disabling packet
98 * reception filters, or other custom/proprietary commands. Presently,
99 * the driver supports the "get statistics", "set multicast list", and
100 * "set mac address" functions defined by Linux. Possible
101 * enhancements include:
102 *
103 * - Custom ioctl interface for executing port interface commands
104 * - Custom ioctl interface for adding unicast addresses to
105 * adapter CAM (to support bridge functions).
106 * - Custom ioctl interface for supporting firmware upgrades.
107 *
108 * Hardware (port interface) Support Routines -
109 * The driver function names that start with "dfx_hw_" represent
110 * low-level port interface routines that are called frequently. They
111 * include issuing a DMA or port control command to the adapter,
112 * resetting the adapter, or reading the adapter state. Since the
113 * driver initialization and run-time code must make calls into the
114 * port interface, these routines were written to be as generic and
115 * usable as possible.
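 *
 *		A typical call, as used by dfx_driver_init() to read the low
 *		longword of the factory MAC address:
 *
 *			u32 data;
 *
 *			if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA,
 *						 PI_PDATA_A_MLA_K_LO, 0,
 *						 &data) != DFX_K_SUCCESS)
 *				return DFX_K_FAILURE;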
116 *
117 * Receive Path -
118 * The adapter DMA engine supports a 256 entry receive descriptor block
119 * of which up to 255 entries can be used at any given time. The
120 * architecture is a standard producer, consumer, completion model in
121 * which the driver "produces" receive buffers to the adapter, the
122 * adapter "consumes" the receive buffers by DMAing incoming packet data,
123 * and the driver "completes" the receive buffers by servicing the
124 * incoming packet, then "produces" a new buffer and starts the cycle
125 * again. Receive buffers can be fragmented into up to 16 fragments
126 * (descriptor entries). For simplicity, this driver posts
127 * single-fragment receive buffers of 4608 bytes, then allocates an
128 * sk_buff, copies the data, and reposts the buffer. To reduce CPU
129 * utilization, a better approach would be to pass up the receive
130 * buffer (no extra copy) then allocate and post a replacement buffer.
131 * This is a performance enhancement that should be looked into at
132 * some point.
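 *
 *		For orientation, the "complete" step sketched above looks
 *		roughly like the following (illustrative only -- see
 *		dfx_rcv_queue_process() for the actual code; rcv_buff stands
 *		for the posted receive buffer):
 *
 *			skb = dev_alloc_skb(pkt_len + 3);
 *			skb_reserve(skb, 3);	-- keep data longword aligned
 *			memcpy(skb_put(skb, pkt_len), rcv_buff, pkt_len);
 *			skb->protocol = fddi_type_trans(skb, bp->dev);
 *			netif_rx(skb);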
133 *
134 * Transmit Path -
135 * Like the receive path, the adapter DMA engine supports a 256 entry
136 * transmit descriptor block of which up to 255 entries can be used at
137 * any given time. Transmit buffers can be fragmented into up to 255
138 * fragments (descriptor entries). This driver always posts one
139 * fragment per transmit packet request.
140 *
141 * The fragment contains the entire packet from FC to end of data.
142 * Before posting the buffer to the adapter, the driver sets a three-byte
143 * packet request header (PRH) which is required by the Motorola MAC chip
144 * used on the adapters. The PRH tells the MAC the type of token to
145 * receive/send, whether or not to generate and append the CRC, whether
146 * synchronous or asynchronous framing is used, etc. Since the PRH
147 * definition is not necessarily consistent across all FDDI chipsets,
148 * the driver, rather than the common FDDI packet handler routines,
149 * sets these bytes.
150 *
151 * To reduce the number of descriptor fetches needed per transmit request,
152 * the driver takes advantage of the fact that there are at least three
153 * bytes available before the skb->data field on the outgoing transmit
154 * request. This is guaranteed by having fddi_setup() in net_init.c set
155 * dev->hard_header_len to 24 bytes. 21 bytes accounts for the largest
156 * header in an 802.2 SNAP frame. The other 3 bytes are the extra "pad"
157 * bytes which we'll use to store the PRH.
158 *
159 * There's a subtle advantage to adding these pad bytes to the
160 * hard_header_len: it ensures that the data portion of the packet for
161 * an 802.2 SNAP frame is longword aligned. Other FDDI driver
162 * implementations may not need the extra padding and can start copying
163 * or DMAing directly from the FC byte which starts at skb->data. Should
164 * another driver implementation need ADDITIONAL padding, the net_init.c
165 * module should be updated and dev->hard_header_len should be increased.
166 * NOTE: To maintain the alignment on the data portion of the packet,
167 * dev->hard_header_len should always be evenly divisible by 4 and at
168 * least 24 bytes in size.
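 *
 *	Pictorially, the single transmit fragment described above looks like
 *	this (an illustrative layout, not lifted verbatim from
 *	dfx_xmt_queue_pkt()):
 *
 *	     skb->data - 3    skb->data
 *	          |               |
 *	          v               v
 *	     +---------------+----+----------------------------------+
 *	     | PRH (3 bytes) | FC | DA, SA, LLC/SNAP header, data ... |
 *	     +---------------+----+----------------------------------+
 *
 *	The transmit routine therefore only has to fill in the three PRH
 *	bytes immediately ahead of the FC byte and post the whole region,
 *	PRH through end of data, as one fragment.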
169 *
170 * Modification History:
171 * Date Name Description
172 * 16-Aug-96 LVS Created.
173 * 20-Aug-96 LVS Updated dfx_probe so that version information
174 * string is only displayed if 1 or more cards are
175 * found. Changed dfx_rcv_queue_process to copy
176 * 3 NULL bytes before FC to ensure that data is
177 * longword aligned in receive buffer.
178 * 09-Sep-96 LVS Updated dfx_ctl_set_multicast_list to enable
179 * LLC group promiscuous mode if multicast list
180 * is too large. LLC individual/group promiscuous
181 * mode is now disabled if IFF_PROMISC flag not set.
182 * dfx_xmt_queue_pkt no longer checks for NULL skb
183 * on Alan Cox recommendation. Added node address
184 * override support.
185 * 12-Sep-96 LVS Reset current address to factory address during
186 * device open. Updated transmit path to post a
187 * single fragment which includes PRH->end of data.
188 * Mar 2000 AC Did various cleanups for 2.3.x
189 * Jun 2000 jgarzik PCI and resource alloc cleanups
190 * Jul 2000 tjeerd Much cleanup and some bug fixes
191 * Sep 2000 tjeerd Fix leak on unload, cosmetic code cleanup
192 * Feb 2001 Skb allocation fixes
193 * Feb 2001 davej PCI enable cleanups.
194 * 04 Aug 2003 macro Converted to the DMA API.
195 * 14 Aug 2004 macro Fix device names reported.
196 * 14 Jun 2005 macro Use irqreturn_t.
197 * 23 Oct 2006 macro Big-endian host support.
198 * 14 Dec 2006 macro TURBOchannel support.
199 */
200
201/* Include files */
202#include <linux/bitops.h>
203#include <linux/compiler.h>
204#include <linux/delay.h>
205#include <linux/dma-mapping.h>
206#include <linux/eisa.h>
207#include <linux/errno.h>
208#include <linux/fddidevice.h>
209#include <linux/interrupt.h>
210#include <linux/ioport.h>
211#include <linux/kernel.h>
212#include <linux/module.h>
213#include <linux/netdevice.h>
214#include <linux/pci.h>
215#include <linux/skbuff.h>
216#include <linux/slab.h>
217#include <linux/string.h>
218#include <linux/tc.h>
219
220#include <asm/byteorder.h>
221#include <asm/io.h>
222
223#include "defxx.h"
224
225/* Version information string should be updated prior to each new release! */
226#define DRV_NAME "defxx"
227#define DRV_VERSION "v1.10"
228#define DRV_RELDATE "2006/12/14"
229
230static char version[] =
231 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
232 " Lawrence V. Stefani and others\n";
233
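/*
 * With DYNAMIC_BUFFERS defined, receive data buffers are allocated (and
 * replaced) dynamically at run time instead of being carved out of the
 * single DMA-coherent block set up in dfx_driver_init() -- see the
 * "#ifndef DYNAMIC_BUFFERS" sections below.
 */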
234#define DYNAMIC_BUFFERS 1
235
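/*
 * Received frames shorter than SKBUFF_RX_COPYBREAK bytes are copied into a
 * freshly allocated sk_buff rather than having the full-sized receive
 * buffer passed up the stack -- the usual "copybreak" trade-off between
 * copy cost and buffer memory (see dfx_rcv_queue_process()).
 */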
236#define SKBUFF_RX_COPYBREAK 200
237/*
238 * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
239 * alignment for compatibility with old EISA boards.
240 */
241#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
242
243#ifdef CONFIG_EISA
244#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
245#else
246#define DFX_BUS_EISA(dev) 0
247#endif
248
249#ifdef CONFIG_TC
250#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
251#else
252#define DFX_BUS_TC(dev) 0
253#endif
254
255#ifdef CONFIG_DEFXX_MMIO
256#define DFX_MMIO 1
257#else
258#define DFX_MMIO 0
259#endif
260
261/* Define module-wide (static) routines */
262
263static void dfx_bus_init(struct net_device *dev);
264static void dfx_bus_uninit(struct net_device *dev);
265static void dfx_bus_config_check(DFX_board_t *bp);
266
267static int dfx_driver_init(struct net_device *dev,
268 const char *print_name,
269 resource_size_t bar_start);
270static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
271
272static int dfx_open(struct net_device *dev);
273static int dfx_close(struct net_device *dev);
274
275static void dfx_int_pr_halt_id(DFX_board_t *bp);
276static void dfx_int_type_0_process(DFX_board_t *bp);
277static void dfx_int_common(struct net_device *dev);
278static irqreturn_t dfx_interrupt(int irq, void *dev_id);
279
280static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
281static void dfx_ctl_set_multicast_list(struct net_device *dev);
282static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
283static int dfx_ctl_update_cam(DFX_board_t *bp);
284static int dfx_ctl_update_filters(DFX_board_t *bp);
285
286static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
287static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
288static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
289static int dfx_hw_adap_state_rd(DFX_board_t *bp);
290static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
291
292static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
293static void dfx_rcv_queue_process(DFX_board_t *bp);
294static void dfx_rcv_flush(DFX_board_t *bp);
295
296static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
297 struct net_device *dev);
298static int dfx_xmt_done(DFX_board_t *bp);
299static void dfx_xmt_flush(DFX_board_t *bp);
300
301/* Define module-wide (static) variables */
302
303static struct pci_driver dfx_pci_driver;
304static struct eisa_driver dfx_eisa_driver;
305static struct tc_driver dfx_tc_driver;
306
307
308/*
309 * =======================
310 * = dfx_port_write_long =
311 * = dfx_port_read_long =
312 * =======================
313 *
314 * Overview:
315 * Routines for reading and writing values from/to adapter
316 *
317 * Returns:
318 * None
319 *
320 * Arguments:
321 * bp - pointer to board information
322 * offset - register offset from base I/O address
323 * data - for dfx_port_write_long, this is a value to write;
324 * for dfx_port_read_long, this is a pointer to store
325 * the read value
326 *
327 * Functional Description:
328 * These routines perform the correct operation to read or write
329 * the adapter register.
330 *
331 * EISA port block base addresses are based on the slot number in which the
332 * controller is installed. For example, if the EISA controller is installed
333 * in slot 4, the port block base address is 0x4000. If the controller is
334 * installed in slot 2, the port block base address is 0x2000, and so on.
335 * This port block can be used to access PDQ, ESIC, and DEFEA on-board
336 * registers using the register offsets defined in DEFXX.H.
337 *
338 * PCI port block base addresses are assigned by the PCI BIOS or system
339 * firmware. There is one 128 byte port block which can be accessed. It
340 * allows for I/O mapping of both PDQ and PFI registers using the register
341 * offsets defined in DEFXX.H.
342 *
343 * Return Codes:
344 * None
345 *
346 * Assumptions:
347 * bp->base is a valid base I/O address for this adapter.
348 * offset is a valid register offset for this adapter.
349 *
350 * Side Effects:
351 * Rather than produce macros for these functions, these routines
352 * are defined using "inline" to ensure that the compiler will
353 * generate inline code and not waste a procedure call and return.
354 * This provides all the benefits of macros, but with the
355 * advantage of strict data type checking.
356 */
357
358static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
359{
360 writel(data, bp->base.mem + offset);
361 mb();
362}
363
364static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
365{
366 outl(data, bp->base.port + offset);
367}
368
369static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
370{
371 struct device __maybe_unused *bdev = bp->bus_dev;
372 int dfx_bus_tc = DFX_BUS_TC(bdev);
373 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
374
375 if (dfx_use_mmio)
376 dfx_writel(bp, offset, data);
377 else
378 dfx_outl(bp, offset, data);
379}
380
381
382static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
383{
384 mb();
385 *data = readl(bp->base.mem + offset);
386}
387
388static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
389{
390 *data = inl(bp->base.port + offset);
391}
392
393static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
394{
395 struct device __maybe_unused *bdev = bp->bus_dev;
396 int dfx_bus_tc = DFX_BUS_TC(bdev);
397 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
398
399 if (dfx_use_mmio)
400 dfx_readl(bp, offset, data);
401 else
402 dfx_inl(bp, offset, data);
403}
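
/*
 * Example usage (this mirrors what dfx_int_common() does below to decide
 * whether any Type 0 interrupts need servicing):
 *
 *	PI_UINT32 port_status;
 *
 *	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
 *	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
 *		dfx_int_type_0_process(bp);
 */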
404
405
406/*
407 * ================
408 * = dfx_get_bars =
409 * ================
410 *
411 * Overview:
412 * Retrieves the address range used to access control and status
413 * registers.
414 *
415 * Returns:
416 * None
417 *
418 * Arguments:
419 * bdev - pointer to device information
420 * bar_start - pointer to store the start address
421 * bar_len - pointer to store the length of the area
422 *
423 * Assumptions:
424 * I am sure there are some.
425 *
426 * Side Effects:
427 * None
428 */
429static void dfx_get_bars(struct device *bdev,
430 resource_size_t *bar_start, resource_size_t *bar_len)
431{
432 int dfx_bus_pci = dev_is_pci(bdev);
433 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
434 int dfx_bus_tc = DFX_BUS_TC(bdev);
435 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
436
437 if (dfx_bus_pci) {
438 int num = dfx_use_mmio ? 0 : 1;
439
440 *bar_start = pci_resource_start(to_pci_dev(bdev), num);
441 *bar_len = pci_resource_len(to_pci_dev(bdev), num);
442 }
443 if (dfx_bus_eisa) {
444 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
445 resource_size_t bar;
446
447 if (dfx_use_mmio) {
448 bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
449 bar <<= 8;
450 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
451 bar <<= 8;
452 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
453 bar <<= 16;
454 *bar_start = bar;
455 bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
456 bar <<= 8;
457 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
458 bar <<= 8;
459 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
460 bar <<= 16;
461 *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
462 } else {
463 *bar_start = base_addr;
464 *bar_len = PI_ESIC_K_CSR_IO_LEN;
465 }
466 }
467 if (dfx_bus_tc) {
468 *bar_start = to_tc_dev(bdev)->resource.start +
469 PI_TC_K_CSR_OFFSET;
470 *bar_len = PI_TC_K_CSR_LEN;
471 }
472}
473
474static const struct net_device_ops dfx_netdev_ops = {
475 .ndo_open = dfx_open,
476 .ndo_stop = dfx_close,
477 .ndo_start_xmit = dfx_xmt_queue_pkt,
478 .ndo_get_stats = dfx_ctl_get_stats,
479 .ndo_set_rx_mode = dfx_ctl_set_multicast_list,
480 .ndo_set_mac_address = dfx_ctl_set_mac_address,
481};
482
483/*
484 * ================
485 * = dfx_register =
486 * ================
487 *
488 * Overview:
489 * Initializes a supported FDDI controller
490 *
491 * Returns:
492 * Condition code
493 *
494 * Arguments:
495 * bdev - pointer to device information
496 *
497 * Functional Description:
498 *
499 * Return Codes:
500 * 0 - This device (fddi0, fddi1, etc) configured successfully
501 * -EBUSY - Failed to get resources, or dfx_driver_init failed.
502 *
503 * Assumptions:
504 * It compiles so it should work :-( (PCI cards do :-)
505 *
506 * Side Effects:
507 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
508 * initialized and the board resources are read and stored in
509 * the device structure.
510 */
511static int dfx_register(struct device *bdev)
512{
513 static int version_disp;
514 int dfx_bus_pci = dev_is_pci(bdev);
515 int dfx_bus_tc = DFX_BUS_TC(bdev);
516 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
517 const char *print_name = dev_name(bdev);
518 struct net_device *dev;
519 DFX_board_t *bp; /* board pointer */
520 resource_size_t bar_start = 0; /* pointer to port */
521 resource_size_t bar_len = 0; /* resource length */
522 int alloc_size; /* total buffer size used */
523 struct resource *region;
524 int err = 0;
525
526 if (!version_disp) { /* display version info if adapter is found */
527 version_disp = 1; /* set display flag to TRUE so that */
528 printk(version); /* we only display this string ONCE */
529 }
530
531 dev = alloc_fddidev(sizeof(*bp));
532 if (!dev) {
533 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
534 print_name);
535 return -ENOMEM;
536 }
537
538 /* Enable PCI device. */
539 if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
540 printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
541 print_name);
542 err = -ENODEV;
goto err_out;
543 }
544
545 SET_NETDEV_DEV(dev, bdev);
546
547 bp = netdev_priv(dev);
548 bp->bus_dev = bdev;
549 dev_set_drvdata(bdev, dev);
550
551 dfx_get_bars(bdev, &bar_start, &bar_len);
552
553 if (dfx_use_mmio)
554 region = request_mem_region(bar_start, bar_len, print_name);
555 else
556 region = request_region(bar_start, bar_len, print_name);
557 if (!region) {
558 printk(KERN_ERR "%s: Cannot reserve I/O resource "
559 "0x%lx @ 0x%lx, aborting\n",
560 print_name, (long)bar_len, (long)bar_start);
561 err = -EBUSY;
562 goto err_out_disable;
563 }
564
565 /* Set up I/O base address. */
566 if (dfx_use_mmio) {
567 bp->base.mem = ioremap_nocache(bar_start, bar_len);
568 if (!bp->base.mem) {
569 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
570 err = -ENOMEM;
571 goto err_out_region;
572 }
573 } else {
574 bp->base.port = bar_start;
575 dev->base_addr = bar_start;
576 }
577
578 /* Initialize new device structure */
579 dev->netdev_ops = &dfx_netdev_ops;
580
581 if (dfx_bus_pci)
582 pci_set_master(to_pci_dev(bdev));
583
584 if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
585 err = -ENODEV;
586 goto err_out_unmap;
587 }
588
589 err = register_netdev(dev);
590 if (err)
591 goto err_out_kfree;
592
593 printk("%s: registered as %s\n", print_name, dev->name);
594 return 0;
595
596err_out_kfree:
597 alloc_size = sizeof(PI_DESCR_BLOCK) +
598 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
599#ifndef DYNAMIC_BUFFERS
600 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
601#endif
602 sizeof(PI_CONSUMER_BLOCK) +
603 (PI_ALIGN_K_DESC_BLK - 1);
604 if (bp->kmalloced)
605 dma_free_coherent(bdev, alloc_size,
606 bp->kmalloced, bp->kmalloced_dma);
607
608err_out_unmap:
609 if (dfx_use_mmio)
610 iounmap(bp->base.mem);
611
612err_out_region:
613 if (dfx_use_mmio)
614 release_mem_region(bar_start, bar_len);
615 else
616 release_region(bar_start, bar_len);
617
618err_out_disable:
619 if (dfx_bus_pci)
620 pci_disable_device(to_pci_dev(bdev));
621
622err_out:
623 free_netdev(dev);
624 return err;
625}
626
627
628/*
629 * ================
630 * = dfx_bus_init =
631 * ================
632 *
633 * Overview:
634 * Initializes the bus-specific controller logic.
635 *
636 * Returns:
637 * None
638 *
639 * Arguments:
640 * dev - pointer to device information
641 *
642 * Functional Description:
643 * Determine and save adapter IRQ in device table,
644 * then perform bus-specific logic initialization.
645 *
646 * Return Codes:
647 * None
648 *
649 * Assumptions:
650 * bp->base has already been set with the proper
651 * base I/O address for this device.
652 *
653 * Side Effects:
654 * Interrupts are enabled at the adapter bus-specific logic.
655 * Note: Interrupts at the DMA engine (PDQ chip) are not
656 * enabled yet.
657 */
658
659static void dfx_bus_init(struct net_device *dev)
660{
661 DFX_board_t *bp = netdev_priv(dev);
662 struct device *bdev = bp->bus_dev;
663 int dfx_bus_pci = dev_is_pci(bdev);
664 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
665 int dfx_bus_tc = DFX_BUS_TC(bdev);
666 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
667 u8 val;
668
669 DBG_printk("In dfx_bus_init...\n");
670
671 /* Initialize a pointer back to the net_device struct */
672 bp->dev = dev;
673
674 /* Initialize adapter based on bus type */
675
676 if (dfx_bus_tc)
677 dev->irq = to_tc_dev(bdev)->interrupt;
678 if (dfx_bus_eisa) {
679 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
680
681 /* Get the interrupt level from the ESIC chip. */
682 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
683 val &= PI_CONFIG_STAT_0_M_IRQ;
684 val >>= PI_CONFIG_STAT_0_V_IRQ;
685
686 switch (val) {
687 case PI_CONFIG_STAT_0_IRQ_K_9:
688 dev->irq = 9;
689 break;
690
691 case PI_CONFIG_STAT_0_IRQ_K_10:
692 dev->irq = 10;
693 break;
694
695 case PI_CONFIG_STAT_0_IRQ_K_11:
696 dev->irq = 11;
697 break;
698
699 case PI_CONFIG_STAT_0_IRQ_K_15:
700 dev->irq = 15;
701 break;
702 }
703
704 /*
705 * Enable memory decoding (MEMCS0) and/or port decoding
706 * (IOCS1/IOCS0) as appropriate in Function Control
707 * Register. One of the port chip selects seems to be
708 * used for the Burst Holdoff register, but this bit of
709 * documentation is missing and as yet it has not been
710 * determined which of the two. This is also the reason
711 * the size of the decoded port range is twice as large
712 * as one required by the PDQ.
713 */
714
715 /* Set the decode range of the board. */
716 val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
717 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
718 outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
719 outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
720 outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
721 val = PI_ESIC_K_CSR_IO_LEN - 1;
722 outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
723 outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
724 outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
725 outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
726
727 /* Enable the decoders. */
728 val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
729 if (dfx_use_mmio)
730 val |= PI_FUNCTION_CNTRL_M_MEMCS0;
731 outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
732
733 /*
734 * Enable access to the rest of the module
735 * (including PDQ and packet memory).
736 */
737 val = PI_SLOT_CNTRL_M_ENB;
738 outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);
739
740 /*
741 * Map PDQ registers into memory or port space. This is
742 * done with a bit in the Burst Holdoff register.
743 */
744 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
745 if (dfx_use_mmio)
746 val |= PI_BURST_HOLDOFF_V_MEM_MAP;
747 else
748 val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
749 outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);
750
751 /* Enable interrupts at EISA bus interface chip (ESIC) */
752 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
753 val |= PI_CONFIG_STAT_0_M_INT_ENB;
754 outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
755 }
756 if (dfx_bus_pci) {
757 struct pci_dev *pdev = to_pci_dev(bdev);
758
759 /* Get the interrupt level from the PCI Configuration Table */
760
761 dev->irq = pdev->irq;
762
763 /* Check Latency Timer and set if less than minimal */
764
765 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
766 if (val < PFI_K_LAT_TIMER_MIN) {
767 val = PFI_K_LAT_TIMER_DEF;
768 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
769 }
770
771 /* Enable interrupts at PCI bus interface chip (PFI) */
772 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
773 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
774 }
775}
776
777/*
778 * ==================
779 * = dfx_bus_uninit =
780 * ==================
781 *
782 * Overview:
783 * Uninitializes the bus-specific controller logic.
784 *
785 * Returns:
786 * None
787 *
788 * Arguments:
789 * dev - pointer to device information
790 *
791 * Functional Description:
792 * Perform bus-specific logic uninitialization.
793 *
794 * Return Codes:
795 * None
796 *
797 * Assumptions:
798 * bp->base has already been set with the proper
799 * base I/O address for this device.
800 *
801 * Side Effects:
802 * Interrupts are disabled at the adapter bus-specific logic.
803 */
804
805static void dfx_bus_uninit(struct net_device *dev)
806{
807 DFX_board_t *bp = netdev_priv(dev);
808 struct device *bdev = bp->bus_dev;
809 int dfx_bus_pci = dev_is_pci(bdev);
810 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
811 u8 val;
812
813 DBG_printk("In dfx_bus_uninit...\n");
814
815 /* Uninitialize adapter based on bus type */
816
817 if (dfx_bus_eisa) {
818 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
819
820 /* Disable interrupts at EISA bus interface chip (ESIC) */
821 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
822 val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
823 outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
824 }
825 if (dfx_bus_pci) {
826 /* Disable interrupts at PCI bus interface chip (PFI) */
827 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
828 }
829}
830
831
832/*
833 * ========================
834 * = dfx_bus_config_check =
835 * ========================
836 *
837 * Overview:
838 * Checks the configuration (burst size, full-duplex, etc.). If any parameters
839 * are illegal, then this routine will set new defaults.
840 *
841 * Returns:
842 * None
843 *
844 * Arguments:
845 * bp - pointer to board information
846 *
847 * Functional Description:
848 * For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
849 * PDQ, and all FDDI PCI controllers, all values are legal.
850 *
851 * Return Codes:
852 * None
853 *
854 * Assumptions:
855 * dfx_adap_init has NOT been called yet so burst size and other items have
856 * not been set.
857 *
858 * Side Effects:
859 * None
860 */
861
862static void dfx_bus_config_check(DFX_board_t *bp)
863{
864 struct device __maybe_unused *bdev = bp->bus_dev;
865 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
866 int status; /* return code from adapter port control call */
867 u32 host_data; /* LW data returned from port control call */
868
869 DBG_printk("In dfx_bus_config_check...\n");
870
871 /* Configuration check only valid for EISA adapter */
872
873 if (dfx_bus_eisa) {
874 /*
875 * First check if revision 2 EISA controller. Rev. 1 cards used
876 * PDQ revision B, so no workaround needed in this case. Rev. 3
877 * cards used PDQ revision E, so no workaround needed in this
878 * case, either. Only Rev. 2 cards used either Rev. D or E
879 * chips, so we must verify the chip revision on Rev. 2 cards.
880 */
881 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
882 /*
883 * Revision 2 FDDI EISA controller found,
884 * so let's check PDQ revision of adapter.
885 */
886 status = dfx_hw_port_ctrl_req(bp,
887 PI_PCTRL_M_SUB_CMD,
888 PI_SUB_CMD_K_PDQ_REV_GET,
889 0,
890 &host_data);
891 if ((status != DFX_K_SUCCESS) || (host_data == 2))
892 {
893 /*
894 * Either we couldn't determine the PDQ revision, or
895 * we determined that it is at revision D. In either case,
896 * we need to implement the workaround.
897 */
898
899 /* Ensure that the burst size is set to 8 longwords or less */
900
901 switch (bp->burst_size)
902 {
903 case PI_PDATA_B_DMA_BURST_SIZE_32:
904 case PI_PDATA_B_DMA_BURST_SIZE_16:
905 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
906 break;
907
908 default:
909 break;
910 }
911
912 /* Ensure that full-duplex mode is not enabled */
913
914 bp->full_duplex_enb = PI_SNMP_K_FALSE;
915 }
916 }
917 }
918 }
919
920
921/*
922 * ===================
923 * = dfx_driver_init =
924 * ===================
925 *
926 * Overview:
927 * Initializes remaining adapter board structure information
928 * and makes sure adapter is in a safe state prior to dfx_open().
929 *
930 * Returns:
931 * Condition code
932 *
933 * Arguments:
934 * dev - pointer to device information
935 * print_name - printable device name
936 *
937 * Functional Description:
938 * This function allocates additional resources such as the host memory
939 * blocks needed by the adapter (eg. descriptor and consumer blocks).
940 * Remaining bus initialization steps are also completed. The adapter
941 * is also reset so that it is in the DMA_UNAVAILABLE state. The OS
942 * must call dfx_open() to open the adapter and bring it on-line.
943 *
944 * Return Codes:
945 * DFX_K_SUCCESS - initialization succeeded
946 * DFX_K_FAILURE - initialization failed - could not allocate memory
947 * or read adapter MAC address
948 *
949 * Assumptions:
950 * Memory allocated from the dma_zalloc_coherent() call is physically
951 * contiguous, locked memory.
952 *
953 * Side Effects:
954 * Adapter is reset and should be in DMA_UNAVAILABLE state before
955 * returning from this routine.
956 */
957
958static int dfx_driver_init(struct net_device *dev, const char *print_name,
959 resource_size_t bar_start)
960{
961 DFX_board_t *bp = netdev_priv(dev);
962 struct device *bdev = bp->bus_dev;
963 int dfx_bus_pci = dev_is_pci(bdev);
964 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
965 int dfx_bus_tc = DFX_BUS_TC(bdev);
966 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
967 int alloc_size; /* total buffer size needed */
968 char *top_v, *curr_v; /* virtual addrs into memory block */
969 dma_addr_t top_p, curr_p; /* physical addrs into memory block */
970 u32 data; /* host data register value */
971 __le32 le32;
972 char *board_name = NULL;
973
974 DBG_printk("In dfx_driver_init...\n");
975
976 /* Initialize bus-specific hardware registers */
977
978 dfx_bus_init(dev);
979
980 /*
981 * Initialize default values for configurable parameters
982 *
983 * Note: All of these parameters are ones that a user may
984 * want to customize. It'd be nice to break these
985 * out into Space.c or someplace else that's more
986 * accessible/understandable than this file.
987 */
988
989 bp->full_duplex_enb = PI_SNMP_K_FALSE;
990 bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */
991 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
992 bp->rcv_bufs_to_post = RCV_BUFS_DEF;
993
994 /*
995 * Ensure that HW configuration is OK
996 *
997 * Note: Depending on the hardware revision, we may need to modify
998 * some of the configurable parameters to workaround hardware
999 * limitations. We'll perform this configuration check AFTER
1000 * setting the parameters to their default values.
1001 */
1002
1003 dfx_bus_config_check(bp);
1004
1005 /* Disable PDQ interrupts first */
1006
1007 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1008
1009 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1010
1011 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1012
1013 /* Read the factory MAC address from the adapter then save it */
1014
1015 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1016 &data) != DFX_K_SUCCESS) {
1017 printk("%s: Could not read adapter factory MAC address!\n",
1018 print_name);
1019 return DFX_K_FAILURE;
1020 }
1021 le32 = cpu_to_le32(data);
1022 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1023
1024 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1025 &data) != DFX_K_SUCCESS) {
1026 printk("%s: Could not read adapter factory MAC address!\n",
1027 print_name);
1028 return DFX_K_FAILURE;
1029 }
1030 le32 = cpu_to_le32(data);
1031 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1032
1033 /*
1034 * Set current address to factory address
1035 *
1036 * Note: Node address override support is handled through
1037 * dfx_ctl_set_mac_address.
1038 */
1039
1040 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1041 if (dfx_bus_tc)
1042 board_name = "DEFTA";
1043 if (dfx_bus_eisa)
1044 board_name = "DEFEA";
1045 if (dfx_bus_pci)
1046 board_name = "DEFPA";
1047 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1048 print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1049 (long long)bar_start, dev->irq, dev->dev_addr);
1050
1051 /*
1052 * Get memory for descriptor block, consumer block, and other buffers
1053 * that need to be DMA read or written to by the adapter.
1054 */
1055
1056 alloc_size = sizeof(PI_DESCR_BLOCK) +
1057 PI_CMD_REQ_K_SIZE_MAX +
1058 PI_CMD_RSP_K_SIZE_MAX +
1059#ifndef DYNAMIC_BUFFERS
1060 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1061#endif
1062 sizeof(PI_CONSUMER_BLOCK) +
1063 (PI_ALIGN_K_DESC_BLK - 1);
1064 bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
1065 &bp->kmalloced_dma,
1066 GFP_ATOMIC);
1067 if (top_v == NULL)
1068 return DFX_K_FAILURE;
1069
1070 top_p = bp->kmalloced_dma; /* get physical address of buffer */
1071
1072 /*
1073 * To guarantee the 8K alignment required for the descriptor block, 8K - 1
1074 * plus the amount of memory needed was allocated. The physical address
1075 * is now 8K aligned. By carving up the memory in a specific order,
1076 * we'll guarantee the alignment requirements for all other structures.
1077 *
1078 * Note: If the assumptions change regarding the non-paged, non-cached,
1079 * physically contiguous nature of the memory block or the address
1080 * alignments, then we'll need to implement a different algorithm
1081 * for allocating the needed memory.
1082 */
1083
1084 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1085 curr_v = top_v + (curr_p - top_p);
1086
1087 /* Reserve space for descriptor block */
1088
1089 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1090 bp->descr_block_phys = curr_p;
1091 curr_v += sizeof(PI_DESCR_BLOCK);
1092 curr_p += sizeof(PI_DESCR_BLOCK);
1093
1094 /* Reserve space for command request buffer */
1095
1096 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1097 bp->cmd_req_phys = curr_p;
1098 curr_v += PI_CMD_REQ_K_SIZE_MAX;
1099 curr_p += PI_CMD_REQ_K_SIZE_MAX;
1100
1101 /* Reserve space for command response buffer */
1102
1103 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1104 bp->cmd_rsp_phys = curr_p;
1105 curr_v += PI_CMD_RSP_K_SIZE_MAX;
1106 curr_p += PI_CMD_RSP_K_SIZE_MAX;
1107
1108 /* Reserve space for the LLC host receive queue buffers */
1109
1110 bp->rcv_block_virt = curr_v;
1111 bp->rcv_block_phys = curr_p;
1112
1113#ifndef DYNAMIC_BUFFERS
1114 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1115 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1116#endif
1117
1118 /* Reserve space for the consumer block */
1119
1120 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1121 bp->cons_block_phys = curr_p;
1122
1123 /* Display virtual and physical addresses if debug driver */
1124
1125 DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
1126 print_name,
1127 (long)bp->descr_block_virt, bp->descr_block_phys);
1128 DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
1129 print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
1130 DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
1131 print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
1132 DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
1133 print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
1134 DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
1135 print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
1136
1137 return DFX_K_SUCCESS;
1138}
1139
1140
1141/*
1142 * =================
1143 * = dfx_adap_init =
1144 * =================
1145 *
1146 * Overview:
1147 * Brings the adapter to the link avail/link unavailable state.
1148 *
1149 * Returns:
1150 * Condition code
1151 *
1152 * Arguments:
1153 * bp - pointer to board information
1154 * get_buffers - non-zero if buffers to be allocated
1155 *
1156 * Functional Description:
1157 * Issues the low-level firmware/hardware calls necessary to bring
1158 * the adapter up, or to properly reset and restore adapter during
1159 * run-time.
1160 *
1161 * Return Codes:
1162 * DFX_K_SUCCESS - Adapter brought up successfully
1163 * DFX_K_FAILURE - Adapter initialization failed
1164 *
1165 * Assumptions:
1166 * bp->reset_type should be set to a valid reset type value before
1167 * calling this routine.
1168 *
1169 * Side Effects:
1170 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1171 * upon a successful return of this routine.
1172 */
1173
1174static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1175 {
1176 DBG_printk("In dfx_adap_init...\n");
1177
1178 /* Disable PDQ interrupts first */
1179
1180 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1181
1182 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1183
1184 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1185 {
1186 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1187 return DFX_K_FAILURE;
1188 }
1189
1190 /*
1191 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1192 * so we'll acknowledge all Type 0 interrupts now before continuing.
1193 */
1194
1195 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1196
1197 /*
1198 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1199 *
1200 * Note: We only need to clear host copies of these registers. The PDQ reset
1201 * takes care of the on-board register values.
1202 */
1203
1204 bp->cmd_req_reg.lword = 0;
1205 bp->cmd_rsp_reg.lword = 0;
1206 bp->rcv_xmt_reg.lword = 0;
1207
1208 /* Clear consumer block before going to DMA_AVAILABLE state */
1209
1210 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1211
1212 /* Initialize the DMA Burst Size */
1213
1214 if (dfx_hw_port_ctrl_req(bp,
1215 PI_PCTRL_M_SUB_CMD,
1216 PI_SUB_CMD_K_BURST_SIZE_SET,
1217 bp->burst_size,
1218 NULL) != DFX_K_SUCCESS)
1219 {
1220 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1221 return DFX_K_FAILURE;
1222 }
1223
1224 /*
1225 * Set base address of Consumer Block
1226 *
1227 * Assumption: 32-bit physical address of consumer block is 64 byte
1228 * aligned. That is, bits 0-5 of the address must be zero.
1229 */
1230
1231 if (dfx_hw_port_ctrl_req(bp,
1232 PI_PCTRL_M_CONS_BLOCK,
1233 bp->cons_block_phys,
1234 0,
1235 NULL) != DFX_K_SUCCESS)
1236 {
1237 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1238 return DFX_K_FAILURE;
1239 }
1240
1241 /*
1242 * Set the base address of Descriptor Block and bring adapter
1243 * to DMA_AVAILABLE state.
1244 *
1245 * Note: We also set the literal and data swapping requirements
1246 * in this command.
1247 *
1248 * Assumption: 32-bit physical address of descriptor block
1249 * is 8Kbyte aligned.
1250 */
1251 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1252 (u32)(bp->descr_block_phys |
1253 PI_PDATA_A_INIT_M_BSWAP_INIT),
1254 0, NULL) != DFX_K_SUCCESS) {
1255 printk("%s: Could not set descriptor block address!\n",
1256 bp->dev->name);
1257 return DFX_K_FAILURE;
1258 }
1259
1260 /* Set transmit flush timeout value */
1261
1262 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1263 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
1264 bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */
1265 bp->cmd_req_virt->char_set.item[0].item_index = 0;
1266 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
1267 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1268 {
1269 printk("%s: DMA command request failed!\n", bp->dev->name);
1270 return DFX_K_FAILURE;
1271 }
1272
1273 /* Set the initial values for eFDXEnable and MACTReq MIB objects */
1274
1275 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1276 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
1277 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
1278 bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
1279 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
1280 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
1281 bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
1282 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
1283 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1284 {
1285 printk("%s: DMA command request failed!\n", bp->dev->name);
1286 return DFX_K_FAILURE;
1287 }
1288
1289 /* Initialize adapter CAM */
1290
1291 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1292 {
1293 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1294 return DFX_K_FAILURE;
1295 }
1296
1297 /* Initialize adapter filters */
1298
1299 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1300 {
1301 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1302 return DFX_K_FAILURE;
1303 }
1304
1305 /*
1306 * Remove any existing dynamic buffers (i.e. if the adapter is being
1307 * reinitialized)
1308 */
1309
1310 if (get_buffers)
1311 dfx_rcv_flush(bp);
1312
1313 /* Initialize receive descriptor block and produce buffers */
1314
1315 if (dfx_rcv_init(bp, get_buffers))
1316 {
1317 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1318 if (get_buffers)
1319 dfx_rcv_flush(bp);
1320 return DFX_K_FAILURE;
1321 }
1322
1323 /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1324
1325 bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1326 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1327 {
1328 printk("%s: Start command failed\n", bp->dev->name);
1329 if (get_buffers)
1330 dfx_rcv_flush(bp);
1331 return DFX_K_FAILURE;
1332 }
1333
1334 /* Initialization succeeded, reenable PDQ interrupts */
1335
1336 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1337 return DFX_K_SUCCESS;
1338 }
1339
1340
1341/*
1342 * ============
1343 * = dfx_open =
1344 * ============
1345 *
1346 * Overview:
1347 * Opens the adapter
1348 *
1349 * Returns:
1350 * Condition code
1351 *
1352 * Arguments:
1353 * dev - pointer to device information
1354 *
1355 * Functional Description:
1356 * This function brings the adapter to an operational state.
1357 *
1358 * Return Codes:
1359 * 0 - Adapter was successfully opened
1360 * -EAGAIN - Could not register IRQ or adapter initialization failed
1361 *
1362 * Assumptions:
1363 * This routine should only be called for a device that was
1364 * initialized successfully.
1365 *
1366 * Side Effects:
1367 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1368 * if the open is successful.
1369 */
1370
1371static int dfx_open(struct net_device *dev)
1372{
1373 DFX_board_t *bp = netdev_priv(dev);
1374 int ret;
1375
1376 DBG_printk("In dfx_open...\n");
1377
1378 /* Register IRQ - support shared interrupts by passing device ptr */
1379
1380 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1381 dev);
1382 if (ret) {
1383 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1384 return ret;
1385 }
1386
1387 /*
1388 * Set current address to factory MAC address
1389 *
1390 * Note: We've already done this step in dfx_driver_init.
1391 * However, it's possible that a user has set a node
1392 * address override, then closed and reopened the
1393 * adapter. Unless we reset the device address field
1394 * now, we'll continue to use the existing modified
1395 * address.
1396 */
1397
1398 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1399
1400 /* Clear local unicast/multicast address tables and counts */
1401
1402 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1403 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1404 bp->uc_count = 0;
1405 bp->mc_count = 0;
1406
1407 /* Disable promiscuous filter settings */
1408
1409 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1410 bp->group_prom = PI_FSTATE_K_BLOCK;
1411
1412 spin_lock_init(&bp->lock);
1413
1414 /* Reset and initialize adapter */
1415
1416 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */
1417 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1418 {
1419 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1420 free_irq(dev->irq, dev);
1421 return -EAGAIN;
1422 }
1423
1424 /* Set device structure info */
1425 netif_start_queue(dev);
1426 return 0;
1427}
1428
1429
1430/*
1431 * =============
1432 * = dfx_close =
1433 * =============
1434 *
1435 * Overview:
1436 * Closes the device/module.
1437 *
1438 * Returns:
1439 * Condition code
1440 *
1441 * Arguments:
1442 * dev - pointer to device information
1443 *
1444 * Functional Description:
1445 * This routine closes the adapter and brings it to a safe state.
1446 * The interrupt service routine is deregistered with the OS.
1447 * The adapter can be opened again with another call to dfx_open().
1448 *
1449 * Return Codes:
1450 * Always return 0.
1451 *
1452 * Assumptions:
1453 * No further requests for this adapter are made after this routine is
1454 * called. dfx_open() can be called to reset and reinitialize the
1455 * adapter.
1456 *
1457 * Side Effects:
1458 * Adapter should be in DMA_UNAVAILABLE state upon completion of this
1459 * routine.
1460 */
1461
1462static int dfx_close(struct net_device *dev)
1463{
1464 DFX_board_t *bp = netdev_priv(dev);
1465
1466 DBG_printk("In dfx_close...\n");
1467
1468 /* Disable PDQ interrupts first */
1469
1470 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1471
1472 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1473
1474 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1475
1476 /*
1477 * Flush any pending transmit buffers
1478 *
1479 * Note: It's important that we flush the transmit buffers
1480 * BEFORE we clear our copy of the Type 2 register.
1481 * Otherwise, we'll have no idea how many buffers
1482 * we need to free.
1483 */
1484
1485 dfx_xmt_flush(bp);
1486
1487 /*
1488 * Clear Type 1 and Type 2 registers after adapter reset
1489 *
1490 * Note: Even though we're closing the adapter, it's
1491 * possible that an interrupt will occur after
1492 * dfx_close is called. Without some assurance to
1493 * the contrary we want to make sure that we don't
1494 * process receive and transmit LLC frames and update
1495 * the Type 2 register with bad information.
1496 */
1497
1498 bp->cmd_req_reg.lword = 0;
1499 bp->cmd_rsp_reg.lword = 0;
1500 bp->rcv_xmt_reg.lword = 0;
1501
1502 /* Clear consumer block for the same reason given above */
1503
1504 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1505
1506 /* Release all dynamically allocated skbs in the receive ring. */
1507
1508 dfx_rcv_flush(bp);
1509
1510 /* Clear device structure flags */
1511
1512 netif_stop_queue(dev);
1513
1514 /* Deregister (free) IRQ */
1515
1516 free_irq(dev->irq, dev);
1517
1518 return 0;
1519}
1520
1521
1522/*
1523 * ======================
1524 * = dfx_int_pr_halt_id =
1525 * ======================
1526 *
1527 * Overview:
1528 * Displays halt id's in string form.
1529 *
1530 * Returns:
1531 * None
1532 *
1533 * Arguments:
1534 * bp - pointer to board information
1535 *
1536 * Functional Description:
1537 * Determine current halt id and display appropriate string.
1538 *
1539 * Return Codes:
1540 * None
1541 *
1542 * Assumptions:
1543 * None
1544 *
1545 * Side Effects:
1546 * None
1547 */
1548
1549static void dfx_int_pr_halt_id(DFX_board_t *bp)
1550 {
1551 PI_UINT32 port_status; /* PDQ port status register value */
1552 PI_UINT32 halt_id; /* PDQ port status halt ID */
1553
1554 /* Read the latest port status */
1555
1556 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1557
1558 /* Display halt state transition information */
1559
1560 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1561 switch (halt_id)
1562 {
1563 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1564 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1565 break;
1566
1567 case PI_HALT_ID_K_PARITY_ERROR:
1568 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1569 break;
1570
1571 case PI_HALT_ID_K_HOST_DIR_HALT:
1572 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1573 break;
1574
1575 case PI_HALT_ID_K_SW_FAULT:
1576 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1577 break;
1578
1579 case PI_HALT_ID_K_HW_FAULT:
1580 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1581 break;
1582
1583 case PI_HALT_ID_K_PC_TRACE:
1584 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1585 break;
1586
1587 case PI_HALT_ID_K_DMA_ERROR:
1588 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1589 break;
1590
1591 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1592 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1593 break;
1594
1595 case PI_HALT_ID_K_BUS_EXCEPTION:
1596 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1597 break;
1598
1599 default:
1600 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1601 break;
1602 }
1603 }
1604
1605
1606/*
1607 * ==========================
1608 * = dfx_int_type_0_process =
1609 * ==========================
1610 *
1611 * Overview:
1612 * Processes Type 0 interrupts.
1613 *
1614 * Returns:
1615 * None
1616 *
1617 * Arguments:
1618 * bp - pointer to board information
1619 *
1620 * Functional Description:
1621 * Processes all enabled Type 0 interrupts. If the reason for the interrupt
1622 * is a serious fault on the adapter, then an error message is displayed
1623 * and the adapter is reset.
1624 *
1625 * One tricky potential timing window is the rapid succession of "link avail"
1626 * "link unavail" state change interrupts. The acknowledgement of the Type 0
1627 * interrupt must be done before reading the state from the Port Status
1628 * register. This is true because a state change could occur after reading
1629 * the data, but before acknowledging the interrupt. If this state change
1630 * does happen, it would be lost because the driver is using the old state,
1631 * and it will never know about the new state because it subsequently
1632 * acknowledges the state change interrupt.
1633 *
1634 * INCORRECT CORRECT
1635 * read type 0 int reasons read type 0 int reasons
1636 * read adapter state ack type 0 interrupts
1637 * ack type 0 interrupts read adapter state
1638 * ... process interrupt ... ... process interrupt ...
1639 *
1640 * Return Codes:
1641 * None
1642 *
1643 * Assumptions:
1644 * None
1645 *
1646 * Side Effects:
1647 * An adapter reset may occur if the adapter has any Type 0 error interrupts
1648 * or if the port status indicates that the adapter is halted. The driver
1649 * is responsible for reinitializing the adapter with the current CAM
1650 * contents and adapter filter settings.
1651 */
1652
1653static void dfx_int_type_0_process(DFX_board_t *bp)
1654
1655 {
1656 PI_UINT32 type_0_status; /* Host Interrupt Type 0 register */
1657 PI_UINT32 state; /* current adap state (from port status) */
1658
1659 /*
1660 * Read host interrupt Type 0 register to determine which Type 0
1661 * interrupts are pending. Immediately write it back out to clear
1662 * those interrupts.
1663 */
1664
1665 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1666 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1667
1668 /* Check for Type 0 error interrupts */
1669
1670 if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1671 PI_TYPE_0_STAT_M_PM_PAR_ERR |
1672 PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1673 {
1674 /* Check for Non-Existent Memory error */
1675
1676 if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1677 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1678
1679 /* Check for Packet Memory Parity error */
1680
1681 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1682 printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1683
1684 /* Check for Host Bus Parity error */
1685
1686 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1687 printk("%s: Host Bus Parity Error\n", bp->dev->name);
1688
1689 /* Reset adapter and bring it back on-line */
1690
1691 bp->link_available = PI_K_FALSE; /* link is no longer available */
1692 bp->reset_type = 0; /* rerun on-board diagnostics */
1693 printk("%s: Resetting adapter...\n", bp->dev->name);
1694 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1695 {
1696 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1697 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1698 return;
1699 }
1700 printk("%s: Adapter reset successful!\n", bp->dev->name);
1701 return;
1702 }
1703
1704 /* Check for transmit flush interrupt */
1705
1706 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1707 {
1708 /* Flush any pending xmt's and acknowledge the flush interrupt */
1709
1710 bp->link_available = PI_K_FALSE; /* link is no longer available */
1711 dfx_xmt_flush(bp); /* flush any outstanding packets */
1712 (void) dfx_hw_port_ctrl_req(bp,
1713 PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1714 0,
1715 0,
1716 NULL);
1717 }
1718
1719 /* Check for adapter state change */
1720
1721 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1722 {
1723 /* Get latest adapter state */
1724
1725 state = dfx_hw_adap_state_rd(bp); /* get adapter state */
1726 if (state == PI_STATE_K_HALTED)
1727 {
1728 /*
1729 * Adapter has transitioned to HALTED state, try to reset
1730 * adapter to bring it back on-line. If reset fails,
1731 * leave the adapter in the broken state.
1732 */
1733
1734 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1735 dfx_int_pr_halt_id(bp); /* display halt id as string */
1736
1737 /* Reset adapter and bring it back on-line */
1738
1739 bp->link_available = PI_K_FALSE; /* link is no longer available */
1740 bp->reset_type = 0; /* rerun on-board diagnostics */
1741 printk("%s: Resetting adapter...\n", bp->dev->name);
1742 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1743 {
1744 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1745 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1746 return;
1747 }
1748 printk("%s: Adapter reset successful!\n", bp->dev->name);
1749 }
1750 else if (state == PI_STATE_K_LINK_AVAIL)
1751 {
1752 bp->link_available = PI_K_TRUE; /* set link available flag */
1753 }
1754 }
1755 }
1756
1757
1758/*
1759 * ==================
1760 * = dfx_int_common =
1761 * ==================
1762 *
1763 * Overview:
1764 * Interrupt service routine (ISR)
1765 *
1766 * Returns:
1767 * None
1768 *
1769 * Arguments:
1770 * bp - pointer to board information
1771 *
1772 * Functional Description:
1773 *   Common interrupt processing called from dfx_interrupt (the registered ISR) to service incoming adapter interrupts.
1774 *
1775 * Return Codes:
1776 * None
1777 *
1778 * Assumptions:
1779 * This routine assumes PDQ interrupts have not been disabled.
1780 * When interrupts are disabled at the PDQ, the Port Status register
1781 * is automatically cleared. This routine uses the Port Status
1782 * register value to determine whether a Type 0 interrupt occurred,
1783 * so it's important that adapter interrupts are not normally
1784 * enabled/disabled at the PDQ.
1785 *
1786 * It's vital that this routine is NOT reentered for the
1787 * same board and that the OS is not in another section of
1788 * code (eg. dfx_xmt_queue_pkt) for the same board on a
1789 * different thread.
1790 *
1791 * Side Effects:
1792 * Pending interrupts are serviced. Depending on the type of
1793 * interrupt, acknowledging and clearing the interrupt at the
1794 * PDQ involves writing a register to clear the interrupt bit
1795 * or updating completion indices.
1796 */
1797
1798static void dfx_int_common(struct net_device *dev)
1799{
1800 DFX_board_t *bp = netdev_priv(dev);
1801 PI_UINT32 port_status; /* Port Status register */
1802
1803 /* Process xmt interrupts - frequent case, so always call this routine */
1804
1805 if(dfx_xmt_done(bp)) /* free consumed xmt packets */
1806 netif_wake_queue(dev);
1807
1808 /* Process rcv interrupts - frequent case, so always call this routine */
1809
1810 dfx_rcv_queue_process(bp); /* service received LLC frames */
1811
1812 /*
1813 * Transmit and receive producer and completion indices are updated on the
1814 * adapter by writing to the Type 2 Producer register. Since the frequent
1815 * case is that we'll be processing either LLC transmit or receive buffers,
1816 * we'll optimize I/O writes by doing a single register write here.
1817 */
1818
1819 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1820
1821 /* Read PDQ Port Status register to find out which interrupts need processing */
1822
1823 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1824
1825 /* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1826
1827 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1828 dfx_int_type_0_process(bp); /* process Type 0 interrupts */
1829 }
1830
1831
1832/*
1833 * =================
1834 * = dfx_interrupt =
1835 * =================
1836 *
1837 * Overview:
1838 * Interrupt processing routine
1839 *
1840 * Returns:
1841 * Whether a valid interrupt was seen.
1842 *
1843 * Arguments:
1844 * irq - interrupt vector
1845 * dev_id - pointer to device information
1846 *
1847 * Functional Description:
1848 * This routine calls the interrupt processing routine for this adapter. It
1849 * disables and reenables adapter interrupts, as appropriate. We can support
1850 * shared interrupts since the incoming dev_id pointer provides our device
1851 * structure context.
1852 *
1853 * Return Codes:
1854 * IRQ_HANDLED - an IRQ was handled.
1855 * IRQ_NONE - no IRQ was handled.
1856 *
1857 * Assumptions:
1858 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1859 * on Intel-based systems) is done by the operating system outside this
1860 * routine.
1861 *
1862 * System interrupts are enabled through this call.
1863 *
1864 * Side Effects:
1865 * Interrupts are disabled, then reenabled at the adapter.
1866 */
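/*
 * For reference, a minimal sketch (not a verbatim copy of this driver's
 * open path) of how a shared handler like this one is registered, so that
 * the dev_id argument below comes back as our net_device pointer:
 *
 *	err = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
 *			  dev->name, dev);
 *	if (err)
 *		return err;
 */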
1867
1868static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1869{
1870 struct net_device *dev = dev_id;
1871 DFX_board_t *bp = netdev_priv(dev);
1872 struct device *bdev = bp->bus_dev;
1873 int dfx_bus_pci = dev_is_pci(bdev);
1874 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1875 int dfx_bus_tc = DFX_BUS_TC(bdev);
1876
1877 /* Service adapter interrupts */
1878
1879 if (dfx_bus_pci) {
1880 u32 status;
1881
1882 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1883 if (!(status & PFI_STATUS_M_PDQ_INT))
1884 return IRQ_NONE;
1885
1886 spin_lock(&bp->lock);
1887
1888 /* Disable PDQ-PFI interrupts at PFI */
1889 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1890 PFI_MODE_M_DMA_ENB);
1891
1892 /* Call interrupt service routine for this adapter */
1893 dfx_int_common(dev);
1894
1895 /* Clear PDQ interrupt status bit and reenable interrupts */
1896 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1897 PFI_STATUS_M_PDQ_INT);
1898 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1899 (PFI_MODE_M_PDQ_INT_ENB |
1900 PFI_MODE_M_DMA_ENB));
1901
1902 spin_unlock(&bp->lock);
1903 }
1904 if (dfx_bus_eisa) {
1905 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1906 u8 status;
1907
1908 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1909 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1910 return IRQ_NONE;
1911
1912 spin_lock(&bp->lock);
1913
1914 /* Disable interrupts at the ESIC */
1915 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1916		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1917
1918 /* Call interrupt service routine for this adapter */
1919 dfx_int_common(dev);
1920
1921 /* Reenable interrupts at the ESIC */
1922 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1923 status |= PI_CONFIG_STAT_0_M_INT_ENB;
1924		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1925
1926 spin_unlock(&bp->lock);
1927 }
1928 if (dfx_bus_tc) {
1929 u32 status;
1930
1931 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1932 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1933 PI_PSTATUS_M_XMT_DATA_PENDING |
1934 PI_PSTATUS_M_SMT_HOST_PENDING |
1935 PI_PSTATUS_M_UNSOL_PENDING |
1936 PI_PSTATUS_M_CMD_RSP_PENDING |
1937 PI_PSTATUS_M_CMD_REQ_PENDING |
1938 PI_PSTATUS_M_TYPE_0_PENDING)))
1939 return IRQ_NONE;
1940
1941 spin_lock(&bp->lock);
1942
1943 /* Call interrupt service routine for this adapter */
1944 dfx_int_common(dev);
1945
1946 spin_unlock(&bp->lock);
1947 }
1948
1949 return IRQ_HANDLED;
1950}
1951
1952
1953/*
1954 * =====================
1955 * = dfx_ctl_get_stats =
1956 * =====================
1957 *
1958 * Overview:
1959 * Get statistics for FDDI adapter
1960 *
1961 * Returns:
1962 * Pointer to FDDI statistics structure
1963 *
1964 * Arguments:
1965 * dev - pointer to device information
1966 *
1967 * Functional Description:
1968 * Gets current MIB objects from adapter, then
1969 * returns FDDI statistics structure as defined
1970 * in if_fddi.h.
1971 *
1972 * Note: Since the FDDI statistics structure is
1973 * still new and the device structure doesn't
1974 * have an FDDI-specific get statistics handler,
1975 * we'll return the FDDI statistics structure as
1976 * a pointer to an Ethernet statistics structure.
1977 * That way, at least the first part of the statistics
1978 * structure can be decoded properly, and it allows
1979 * "smart" applications to perform a second cast to
1980 * decode the FDDI-specific statistics.
1981 *
1982 * We'll have to pay attention to this routine as the
1983 * device structure becomes more mature and LAN media
1984 * independent.
1985 *
1986 * Return Codes:
1987 * None
1988 *
1989 * Assumptions:
1990 * None
1991 *
1992 * Side Effects:
1993 * None
1994 */
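/*
 * Note on the casts used below: returning &bp->stats as a
 * struct net_device_stats pointer works because the driver's FDDI
 * statistics structure begins with the generic "gen" member filled in
 * first below, so the leading fields line up with what callers expecting
 * Ethernet-style statistics will read, as described above.
 */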
1995
1996static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
1997 {
1998 DFX_board_t *bp = netdev_priv(dev);
1999
2000 /* Fill the bp->stats structure with driver-maintained counters */
2001
2002 bp->stats.gen.rx_packets = bp->rcv_total_frames;
2003 bp->stats.gen.tx_packets = bp->xmt_total_frames;
2004 bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
2005 bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
2006 bp->stats.gen.rx_errors = bp->rcv_crc_errors +
2007 bp->rcv_frame_status_errors +
2008 bp->rcv_length_errors;
2009 bp->stats.gen.tx_errors = bp->xmt_length_errors;
2010 bp->stats.gen.rx_dropped = bp->rcv_discards;
2011 bp->stats.gen.tx_dropped = bp->xmt_discards;
2012 bp->stats.gen.multicast = bp->rcv_multicast_frames;
2013 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */
2014
2015 /* Get FDDI SMT MIB objects */
2016
2017 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2018 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2019 return (struct net_device_stats *)&bp->stats;
2020
2021 /* Fill the bp->stats structure with the SMT MIB object values */
2022
2023 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2024 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2025 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2026 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2027 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2028 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2029 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2030 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2031 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2032 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2033 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2034 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2035 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2036 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2037 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2038 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2039 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2040 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2041 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2042 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2043 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2044 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2045 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2046 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2047 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2048 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2049 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2050 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2051 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2052 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2053 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2054 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2055 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2056 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2057 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2058 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2059 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2060 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2061 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2062 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2063 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2064 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2065 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2066 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2067 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2068 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2069 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2070 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2071 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2072 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2073 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2074 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2075 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2076 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2077 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2078 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2079 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2080 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2081 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2082 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2083 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2084 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2085 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2086 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2087 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2088 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2089 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2090 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2091 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2092 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2093 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2094 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2095 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2096 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2097 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2098 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2099 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2100 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2101 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2102 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2103 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2104 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2105 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2106 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2107 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2108 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2109 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2110 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2111 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2112 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2113 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2114 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2115
2116 /* Get FDDI counters */
2117
2118 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2119 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2120 return (struct net_device_stats *)&bp->stats;
2121
2122 /* Fill the bp->stats structure with the FDDI counter values */
2123
2124 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2125 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2126 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2127 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2128 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2129 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2130 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2131 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2132 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2133 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2134 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2135
2136 return (struct net_device_stats *)&bp->stats;
2137 }
2138
2139
2140/*
2141 * ==============================
2142 * = dfx_ctl_set_multicast_list =
2143 * ==============================
2144 *
2145 * Overview:
2146 * Enable/Disable LLC frame promiscuous mode reception
2147 * on the adapter and/or update multicast address table.
2148 *
2149 * Returns:
2150 * None
2151 *
2152 * Arguments:
2153 * dev - pointer to device information
2154 *
2155 * Functional Description:
2156 * This routine follows a fairly simple algorithm for setting the
2157 * adapter filters and CAM:
2158 *
2159 * if IFF_PROMISC flag is set
2160 * enable LLC individual/group promiscuous mode
2161 * else
2162 * disable LLC individual/group promiscuous mode
2163 * if number of incoming multicast addresses >
2164 * (CAM max size - number of unicast addresses in CAM)
2165 * enable LLC group promiscuous mode
2166 * set driver-maintained multicast address count to zero
2167 * else
2168 * disable LLC group promiscuous mode
2169 * set driver-maintained multicast address count to incoming count
2170 * update adapter CAM
2171 * update adapter filters
2172 *
2173 * Return Codes:
2174 * None
2175 *
2176 * Assumptions:
2177 * Multicast addresses are presented in canonical (LSB) format.
2178 *
2179 * Side Effects:
2180 * On-board adapter CAM and filters are updated.
2181 */
2182
2183static void dfx_ctl_set_multicast_list(struct net_device *dev)
2184{
2185 DFX_board_t *bp = netdev_priv(dev);
2186 int i; /* used as index in for loop */
2187 struct netdev_hw_addr *ha;
2188
2189 /* Enable LLC frame promiscuous mode, if necessary */
2190
2191 if (dev->flags & IFF_PROMISC)
2192 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */
2193
2194 /* Else, update multicast address table */
2195
2196 else
2197 {
2198 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */
2199 /*
2200 * Check whether incoming multicast address count exceeds table size
2201 *
2202 * Note: The adapters utilize an on-board 64 entry CAM for
2203 * supporting perfect filtering of multicast packets
2204 * and bridge functions when adding unicast addresses.
2205 * There is no hash function available. To support
2206 * additional multicast addresses, the all multicast
2207 * filter (LLC group promiscuous mode) must be enabled.
2208 *
2209 * The firmware reserves two CAM entries for SMT-related
2210 * multicast addresses, which leaves 62 entries available.
2211 * The following code ensures that we're not being asked
2212 * to add more than 62 addresses to the CAM. If we are,
2213 * the driver will enable the all multicast filter.
2214 * Should the number of multicast addresses drop below
2215 * the high water mark, the filter will be disabled and
2216 * perfect filtering will be used.
2217 */
2218
2219 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2220 {
2221 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2222 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2223 }
2224 else
2225 {
2226 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
2227 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
2228 }
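		/*
		 * Worked example (assuming PI_CMD_ADDR_FILTER_K_SIZE is the 62
		 * usable entries described in the note above): with a single
		 * unicast override in the CAM (uc_count == 1), up to 61
		 * multicast addresses are perfect-filtered; asking for a 62nd
		 * tips the driver into LLC group promiscuous mode instead.
		 */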
2229
2230 /* Copy addresses to multicast address table, then update adapter CAM */
2231
2232 i = 0;
2233 netdev_for_each_mc_addr(ha, dev)
2234 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2235 ha->addr, FDDI_K_ALEN);
2236
2237 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2238 {
2239 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2240 }
2241 else
2242 {
2243 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2244 }
2245 }
2246
2247 /* Update adapter filters */
2248
2249 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2250 {
2251 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2252 }
2253 else
2254 {
2255 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2256 }
2257 }
2258
2259
2260/*
2261 * ===========================
2262 * = dfx_ctl_set_mac_address =
2263 * ===========================
2264 *
2265 * Overview:
2266 * Add node address override (unicast address) to adapter
2267 * CAM and update dev_addr field in device table.
2268 *
2269 * Returns:
2270 *   Zero (the request is always accepted)
2271 *
2272 * Arguments:
2273 * dev - pointer to device information
2274 * addr - pointer to sockaddr structure containing unicast address to add
2275 *
2276 * Functional Description:
2277 * The adapter supports node address overrides by adding one or more
2278 * unicast addresses to the adapter CAM. This is similar to adding
2279 * multicast addresses. In this routine we'll update the driver and
2280 * device structures with the new address, then update the adapter CAM
2281 * to ensure that the adapter will copy and strip frames destined and
2282 * sourced by that address.
2283 *
2284 * Return Codes:
2285 * Always returns zero.
2286 *
2287 * Assumptions:
2288 * The address pointed to by addr->sa_data is a valid unicast
2289 * address and is presented in canonical (LSB) format.
2290 *
2291 * Side Effects:
2292 * On-board adapter CAM is updated. On-board adapter filters
2293 * may be updated.
2294 */
2295
2296static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2297 {
2298 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2299 DFX_board_t *bp = netdev_priv(dev);
2300
2301 /* Copy unicast address to driver-maintained structs and update count */
2302
2303 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */
2304 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
2305 bp->uc_count = 1;
2306
2307 /*
2308 * Verify we're not exceeding the CAM size by adding unicast address
2309 *
2310 * Note: It's possible that before entering this routine we've
2311 * already filled the CAM with 62 multicast addresses.
2312 * Since we need to place the node address override into
2313 * the CAM, we have to check to see that we're not
2314 * exceeding the CAM size. If we are, we have to enable
2315 * the LLC group (multicast) promiscuous mode filter as
2316 * in dfx_ctl_set_multicast_list.
2317 */
2318
2319 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2320 {
2321 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2322 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2323
2324 /* Update adapter filters */
2325
2326 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2327 {
2328 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2329 }
2330 else
2331 {
2332 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2333 }
2334 }
2335
2336 /* Update adapter CAM with new unicast address */
2337
2338 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2339 {
2340 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2341 }
2342 else
2343 {
2344 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2345 }
2346 return 0; /* always return zero */
2347 }
2348
2349
2350/*
2351 * ======================
2352 * = dfx_ctl_update_cam =
2353 * ======================
2354 *
2355 * Overview:
2356 * Procedure to update adapter CAM (Content Addressable Memory)
2357 * with desired unicast and multicast address entries.
2358 *
2359 * Returns:
2360 * Condition code
2361 *
2362 * Arguments:
2363 * bp - pointer to board information
2364 *
2365 * Functional Description:
2366 * Updates adapter CAM with current contents of board structure
2367 * unicast and multicast address tables. Since there are only 62
2368 * free entries in CAM, this routine ensures that the command
2369 * request buffer is not overrun.
2370 *
2371 * Return Codes:
2372 * DFX_K_SUCCESS - Request succeeded
2373 * DFX_K_FAILURE - Request failed
2374 *
2375 * Assumptions:
2376 * All addresses being added (unicast and multicast) are in canonical
2377 * order.
2378 *
2379 * Side Effects:
2380 * On-board adapter CAM is updated.
2381 */
2382
2383static int dfx_ctl_update_cam(DFX_board_t *bp)
2384 {
2385 int i; /* used as index */
2386 PI_LAN_ADDR *p_addr; /* pointer to CAM entry */
2387
2388 /*
2389 * Fill in command request information
2390 *
2391 * Note: Even though both the unicast and multicast address
2392 * table entries are stored as contiguous 6 byte entries,
2393 * the firmware address filter set command expects each
2394 * entry to be two longwords (8 bytes total). We must be
2395 * careful to only copy the six bytes of each unicast and
2396 * multicast table entry into each command entry. This
2397 * is also why we must first clear the entire command
2398 * request buffer.
2399 */
2400
2401 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */
2402 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2403 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
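	/*
	 * Note: p_addr is a PI_LAN_ADDR pointer, so each p_addr++ below steps
	 * over a full two-longword (8 byte) command entry, while the memcpy
	 * copies only the 6 address bytes; the pad bytes of each entry stay
	 * zero thanks to the memset above.
	 */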
2404
2405 /* Now add unicast addresses to command request buffer, if any */
2406
2407 for (i=0; i < (int)bp->uc_count; i++)
2408 {
2409 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2410 {
2411 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2412 p_addr++; /* point to next command entry */
2413 }
2414 }
2415
2416 /* Now add multicast addresses to command request buffer, if any */
2417
2418 for (i=0; i < (int)bp->mc_count; i++)
2419 {
2420 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2421 {
2422 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2423 p_addr++; /* point to next command entry */
2424 }
2425 }
2426
2427 /* Issue command to update adapter CAM, then return */
2428
2429 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2430 return DFX_K_FAILURE;
2431 return DFX_K_SUCCESS;
2432 }
2433
2434
2435/*
2436 * ==========================
2437 * = dfx_ctl_update_filters =
2438 * ==========================
2439 *
2440 * Overview:
2441 * Procedure to update adapter filters with desired
2442 * filter settings.
2443 *
2444 * Returns:
2445 * Condition code
2446 *
2447 * Arguments:
2448 * bp - pointer to board information
2449 *
2450 * Functional Description:
2451 * Enables or disables filter using current filter settings.
2452 *
2453 * Return Codes:
2454 * DFX_K_SUCCESS - Request succeeded.
2455 * DFX_K_FAILURE - Request failed.
2456 *
2457 * Assumptions:
2458 * We must always pass up packets destined to the broadcast
2459 * address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2460 * broadcast filter enabled.
2461 *
2462 * Side Effects:
2463 * On-board adapter filters are updated.
2464 */
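/*
 * The request built below ends up as a four-item list: item[0] always
 * passes broadcast, item[1] carries the current ind_group_prom setting,
 * item[2] the current group_prom setting, and item[3] is the
 * PI_ITEM_K_EOL terminator.
 */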
2465
2466static int dfx_ctl_update_filters(DFX_board_t *bp)
2467 {
2468 int i = 0; /* used as index */
2469
2470 /* Fill in command request information */
2471
2472 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2473
2474 /* Initialize Broadcast filter - * ALWAYS ENABLED * */
2475
2476 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2477 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2478
2479 /* Initialize LLC Individual/Group Promiscuous filter */
2480
2481 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2482 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2483
2484 /* Initialize LLC Group Promiscuous filter */
2485
2486 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2487 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2488
2489 /* Terminate the item code list */
2490
2491 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2492
2493 /* Issue command to update adapter filters, then return */
2494
2495 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2496 return DFX_K_FAILURE;
2497 return DFX_K_SUCCESS;
2498 }
2499
2500
2501/*
2502 * ======================
2503 * = dfx_hw_dma_cmd_req =
2504 * ======================
2505 *
2506 * Overview:
2507 * Sends PDQ DMA command to adapter firmware
2508 *
2509 * Returns:
2510 * Condition code
2511 *
2512 * Arguments:
2513 * bp - pointer to board information
2514 *
2515 * Functional Description:
2516 * The command request and response buffers are posted to the adapter in the manner
2517 * described in the PDQ Port Specification:
2518 *
2519 * 1. Command Response Buffer is posted to adapter.
2520 * 2. Command Request Buffer is posted to adapter.
2521 * 3. Command Request consumer index is polled until it indicates that request
2522 * buffer has been DMA'd to adapter.
2523 * 4. Command Response consumer index is polled until it indicates that response
2524 * buffer has been DMA'd from adapter.
2525 *
2526 * This ordering ensures that a response buffer is already available for the firmware
2527 * to use once it's done processing the request buffer.
2528 *
2529 * Return Codes:
2530 * DFX_K_SUCCESS - DMA command succeeded
2531 * DFX_K_OUTSTATE - Adapter is NOT in proper state
2532 * DFX_K_HW_TIMEOUT - DMA command timed out
2533 *
2534 * Assumptions:
2535 * Command request buffer has already been filled with desired DMA command.
2536 *
2537 * Side Effects:
2538 * None
2539 */
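/*
 * Typical usage, as seen elsewhere in this file (e.g. dfx_ctl_get_stats):
 * the caller fills in bp->cmd_req_virt (at minimum cmd_type, for instance
 * PI_CMD_K_SMT_MIB_GET), calls dfx_hw_dma_cmd_req(bp), and on DFX_K_SUCCESS
 * reads the firmware's reply out of bp->cmd_rsp_virt.
 */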
2540
2541static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2542 {
2543 int status; /* adapter status */
2544 int timeout_cnt; /* used in for loops */
2545
2546 /* Make sure the adapter is in a state that we can issue the DMA command in */
2547
2548 status = dfx_hw_adap_state_rd(bp);
2549 if ((status == PI_STATE_K_RESET) ||
2550 (status == PI_STATE_K_HALTED) ||
2551 (status == PI_STATE_K_DMA_UNAVAIL) ||
2552 (status == PI_STATE_K_UPGRADE))
2553 return DFX_K_OUTSTATE;
2554
2555 /* Put response buffer on the command response queue */
2556
2557 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2558 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2559 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2560
2561 /* Bump (and wrap) the producer index and write out to register */
2562
2563 bp->cmd_rsp_reg.index.prod += 1;
2564 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2565 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
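	/*
	 * Note: the increment-and-mask sequence above relies on
	 * PI_CMD_RSP_K_NUM_ENTRIES being a power of two (which the mask-based
	 * wrap assumes); it is equivalent to
	 *
	 *	prod = (prod + 1) % PI_CMD_RSP_K_NUM_ENTRIES;
	 *
	 * so the producer index simply wraps around the ring. The same
	 * pattern is used for the request and completion indices below.
	 */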
2566
2567 /* Put request buffer on the command request queue */
2568
2569 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2570 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2571 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2572
2573 /* Bump (and wrap) the producer index and write out to register */
2574
2575 bp->cmd_req_reg.index.prod += 1;
2576 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2577 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2578
2579 /*
2580 * Here we wait for the command request consumer index to be equal
2581 * to the producer, indicating that the adapter has DMAed the request.
2582 */
2583
2584 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2585 {
2586 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2587 break;
2588 udelay(100); /* wait for 100 microseconds */
2589 }
2590 if (timeout_cnt == 0)
2591 return DFX_K_HW_TIMEOUT;
2592
2593 /* Bump (and wrap) the completion index and write out to register */
2594
2595 bp->cmd_req_reg.index.comp += 1;
2596 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2597 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2598
2599 /*
2600 * Here we wait for the command response consumer index to be equal
2601 * to the producer, indicating that the adapter has DMAed the response.
2602 */
2603
2604 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2605 {
2606 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2607 break;
2608 udelay(100); /* wait for 100 microseconds */
2609 }
2610 if (timeout_cnt == 0)
2611 return DFX_K_HW_TIMEOUT;
2612
2613 /* Bump (and wrap) the completion index and write out to register */
2614
2615 bp->cmd_rsp_reg.index.comp += 1;
2616 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2617 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2618 return DFX_K_SUCCESS;
2619 }
2620
2621
2622/*
2623 * ========================
2624 * = dfx_hw_port_ctrl_req =
2625 * ========================
2626 *
2627 * Overview:
2628 * Sends PDQ port control command to adapter firmware
2629 *
2630 * Returns:
2631 *   Condition code; the host data register value is returned in *host_data if that pointer is not NULL
2632 *
2633 * Arguments:
2634 * bp - pointer to board information
2635 * command - port control command
2636 * data_a - port data A register value
2637 * data_b - port data B register value
2638 * host_data - ptr to host data register value
2639 *
2640 * Functional Description:
2641 * Send generic port control command to adapter by writing
2642 * to various PDQ port registers, then polling for completion.
2643 *
2644 * Return Codes:
2645 * DFX_K_SUCCESS - port control command succeeded
2646 * DFX_K_HW_TIMEOUT - port control command timed out
2647 *
2648 * Assumptions:
2649 * None
2650 *
2651 * Side Effects:
2652 * None
2653 */
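/*
 * Example of how this routine is used elsewhere in this file: the transmit
 * flush acknowledgement in dfx_int_type_0_process is issued as
 *
 *	(void) dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
 *				    0, 0, NULL);
 *
 * i.e. no data register values and no interest in the HOST_DATA result.
 */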
2654
2655static int dfx_hw_port_ctrl_req(
2656 DFX_board_t *bp,
2657 PI_UINT32 command,
2658 PI_UINT32 data_a,
2659 PI_UINT32 data_b,
2660 PI_UINT32 *host_data
2661 )
2662
2663 {
2664 PI_UINT32 port_cmd; /* Port Control command register value */
2665 int timeout_cnt; /* used in for loops */
2666
2667 /* Set Command Error bit in command longword */
2668
2669 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2670
2671 /* Issue port command to the adapter */
2672
2673 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2674 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2675 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2676
2677 /* Now wait for command to complete */
2678
2679 if (command == PI_PCTRL_M_BLAST_FLASH)
2680 timeout_cnt = 600000; /* set command timeout count to 60 seconds */
2681 else
2682 timeout_cnt = 20000; /* set command timeout count to 2 seconds */
2683
2684 for (; timeout_cnt > 0; timeout_cnt--)
2685 {
2686 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2687 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2688 break;
2689 udelay(100); /* wait for 100 microseconds */
2690 }
2691 if (timeout_cnt == 0)
2692 return DFX_K_HW_TIMEOUT;
2693
2694	/*
2695	 * If the caller supplied a non-NULL host_data pointer (i.e. host_data
2696	 * is not NULL), return the contents of the HOST_DATA register in it.
2697	 * Callers that don't need the value simply pass NULL.
2698	 */
2699
2700 if (host_data != NULL)
2701 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2702 return DFX_K_SUCCESS;
2703 }
2704
2705
2706/*
2707 * =====================
2708 * = dfx_hw_adap_reset =
2709 * =====================
2710 *
2711 * Overview:
2712 * Resets adapter
2713 *
2714 * Returns:
2715 * None
2716 *
2717 * Arguments:
2718 * bp - pointer to board information
2719 * type - type of reset to perform
2720 *
2721 * Functional Description:
2722 * Issue soft reset to adapter by writing to PDQ Port Reset
2723 * register. Use incoming reset type to tell adapter what
2724 * kind of reset operation to perform.
2725 *
2726 * Return Codes:
2727 * None
2728 *
2729 * Assumptions:
2730 * This routine merely issues a soft reset to the adapter.
2731 * It is expected that after this routine returns, the caller
2732 * will appropriately poll the Port Status register for the
2733 * adapter to enter the proper state.
2734 *
2735 * Side Effects:
2736 * Internal adapter registers are cleared.
2737 */
2738
2739static void dfx_hw_adap_reset(
2740 DFX_board_t *bp,
2741 PI_UINT32 type
2742 )
2743
2744 {
2745 /* Set Reset type and assert reset */
2746
2747 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */
2748 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2749
2750	/* Wait for at least 1 microsecond per the PDQ spec; we wait 20 to be safe */
2751
2752 udelay(20);
2753
2754 /* Deassert reset */
2755
2756 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2757 }
2758
2759
2760/*
2761 * ========================
2762 * = dfx_hw_adap_state_rd =
2763 * ========================
2764 *
2765 * Overview:
2766 * Returns current adapter state
2767 *
2768 * Returns:
2769 * Adapter state per PDQ Port Specification
2770 *
2771 * Arguments:
2772 * bp - pointer to board information
2773 *
2774 * Functional Description:
2775 * Reads PDQ Port Status register and returns adapter state.
2776 *
2777 * Return Codes:
2778 * None
2779 *
2780 * Assumptions:
2781 * None
2782 *
2783 * Side Effects:
2784 * None
2785 */
2786
2787static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2788 {
2789 PI_UINT32 port_status; /* Port Status register value */
2790
2791 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2792 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2793 }
2794
2795
2796/*
2797 * =====================
2798 * = dfx_hw_dma_uninit =
2799 * =====================
2800 *
2801 * Overview:
2802 * Brings adapter to DMA_UNAVAILABLE state
2803 *
2804 * Returns:
2805 * Condition code
2806 *
2807 * Arguments:
2808 * bp - pointer to board information
2809 * type - type of reset to perform
2810 *
2811 * Functional Description:
2812 * Bring adapter to DMA_UNAVAILABLE state by performing the following:
2813 * 1. Set reset type bit in Port Data A Register then reset adapter.
2814 * 2. Check that adapter is in DMA_UNAVAILABLE state.
2815 *
2816 * Return Codes:
2817 * DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state
2818 * DFX_K_HW_TIMEOUT - adapter did not reset properly
2819 *
2820 * Assumptions:
2821 * None
2822 *
2823 * Side Effects:
2824 * Internal adapter registers are cleared.
2825 */
2826
2827static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2828 {
2829 int timeout_cnt; /* used in for loops */
2830
2831 /* Set reset type bit and reset adapter */
2832
2833 dfx_hw_adap_reset(bp, type);
2834
2835 /* Now wait for adapter to enter DMA_UNAVAILABLE state */
2836
2837 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2838 {
2839 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2840 break;
2841 udelay(100); /* wait for 100 microseconds */
2842 }
2843 if (timeout_cnt == 0)
2844 return DFX_K_HW_TIMEOUT;
2845 return DFX_K_SUCCESS;
2846 }
2847
2848/*
2849 * Align an sk_buff's data area to a power-of-2 boundary (n)
2850 *
2851 */
2852
2853static void my_skb_align(struct sk_buff *skb, int n)
2854{
2855 unsigned long x = (unsigned long)skb->data;
2856 unsigned long v;
2857
2858 v = ALIGN(x, n); /* Where we want to be */
2859
2860 skb_reserve(skb, v - x);
2861}
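/*
 * Worked example: with skb->data at, say, 0x1008, my_skb_align(skb, 128)
 * computes ALIGN(0x1008, 128) == 0x1080 and reserves the 0x78 (120) byte
 * difference, leaving skb->data aligned to the 128-byte boundary that the
 * receive descriptor rules below require.
 */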
2862
2863
2864/*
2865 * ================
2866 * = dfx_rcv_init =
2867 * ================
2868 *
2869 * Overview:
2870 * Produces buffers to adapter LLC Host receive descriptor block
2871 *
2872 * Returns:
2873 *   0 on success or -ENOMEM (see Return Codes below)
2874 *
2875 * Arguments:
2876 * bp - pointer to board information
2877 * get_buffers - non-zero if buffers to be allocated
2878 *
2879 * Functional Description:
2880 * This routine can be called during dfx_adap_init() or during an adapter
2881 * reset. It initializes the descriptor block and produces all allocated
2882 * LLC Host queue receive buffers.
2883 *
2884 * Return Codes:
2885 * Return 0 on success or -ENOMEM if buffer allocation failed (when using
2886 * dynamic buffer allocation). If the buffer allocation failed, the
2887 * already allocated buffers will not be released and the caller should do
2888 * this.
2889 *
2890 * Assumptions:
2891 * The PDQ has been reset and the adapter and driver maintained Type 2
2892 * register indices are cleared.
2893 *
2894 * Side Effects:
2895 * Receive buffers are posted to the adapter LLC queue and the adapter
2896 * is notified.
2897 */
2898
2899static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2900 {
2901 int i, j; /* used in for loop */
2902
2903 /*
2904 * Since each receive buffer is a single fragment of the same length, initialize
2905 * first longword in each receive descriptor for entire LLC Host descriptor
2906 * block. Also initialize second longword in each receive descriptor with
2907 * physical address of receive buffer. We'll always allocate receive
2908 * buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2909 * block and produce new receive buffers by simply updating the receive
2910 * producer index.
2911 *
2912 * Assumptions:
2913 * To support all shipping versions of PDQ, the receive buffer size
2914 * must be mod 128 in length and the physical address must be 128 byte
2915 * aligned. In other words, bits 0-6 of the length and address must
2916 * be zero for the following descriptor field entries to be correct on
2917 * all PDQ-based boards. We guaranteed both requirements during
2918 * driver initialization when we allocated memory for the receive buffers.
2919 */
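	/*
	 * Loop structure note: the nested i/j loops below touch every one of
	 * the PI_RCV_DATA_K_NUM_ENTRIES descriptor slots, stepping j by
	 * rcv_bufs_to_post. With DYNAMIC_BUFFERS a fresh skb is allocated for
	 * each slot; otherwise physical buffer i from the receive block is
	 * reused at slots i, i + rcv_bufs_to_post, i + 2 * rcv_bufs_to_post,
	 * and so on.
	 */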
2920
2921 if (get_buffers) {
2922#ifdef DYNAMIC_BUFFERS
2923 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
2924 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2925 {
2926 struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
2927 if (!newskb)
2928 return -ENOMEM;
2929 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2930 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2931 /*
2932 * align to 128 bytes for compatibility with
2933 * the old EISA boards.
2934 */
2935
2936 my_skb_align(newskb, 128);
2937 bp->descr_block_virt->rcv_data[i + j].long_1 =
2938 (u32)dma_map_single(bp->bus_dev, newskb->data,
2939 NEW_SKB_SIZE,
2940 DMA_FROM_DEVICE);
2941 /*
2942 * p_rcv_buff_va is only used inside the
2943 * kernel so we put the skb pointer here.
2944 */
2945 bp->p_rcv_buff_va[i+j] = (char *) newskb;
2946 }
2947#else
2948 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
2949 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2950 {
2951 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2952 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2953 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2954 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2955 }
2956#endif
2957 }
2958
2959 /* Update receive producer and Type 2 register */
2960
2961 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
2962 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
2963 return 0;
2964 }
2965
2966
2967/*
2968 * =========================
2969 * = dfx_rcv_queue_process =
2970 * =========================
2971 *
2972 * Overview:
2973 * Process received LLC frames.
2974 *
2975 * Returns:
2976 * None
2977 *
2978 * Arguments:
2979 * bp - pointer to board information
2980 *
2981 * Functional Description:
2982 * Received LLC frames are processed until there are no more consumed frames.
2983 * Once all frames are processed, the receive buffers are returned to the
2984 * adapter.  Note that this algorithm bounds the length of time that can be spent
2985 * in this routine, because there are a fixed number of receive buffers to
2986 * process and buffers are not produced until this routine exits and returns
2987 * to the ISR.
2988 *
2989 * Return Codes:
2990 * None
2991 *
2992 * Assumptions:
2993 * None
2994 *
2995 * Side Effects:
2996 * None
2997 */
2998
2999static void dfx_rcv_queue_process(
3000 DFX_board_t *bp
3001 )
3002
3003 {
3004 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3005 char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */
3006 u32 descr, pkt_len; /* FMC descriptor field and packet length */
3007 struct sk_buff *skb; /* pointer to a sk_buff to hold incoming packet data */
3008
3009 /* Service all consumed LLC receive frames */
3010
3011 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3012 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3013 {
3014 /* Process any errors */
3015
3016 int entry;
3017
3018 entry = bp->rcv_xmt_reg.index.rcv_comp;
3019#ifdef DYNAMIC_BUFFERS
3020 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3021#else
3022 p_buff = bp->p_rcv_buff_va[entry];
3023#endif
3024 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3025
3026 if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3027 {
3028 if (descr & PI_FMC_DESCR_M_RCC_CRC)
3029 bp->rcv_crc_errors++;
3030 else
3031 bp->rcv_frame_status_errors++;
3032 }
3033 else
3034 {
3035 int rx_in_place = 0;
3036
3037 /* The frame was received without errors - verify packet length */
3038
3039 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3040 pkt_len -= 4; /* subtract 4 byte CRC */
3041 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3042 bp->rcv_length_errors++;
3043 else{
3044#ifdef DYNAMIC_BUFFERS
3045 if (pkt_len > SKBUFF_RX_COPYBREAK) {
3046 struct sk_buff *newskb;
3047
3048 newskb = dev_alloc_skb(NEW_SKB_SIZE);
3049 if (newskb){
3050 rx_in_place = 1;
3051
3052 my_skb_align(newskb, 128);
3053 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3054 dma_unmap_single(bp->bus_dev,
3055 bp->descr_block_virt->rcv_data[entry].long_1,
3056 NEW_SKB_SIZE,
3057 DMA_FROM_DEVICE);
3058 skb_reserve(skb, RCV_BUFF_K_PADDING);
3059 bp->p_rcv_buff_va[entry] = (char *)newskb;
3060 bp->descr_block_virt->rcv_data[entry].long_1 =
3061 (u32)dma_map_single(bp->bus_dev,
3062 newskb->data,
3063 NEW_SKB_SIZE,
3064 DMA_FROM_DEVICE);
3065 } else
3066 skb = NULL;
3067 } else
3068#endif
3069 skb = dev_alloc_skb(pkt_len+3); /* alloc new buffer to pass up, add room for PRH */
3070 if (skb == NULL)
3071 {
3072 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
3073 bp->rcv_discards++;
3074 break;
3075 }
3076 else {
3077#ifndef DYNAMIC_BUFFERS
3078 if (! rx_in_place)
3079#endif
3080 {
3081 /* Receive buffer allocated, pass receive packet up */
3082
3083 skb_copy_to_linear_data(skb,
3084 p_buff + RCV_BUFF_K_PADDING,
3085 pkt_len + 3);
3086 }
3087
3088 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */
3089 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
3090 skb->protocol = fddi_type_trans(skb, bp->dev);
3091 bp->rcv_total_bytes += skb->len;
3092 netif_rx(skb);
3093
3094 /* Update the rcv counters */
3095 bp->rcv_total_frames++;
3096 if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3097 bp->rcv_multicast_frames++;
3098 }
3099 }
3100 }
3101
3102 /*
3103 * Advance the producer (for recycling) and advance the completion
3104 * (for servicing received frames). Note that it is okay to
3105 * advance the producer without checking that it passes the
3106 * completion index because they are both advanced at the same
3107 * rate.
3108 */
3109
3110 bp->rcv_xmt_reg.index.rcv_prod += 1;
3111 bp->rcv_xmt_reg.index.rcv_comp += 1;
3112 }
3113 }
3114
3115
3116/*
3117 * =====================
3118 * = dfx_xmt_queue_pkt =
3119 * =====================
3120 *
3121 * Overview:
3122 * Queues packets for transmission
3123 *
3124 * Returns:
3125 * Condition code
3126 *
3127 * Arguments:
3128 * skb - pointer to sk_buff to queue for transmission
3129 * dev - pointer to device information
3130 *
3131 * Functional Description:
3132 * Here we assume that an incoming skb transmit request
3133 * is contained in a single physically contiguous buffer
3134 * in which the virtual address of the start of packet
3135 * (skb->data) can be converted to a physical address
3136 *   by using dma_map_single().
3137 *
3138 * Since the adapter architecture requires a three byte
3139 * packet request header to prepend the start of packet,
3140 * we'll write the three byte field immediately prior to
3141 * the FC byte. This assumption is valid because we've
3142 * ensured that dev->hard_header_len includes three pad
3143 * bytes. By posting a single fragment to the adapter,
3144 * we'll reduce the number of descriptor fetches and
3145 * bus traffic needed to send the request.
3146 *
3147 * Also, we can't free the skb until after it's been DMA'd
3148 * out by the adapter, so we'll queue it in the driver and
3149 * return it in dfx_xmt_done.
3150 *
3151 * Return Codes:
3152 *   NETDEV_TX_OK - driver queued the packet, the link was unavailable, or the skbuff was bad
3153 *   NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
3154 *
3155 * Assumptions:
3156 * First and foremost, we assume the incoming skb pointer
3157 * is NOT NULL and is pointing to a valid sk_buff structure.
3158 *
3159 * The outgoing packet is complete, starting with the
3160 *   frame control byte and ending with the last byte of data,
3161 * but NOT including the 4 byte CRC. We'll let the
3162 * adapter hardware generate and append the CRC.
3163 *
3164 * The entire packet is stored in one physically
3165 * contiguous buffer which is not cached and whose
3166 * 32-bit physical address can be determined.
3167 *
3168 * It's vital that this routine is NOT reentered for the
3169 * same board and that the OS is not in another section of
3170 * code (eg. dfx_int_common) for the same board on a
3171 * different thread.
3172 *
3173 * Side Effects:
3174 * None
3175 */
3176
3177static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3178 struct net_device *dev)
3179 {
3180 DFX_board_t *bp = netdev_priv(dev);
3181 u8 prod; /* local transmit producer index */
3182 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
3183 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3184 unsigned long flags;
3185
3186 netif_stop_queue(dev);
3187
3188 /*
3189 * Verify that incoming transmit request is OK
3190 *
3191 * Note: The packet size check is consistent with other
3192 * Linux device drivers, although the correct packet
3193 * size should be verified before calling the
3194 * transmit routine.
3195 */
3196
3197 if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3198 {
3199 printk("%s: Invalid packet length - %u bytes\n",
3200 dev->name, skb->len);
3201 bp->xmt_length_errors++; /* bump error counter */
3202 netif_wake_queue(dev);
3203 dev_kfree_skb(skb);
3204 return NETDEV_TX_OK; /* return "success" */
3205 }
3206 /*
3207 * See if adapter link is available, if not, free buffer
3208 *
3209 *   Note: If the link isn't available, free the buffer and return NETDEV_TX_OK
3210 * rather than tell the upper layer to requeue the packet.
3211 * The methodology here is that by the time the link
3212 * becomes available, the packet to be sent will be
3213 * fairly stale. By simply dropping the packet, the
3214 * higher layer protocols will eventually time out
3215 * waiting for response packets which it won't receive.
3216 */
3217
3218 if (bp->link_available == PI_K_FALSE)
3219 {
3220 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */
3221 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */
3222 else
3223 {
3224 bp->xmt_discards++; /* bump error counter */
3225 dev_kfree_skb(skb); /* free sk_buff now */
3226 netif_wake_queue(dev);
3227 return NETDEV_TX_OK; /* return "success" */
3228 }
3229 }
3230
3231 spin_lock_irqsave(&bp->lock, flags);
3232
3233 /* Get the current producer and the next free xmt data descriptor */
3234
3235 prod = bp->rcv_xmt_reg.index.xmt_prod;
3236 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3237
3238 /*
3239 * Get pointer to auxiliary queue entry to contain information
3240 * for this packet.
3241 *
3242 * Note: The current xmt producer index will become the
3243 * current xmt completion index when we complete this
3244 * packet later on. So, we'll get the pointer to the
3245 * next auxiliary queue entry now before we bump the
3246 * producer index.
3247 */
3248
3249 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */
3250
3251 /* Write the three PRH bytes immediately before the FC byte */
3252
3253 skb_push(skb,3);
3254 skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
3255 skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
3256 skb->data[2] = DFX_PRH2_BYTE; /* specification */
3257
3258 /*
3259 * Write the descriptor with buffer info and bump producer
3260 *
3261 * Note: Since we need to start DMA from the packet request
3262 * header, we'll add 3 bytes to the DMA buffer length,
3263 * and we'll determine the physical address of the
3264 * buffer from the PRH, not skb->data.
3265 *
3266 * Assumptions:
3267 * 1. Packet starts with the frame control (FC) byte
3268 * at skb->data.
3269 * 2. The 4-byte CRC is not appended to the buffer or
3270 * included in the length.
3271 * 3. Packet length (skb->len) is from FC to end of
3272 * data, inclusive.
3273 * 4. The packet length does not exceed the maximum
3274 * FDDI LLC frame length of 4491 bytes.
3275 * 5. The entire packet is contained in a physically
3276 * contiguous, non-cached, locked memory space
3277 * comprised of a single buffer pointed to by
3278 * skb->data.
3279 * 6. The physical address of the start of packet
3280 * can be determined from the virtual address
3281 *      by using dma_map_single() and is only 32-bits
3282 * wide.
3283 */
3284
3285 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3286 p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
3287 skb->len, DMA_TO_DEVICE);
3288
3289 /*
3290 * Verify that descriptor is actually available
3291 *
3292 *       Note: If descriptor isn't available, return NETDEV_TX_BUSY, which tells
3293 * the upper layer to requeue the packet for later
3294 * transmission.
3295 *
3296 * We need to ensure that the producer never reaches the
3297 * completion, except to indicate that the queue is empty.
3298 */
3299
3300 if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3301 {
3302 skb_pull(skb,3);
3303 spin_unlock_irqrestore(&bp->lock, flags);
3304 return NETDEV_TX_BUSY; /* requeue packet for later */
3305 }
3306
3307 /*
3308 * Save info for this packet for xmt done indication routine
3309 *
3310 * Normally, we'd save the producer index in the p_xmt_drv_descr
3311 * structure so that we'd have it handy when we complete this
3312 * packet later (in dfx_xmt_done). However, since the current
3313 * transmit architecture guarantees a single fragment for the
3314 * entire packet, we can simply bump the completion index by
3315 * one (1) for each completed packet.
3316 *
3317 * Note: If this assumption changes and we're presented with
3318 * an inconsistent number of transmit fragments for packet
3319 * data, we'll need to modify this code to save the current
3320 * transmit producer index.
3321 */
3322
3323 p_xmt_drv_descr->p_skb = skb;
3324
3325 /* Update Type 2 register */
3326
3327 bp->rcv_xmt_reg.index.xmt_prod = prod;
3328 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3329 spin_unlock_irqrestore(&bp->lock, flags);
3330 netif_wake_queue(dev);
3331 return NETDEV_TX_OK; /* packet queued to adapter */
3332 }
3333
3334
3335/*
3336 * ================
3337 * = dfx_xmt_done =
3338 * ================
3339 *
3340 * Overview:
3341 * Processes all frames that have been transmitted.
3342 *
3343 * Returns:
3344 *	Number of transmit packets completed (descriptors freed)
3345 *
3346 * Arguments:
3347 * bp - pointer to board information
3348 *
3349 * Functional Description:
3350 * For all consumed transmit descriptors that have not
3351 * yet been completed, we'll free the skb we were holding
3352 * onto using dev_kfree_skb and bump the appropriate
3353 * counters.
3354 *
3355 * Return Codes:
3356 * None
3357 *
3358 * Assumptions:
3359 * The Type 2 register is not updated in this routine. It is
3360 * assumed that it will be updated in the ISR when dfx_xmt_done
3361 * returns.
3362 *
3363 * Side Effects:
3364 * None
3365 */
3366
3367static int dfx_xmt_done(DFX_board_t *bp)
3368 {
3369 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3370 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3371 u8 comp; /* local transmit completion index */
3372 int freed = 0; /* buffers freed */
3373
3374 /* Service all consumed transmit frames */
3375
3376 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3377 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3378 {
3379 /* Get pointer to the transmit driver descriptor block information */
3380
3381 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3382
3383 /* Increment transmit counters */
3384
3385 bp->xmt_total_frames++;
3386 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3387
3388 /* Return skb to operating system */
3389 comp = bp->rcv_xmt_reg.index.xmt_comp;
3390 dma_unmap_single(bp->bus_dev,
3391 bp->descr_block_virt->xmt_data[comp].long_1,
3392 p_xmt_drv_descr->p_skb->len,
3393 DMA_TO_DEVICE);
3394 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3395
3396 /*
3397 * Move to start of next packet by updating completion index
3398 *
3399 * Here we assume that a transmit packet request is always
3400 * serviced by posting one fragment. We can therefore
3401 * simplify the completion code by incrementing the
3402 * completion index by one. This code will need to be
3403 * modified if this assumption changes. See comments
3404 * in dfx_xmt_queue_pkt for more details.
3405 */
3406
3407 bp->rcv_xmt_reg.index.xmt_comp += 1;
3408 freed++;
3409 }
3410 return freed;
3411 }
3412
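/*
 * The count returned by dfx_xmt_done() lets its caller decide whether the
 * transmit queue can be woken.  In this driver the interrupt path consumes
 * it roughly like this (sketch of the calling convention; the Type 2
 * producer register is rewritten afterwards, as noted in the header above):
 *
 *	if (dfx_xmt_done(bp))
 *		netif_wake_queue(dev);
 *	...
 *	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD,
 *			    bp->rcv_xmt_reg.lword);
 */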
3413
3414/*
3415 * =================
3416 * = dfx_rcv_flush =
3417 * =================
3418 *
3419 * Overview:
3420 * Remove all skb's in the receive ring.
3421 *
3422 * Returns:
3423 * None
3424 *
3425 * Arguments:
3426 * bp - pointer to board information
3427 *
3428 * Functional Description:
3429 *	Frees all the dynamically allocated skbs that are
3430 * currently attached to the device receive ring. This
3431 * function is typically only used when the device is
3432 * initialized or reinitialized.
3433 *
3434 * Return Codes:
3435 * None
3436 *
3437 * Side Effects:
3438 * None
3439 */
3440#ifdef DYNAMIC_BUFFERS
3441static void dfx_rcv_flush( DFX_board_t *bp )
3442 {
3443 int i, j;
3444
3445 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3446 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3447 {
3448 struct sk_buff *skb;
3449 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3450 if (skb)
3451 dev_kfree_skb(skb);
3452 bp->p_rcv_buff_va[i+j] = NULL;
3453 }
3454
3455 }
3456#else
3457static inline void dfx_rcv_flush( DFX_board_t *bp )
3458{
3459}
3460#endif /* DYNAMIC_BUFFERS */
3461
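/*
 * The nested loop in dfx_rcv_flush() visits every receive ring slot exactly
 * once: each slot index n is generated as n = i + j, with i equal to
 * n modulo rcv_bufs_to_post and j the remaining multiple of rcv_bufs_to_post.
 * For illustration, with rcv_bufs_to_post = 16 and a 256-entry ring, i = 0
 * yields slots 0, 16, ..., 240 and i = 1 yields 1, 17, ..., 241, and so on,
 * covering slots 0..255 with no repeats.  An equivalent single-loop form
 * (sketch only) would be:
 *
 *	int n;
 *
 *	for (n = 0; n < (int)PI_RCV_DATA_K_NUM_ENTRIES; n++) {
 *		struct sk_buff *skb = (struct sk_buff *)bp->p_rcv_buff_va[n];
 *
 *		if (skb)
 *			dev_kfree_skb(skb);
 *		bp->p_rcv_buff_va[n] = NULL;
 *	}
 */
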
3462/*
3463 * =================
3464 * = dfx_xmt_flush =
3465 * =================
3466 *
3467 * Overview:
3468 * Processes all frames whether they've been transmitted
3469 * or not.
3470 *
3471 * Returns:
3472 * None
3473 *
3474 * Arguments:
3475 * bp - pointer to board information
3476 *
3477 * Functional Description:
3478 * For all produced transmit descriptors that have not
3479 * yet been completed, we'll free the skb we were holding
3480 * onto using dev_kfree_skb and bump the appropriate
3481 * counters. Of course, it's possible that some of
3482 * these transmit requests actually did go out, but we
3483 * won't make that distinction here. Finally, we'll
3484 * update the consumer index to match the producer.
3485 *
3486 * Return Codes:
3487 * None
3488 *
3489 * Assumptions:
3490 * This routine does NOT update the Type 2 register. It
3491 * is assumed that this routine is being called during a
3492 * transmit flush interrupt, or a shutdown or close routine.
3493 *
3494 * Side Effects:
3495 * None
3496 */
3497
3498static void dfx_xmt_flush( DFX_board_t *bp )
3499 {
3500 u32 prod_cons; /* rcv/xmt consumer block longword */
3501 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3502 u8 comp; /* local transmit completion index */
3503
3504 /* Flush all outstanding transmit frames */
3505
3506 while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3507 {
3508 /* Get pointer to the transmit driver descriptor block information */
3509
3510 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3511
3512 /* Return skb to operating system */
3513 comp = bp->rcv_xmt_reg.index.xmt_comp;
3514 dma_unmap_single(bp->bus_dev,
3515 bp->descr_block_virt->xmt_data[comp].long_1,
3516 p_xmt_drv_descr->p_skb->len,
3517 DMA_TO_DEVICE);
3518 dev_kfree_skb(p_xmt_drv_descr->p_skb);
3519
3520 /* Increment transmit error counter */
3521
3522 bp->xmt_discards++;
3523
3524 /*
3525 * Move to start of next packet by updating completion index
3526 *
3527 * Here we assume that a transmit packet request is always
3528 * serviced by posting one fragment. We can therefore
3529 * simplify the completion code by incrementing the
3530 * completion index by one. This code will need to be
3531 * modified if this assumption changes. See comments
3532 * in dfx_xmt_queue_pkt for more details.
3533 */
3534
3535 bp->rcv_xmt_reg.index.xmt_comp += 1;
3536 }
3537
3538 /* Update the transmit consumer index in the consumer block */
3539
3540 prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3541 prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3542 bp->cons_block_virt->xmt_rcv_data = prod_cons;
3543 }
3544
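/*
 * dfx_xmt_flush() is only safe once the adapter can no longer DMA from the
 * posted buffers.  The close path therefore quiesces the hardware first,
 * roughly in this order (sketch of the sequence used by dfx_close()):
 *
 *	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
 *			    PI_HOST_INT_K_DISABLE_ALL_INTS);
 *	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
 *	dfx_xmt_flush(bp);		// now safe to reclaim pending skbs
 */
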
3545/*
3546 * ==================
3547 * = dfx_unregister =
3548 * ==================
3549 *
3550 * Overview:
3551 * Shuts down an FDDI controller
3552 *
3553 * Returns:
3554 *	None
3555 *
3556 * Arguments:
3557 * bdev - pointer to device information
3558 *
3559 * Functional Description:
3560 *	Undoes dfx_register(): unregisters the network device, frees the
 *	DMA-coherent host memory, uninitializes the bus interface and
 *	releases the adapter's I/O or memory region.
3561 * Return Codes:
3562 * None
3563 *
3564 * Assumptions:
3565 * It compiles so it should work :-( (PCI cards do :-)
3566 *
3567 * Side Effects:
3568 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
3569 * freed.
3570 */
3571static void dfx_unregister(struct device *bdev)
3572{
3573 struct net_device *dev = dev_get_drvdata(bdev);
3574 DFX_board_t *bp = netdev_priv(dev);
3575 int dfx_bus_pci = dev_is_pci(bdev);
3576 int dfx_bus_tc = DFX_BUS_TC(bdev);
3577 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3578 resource_size_t bar_start = 0; /* pointer to port */
3579 resource_size_t bar_len = 0; /* resource length */
3580 int alloc_size; /* total buffer size used */
3581
3582 unregister_netdev(dev);
3583
3584 alloc_size = sizeof(PI_DESCR_BLOCK) +
3585 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3586#ifndef DYNAMIC_BUFFERS
3587 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3588#endif
3589 sizeof(PI_CONSUMER_BLOCK) +
3590 (PI_ALIGN_K_DESC_BLK - 1);
3591 if (bp->kmalloced)
3592 dma_free_coherent(bdev, alloc_size,
3593 bp->kmalloced, bp->kmalloced_dma);
3594
3595 dfx_bus_uninit(dev);
3596
3597 dfx_get_bars(bdev, &bar_start, &bar_len);
3598 if (dfx_use_mmio) {
3599 iounmap(bp->base.mem);
3600 release_mem_region(bar_start, bar_len);
3601 } else
3602 release_region(bar_start, bar_len);
3603
3604 if (dfx_bus_pci)
3605 pci_disable_device(to_pci_dev(bdev));
3606
3607 free_netdev(dev);
3608}
3609
3610
3611static int __maybe_unused dfx_dev_register(struct device *);
3612static int __maybe_unused dfx_dev_unregister(struct device *);
3613
3614#ifdef CONFIG_PCI
3615static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3616static void dfx_pci_unregister(struct pci_dev *);
3617
3618static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
3619 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3620 { }
3621};
3622MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3623
3624static struct pci_driver dfx_pci_driver = {
3625 .name = "defxx",
3626 .id_table = dfx_pci_table,
3627 .probe = dfx_pci_register,
3628 .remove = dfx_pci_unregister,
3629};
3630
3631static int dfx_pci_register(struct pci_dev *pdev,
3632 const struct pci_device_id *ent)
3633{
3634 return dfx_register(&pdev->dev);
3635}
3636
3637static void dfx_pci_unregister(struct pci_dev *pdev)
3638{
3639 dfx_unregister(&pdev->dev);
3640}
3641#endif /* CONFIG_PCI */
3642
3643#ifdef CONFIG_EISA
3644static struct eisa_device_id dfx_eisa_table[] = {
3645 { "DEC3001", DEFEA_PROD_ID_1 },
3646 { "DEC3002", DEFEA_PROD_ID_2 },
3647 { "DEC3003", DEFEA_PROD_ID_3 },
3648 { "DEC3004", DEFEA_PROD_ID_4 },
3649 { }
3650};
3651MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3652
3653static struct eisa_driver dfx_eisa_driver = {
3654 .id_table = dfx_eisa_table,
3655 .driver = {
3656 .name = "defxx",
3657 .bus = &eisa_bus_type,
3658 .probe = dfx_dev_register,
3659 .remove = dfx_dev_unregister,
3660 },
3661};
3662#endif /* CONFIG_EISA */
3663
3664#ifdef CONFIG_TC
3665static struct tc_device_id const dfx_tc_table[] = {
3666 { "DEC ", "PMAF-FA " },
3667 { "DEC ", "PMAF-FD " },
3668 { "DEC ", "PMAF-FS " },
3669 { "DEC ", "PMAF-FU " },
3670 { }
3671};
3672MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3673
3674static struct tc_driver dfx_tc_driver = {
3675 .id_table = dfx_tc_table,
3676 .driver = {
3677 .name = "defxx",
3678 .bus = &tc_bus_type,
3679 .probe = dfx_dev_register,
3680 .remove = dfx_dev_unregister,
3681 },
3682};
3683#endif /* CONFIG_TC */
3684
3685static int __maybe_unused dfx_dev_register(struct device *dev)
3686{
3687 int status;
3688
3689 status = dfx_register(dev);
3690 if (!status)
3691 get_device(dev);
3692 return status;
3693}
3694
3695static int __maybe_unused dfx_dev_unregister(struct device *dev)
3696{
3697 put_device(dev);
3698 dfx_unregister(dev);
3699 return 0;
3700}
3701
3702
3703static int dfx_init(void)
3704{
3705 int status;
3706
3707 status = pci_register_driver(&dfx_pci_driver);
3708 if (!status)
3709 status = eisa_driver_register(&dfx_eisa_driver);
3710 if (!status)
3711 status = tc_register_driver(&dfx_tc_driver);
3712 return status;
3713}
3714
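/*
 * Note that dfx_init() leaves earlier registrations in place if a later one
 * fails.  A stricter variant of its body (sketch only, not the current
 * behaviour; it reuses dfx_init()'s local "status") would unwind in reverse
 * order:
 *
 *	status = pci_register_driver(&dfx_pci_driver);
 *	if (status)
 *		return status;
 *	status = eisa_driver_register(&dfx_eisa_driver);
 *	if (status)
 *		goto err_eisa;
 *	status = tc_register_driver(&dfx_tc_driver);
 *	if (status)
 *		goto err_tc;
 *	return 0;
 *
 * err_tc:
 *	eisa_driver_unregister(&dfx_eisa_driver);
 * err_eisa:
 *	pci_unregister_driver(&dfx_pci_driver);
 *	return status;
 */
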
3715static void dfx_cleanup(void)
3716{
3717 tc_unregister_driver(&dfx_tc_driver);
3718 eisa_driver_unregister(&dfx_eisa_driver);
3719 pci_unregister_driver(&dfx_pci_driver);
3720}
3721
3722module_init(dfx_init);
3723module_exit(dfx_cleanup);
3724MODULE_AUTHOR("Lawrence V. Stefani");
3725MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3726 DRV_VERSION " " DRV_RELDATE);
3727MODULE_LICENSE("GPL");
1/*
2 * File Name:
3 * defxx.c
4 *
5 * Copyright Information:
6 * Copyright Digital Equipment Corporation 1996.
7 *
8 * This software may be used and distributed according to the terms of
9 * the GNU General Public License, incorporated herein by reference.
10 *
11 * Abstract:
12 * A Linux device driver supporting the Digital Equipment Corporation
13 * FDDI TURBOchannel, EISA and PCI controller families. Supported
14 * adapters include:
15 *
16 * DEC FDDIcontroller/TURBOchannel (DEFTA)
17 * DEC FDDIcontroller/EISA (DEFEA)
18 * DEC FDDIcontroller/PCI (DEFPA)
19 *
20 * The original author:
21 * LVS Lawrence V. Stefani <lstefani@yahoo.com>
22 *
23 * Maintainers:
24 * macro Maciej W. Rozycki <macro@linux-mips.org>
25 *
26 * Credits:
27 * I'd like to thank Patricia Cross for helping me get started with
28 * Linux, David Davies for a lot of help upgrading and configuring
29 * my development system and for answering many OS and driver
30 * development questions, and Alan Cox for recommendations and
31 * integration help on getting FDDI support into Linux. LVS
32 *
33 * Driver Architecture:
34 * The driver architecture is largely based on previous driver work
35 * for other operating systems. The upper edge interface and
36 * functions were largely taken from existing Linux device drivers
37 * such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
38 * driver.
39 *
40 * Adapter Probe -
41 * The driver scans for supported EISA adapters by reading the
42 * SLOT ID register for each EISA slot and making a match
43 * against the expected value.
44 *
45 * Bus-Specific Initialization -
46 * This driver currently supports both EISA and PCI controller
47 * families. While the custom DMA chip and FDDI logic is similar
48 * or identical, the bus logic is very different. After
49 * initialization, the only bus-specific differences is in how the
50 * driver enables and disables interrupts. Other than that, the
51 * run-time critical code behaves the same on both families.
52 * It's important to note that both adapter families are configured
53 * to I/O map, rather than memory map, the adapter registers.
54 *
55 * Driver Open/Close -
56 * In the driver open routine, the driver ISR (interrupt service
57 * routine) is registered and the adapter is brought to an
58 * operational state. In the driver close routine, the opposite
59 * occurs; the driver ISR is deregistered and the adapter is
60 * brought to a safe, but closed state. Users may use consecutive
61 * commands to bring the adapter up and down as in the following
62 * example:
63 * ifconfig fddi0 up
64 * ifconfig fddi0 down
65 * ifconfig fddi0 up
66 *
67 * Driver Shutdown -
68 * Apparently, there is no shutdown or halt routine support under
69 * Linux. This routine would be called during "reboot" or
70 * "shutdown" to allow the driver to place the adapter in a safe
71 * state before a warm reboot occurs. To be really safe, the user
72 * should close the adapter before shutdown (eg. ifconfig fddi0 down)
73 * to ensure that the adapter DMA engine is taken off-line. However,
74 * the current driver code anticipates this problem and always issues
75 * a soft reset of the adapter at the beginning of driver initialization.
76 * A future driver enhancement in this area may occur in 2.1.X where
77 * Alan indicated that a shutdown handler may be implemented.
78 *
79 * Interrupt Service Routine -
80 * The driver supports shared interrupts, so the ISR is registered for
81 * each board with the appropriate flag and the pointer to that board's
82 * device structure. This provides the context during interrupt
83 * processing to support shared interrupts and multiple boards.
84 *
85 * Interrupt enabling/disabling can occur at many levels. At the host
86 * end, you can disable system interrupts, or disable interrupts at the
87 * PIC (on Intel systems). Across the bus, both EISA and PCI adapters
88 * have a bus-logic chip interrupt enable/disable as well as a DMA
89 * controller interrupt enable/disable.
90 *
91 * The driver currently enables and disables adapter interrupts at the
92 * bus-logic chip and assumes that Linux will take care of clearing or
93 * acknowledging any host-based interrupt chips.
94 *
95 * Control Functions -
96 * Control functions are those used to support functions such as adding
97 * or deleting multicast addresses, enabling or disabling packet
98 * reception filters, or other custom/proprietary commands. Presently,
99 * the driver supports the "get statistics", "set multicast list", and
100 * "set mac address" functions defined by Linux. A list of possible
101 * enhancements include:
102 *
103 * - Custom ioctl interface for executing port interface commands
104 * - Custom ioctl interface for adding unicast addresses to
105 * adapter CAM (to support bridge functions).
106 * - Custom ioctl interface for supporting firmware upgrades.
107 *
108 * Hardware (port interface) Support Routines -
109 * The driver function names that start with "dfx_hw_" represent
110 * low-level port interface routines that are called frequently. They
111 * include issuing a DMA or port control command to the adapter,
112 * resetting the adapter, or reading the adapter state. Since the
113 * driver initialization and run-time code must make calls into the
114 * port interface, these routines were written to be as generic and
115 * usable as possible.
116 *
117 * Receive Path -
118 * The adapter DMA engine supports a 256 entry receive descriptor block
119 * of which up to 255 entries can be used at any given time. The
120 * architecture is a standard producer, consumer, completion model in
121 * which the driver "produces" receive buffers to the adapter, the
122 * adapter "consumes" the receive buffers by DMAing incoming packet data,
123 * and the driver "completes" the receive buffers by servicing the
124 * incoming packet, then "produces" a new buffer and starts the cycle
125 * again. Receive buffers can be fragmented in up to 16 fragments
126 * (descriptor entries). For simplicity, this driver posts
127 * single-fragment receive buffers of 4608 bytes, then allocates a
128 * sk_buff, copies the data, then reposts the buffer. To reduce CPU
129 * utilization, a better approach would be to pass up the receive
130 * buffer (no extra copy) then allocate and post a replacement buffer.
131 * This is a performance enhancement that should be looked into at
132 * some point.
133 *
134 * Transmit Path -
135 * Like the receive path, the adapter DMA engine supports a 256 entry
136 * transmit descriptor block of which up to 255 entries can be used at
137 * any given time. Transmit buffers can be fragmented in up to 255
138 * fragments (descriptor entries). This driver always posts one
139 * fragment per transmit packet request.
140 *
141 * The fragment contains the entire packet from FC to end of data.
142 * Before posting the buffer to the adapter, the driver sets a three-byte
143 * packet request header (PRH) which is required by the Motorola MAC chip
144 * used on the adapters. The PRH tells the MAC the type of token to
145 * receive/send, whether or not to generate and append the CRC, whether
146 * synchronous or asynchronous framing is used, etc. Since the PRH
147 * definition is not necessarily consistent across all FDDI chipsets,
148 * the driver, rather than the common FDDI packet handler routines,
149 * sets these bytes.
150 *
151 * To reduce the amount of descriptor fetches needed per transmit request,
152 * the driver takes advantage of the fact that there are at least three
153 * bytes available before the skb->data field on the outgoing transmit
154 * request. This is guaranteed by having fddi_setup() in net_init.c set
155 * dev->hard_header_len to 24 bytes. 21 bytes accounts for the largest
156 * header in an 802.2 SNAP frame. The other 3 bytes are the extra "pad"
157 * bytes which we'll use to store the PRH.
158 *
159 * There's a subtle advantage to adding these pad bytes to the
160 * hard_header_len, it ensures that the data portion of the packet for
161 * an 802.2 SNAP frame is longword aligned. Other FDDI driver
162 * implementations may not need the extra padding and can start copying
163 * or DMAing directly from the FC byte which starts at skb->data. Should
164 * another driver implementation need ADDITIONAL padding, the net_init.c
165 * module should be updated and dev->hard_header_len should be increased.
166 * NOTE: To maintain the alignment on the data portion of the packet,
167 * dev->hard_header_len should always be evenly divisible by 4 and at
168 * least 24 bytes in size.
169 *
170 * Modification History:
171 * Date Name Description
172 * 16-Aug-96 LVS Created.
173 * 20-Aug-96 LVS Updated dfx_probe so that version information
174 * string is only displayed if 1 or more cards are
175 * found. Changed dfx_rcv_queue_process to copy
176 * 3 NULL bytes before FC to ensure that data is
177 * longword aligned in receive buffer.
178 * 09-Sep-96 LVS Updated dfx_ctl_set_multicast_list to enable
179 * LLC group promiscuous mode if multicast list
180 * is too large. LLC individual/group promiscuous
181 * mode is now disabled if IFF_PROMISC flag not set.
182 * dfx_xmt_queue_pkt no longer checks for NULL skb
183 * on Alan Cox recommendation. Added node address
184 * override support.
185 * 12-Sep-96 LVS Reset current address to factory address during
186 * device open. Updated transmit path to post a
187 * single fragment which includes PRH->end of data.
188 * Mar 2000 AC Did various cleanups for 2.3.x
189 * Jun 2000 jgarzik PCI and resource alloc cleanups
190 * Jul 2000 tjeerd Much cleanup and some bug fixes
191 * Sep 2000 tjeerd Fix leak on unload, cosmetic code cleanup
192 * Feb 2001 Skb allocation fixes
193 * Feb 2001 davej PCI enable cleanups.
194 * 04 Aug 2003 macro Converted to the DMA API.
195 * 14 Aug 2004 macro Fix device names reported.
196 * 14 Jun 2005 macro Use irqreturn_t.
197 * 23 Oct 2006 macro Big-endian host support.
198 * 14 Dec 2006 macro TURBOchannel support.
199 */
200
201/* Include files */
202#include <linux/bitops.h>
203#include <linux/compiler.h>
204#include <linux/delay.h>
205#include <linux/dma-mapping.h>
206#include <linux/eisa.h>
207#include <linux/errno.h>
208#include <linux/fddidevice.h>
209#include <linux/init.h>
210#include <linux/interrupt.h>
211#include <linux/ioport.h>
212#include <linux/kernel.h>
213#include <linux/module.h>
214#include <linux/netdevice.h>
215#include <linux/pci.h>
216#include <linux/skbuff.h>
217#include <linux/slab.h>
218#include <linux/string.h>
219#include <linux/tc.h>
220
221#include <asm/byteorder.h>
222#include <asm/io.h>
223
224#include "defxx.h"
225
226/* Version information string should be updated prior to each new release! */
227#define DRV_NAME "defxx"
228#define DRV_VERSION "v1.10"
229#define DRV_RELDATE "2006/12/14"
230
231static char version[] __devinitdata =
232 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
233 " Lawrence V. Stefani and others\n";
234
235#define DYNAMIC_BUFFERS 1
236
237#define SKBUFF_RX_COPYBREAK 200
238/*
239 * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
240 * alignment for compatibility with old EISA boards.
241 */
242#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
243
244#ifdef CONFIG_PCI
245#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
246#else
247#define DFX_BUS_PCI(dev) 0
248#endif
249
250#ifdef CONFIG_EISA
251#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
252#else
253#define DFX_BUS_EISA(dev) 0
254#endif
255
256#ifdef CONFIG_TC
257#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
258#else
259#define DFX_BUS_TC(dev) 0
260#endif
261
262#ifdef CONFIG_DEFXX_MMIO
263#define DFX_MMIO 1
264#else
265#define DFX_MMIO 0
266#endif
267
268/* Define module-wide (static) routines */
269
270static void dfx_bus_init(struct net_device *dev);
271static void dfx_bus_uninit(struct net_device *dev);
272static void dfx_bus_config_check(DFX_board_t *bp);
273
274static int dfx_driver_init(struct net_device *dev,
275 const char *print_name,
276 resource_size_t bar_start);
277static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
278
279static int dfx_open(struct net_device *dev);
280static int dfx_close(struct net_device *dev);
281
282static void dfx_int_pr_halt_id(DFX_board_t *bp);
283static void dfx_int_type_0_process(DFX_board_t *bp);
284static void dfx_int_common(struct net_device *dev);
285static irqreturn_t dfx_interrupt(int irq, void *dev_id);
286
287static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
288static void dfx_ctl_set_multicast_list(struct net_device *dev);
289static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
290static int dfx_ctl_update_cam(DFX_board_t *bp);
291static int dfx_ctl_update_filters(DFX_board_t *bp);
292
293static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
294static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
295static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
296static int dfx_hw_adap_state_rd(DFX_board_t *bp);
297static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
298
299static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
300static void dfx_rcv_queue_process(DFX_board_t *bp);
301static void dfx_rcv_flush(DFX_board_t *bp);
302
303static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
304 struct net_device *dev);
305static int dfx_xmt_done(DFX_board_t *bp);
306static void dfx_xmt_flush(DFX_board_t *bp);
307
308/* Define module-wide (static) variables */
309
310static struct pci_driver dfx_pci_driver;
311static struct eisa_driver dfx_eisa_driver;
312static struct tc_driver dfx_tc_driver;
313
314
315/*
316 * =======================
317 * = dfx_port_write_long =
318 * = dfx_port_read_long =
319 * =======================
320 *
321 * Overview:
322 * Routines for reading and writing values from/to adapter
323 *
324 * Returns:
325 * None
326 *
327 * Arguments:
328 * bp - pointer to board information
329 * offset - register offset from base I/O address
330 * data - for dfx_port_write_long, this is a value to write;
331 * for dfx_port_read_long, this is a pointer to store
332 * the read value
333 *
334 * Functional Description:
335 * These routines perform the correct operation to read or write
336 * the adapter register.
337 *
338 * EISA port block base addresses are based on the slot number in which the
339 * controller is installed. For example, if the EISA controller is installed
340 * in slot 4, the port block base address is 0x4000. If the controller is
341 * installed in slot 2, the port block base address is 0x2000, and so on.
342 * This port block can be used to access PDQ, ESIC, and DEFEA on-board
343 * registers using the register offsets defined in DEFXX.H.
344 *
345 * PCI port block base addresses are assigned by the PCI BIOS or system
346 * firmware. There is one 128 byte port block which can be accessed. It
347 * allows for I/O mapping of both PDQ and PFI registers using the register
348 * offsets defined in DEFXX.H.
349 *
350 * Return Codes:
351 * None
352 *
353 * Assumptions:
354 * bp->base is a valid base I/O address for this adapter.
355 * offset is a valid register offset for this adapter.
356 *
357 * Side Effects:
358 * Rather than produce macros for these functions, these routines
359 * are defined using "inline" to ensure that the compiler will
360 * generate inline code and not waste a procedure call and return.
361 * This provides all the benefits of macros, but with the
362 * advantage of strict data type checking.
363 */
364
365static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
366{
367 writel(data, bp->base.mem + offset);
368 mb();
369}
370
371static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
372{
373 outl(data, bp->base.port + offset);
374}
375
376static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
377{
378 struct device __maybe_unused *bdev = bp->bus_dev;
379 int dfx_bus_tc = DFX_BUS_TC(bdev);
380 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
381
382 if (dfx_use_mmio)
383 dfx_writel(bp, offset, data);
384 else
385 dfx_outl(bp, offset, data);
386}
387
388
389static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
390{
391 mb();
392 *data = readl(bp->base.mem + offset);
393}
394
395static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
396{
397 *data = inl(bp->base.port + offset);
398}
399
400static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
401{
402 struct device __maybe_unused *bdev = bp->bus_dev;
403 int dfx_bus_tc = DFX_BUS_TC(bdev);
404 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
405
406 if (dfx_use_mmio)
407 dfx_readl(bp, offset, data);
408 else
409 dfx_inl(bp, offset, data);
410}
411
412
413/*
414 * ================
415 * = dfx_get_bars =
416 * ================
417 *
418 * Overview:
419 * Retrieves the address range used to access control and status
420 * registers.
421 *
422 * Returns:
423 * None
424 *
425 * Arguments:
426 * bdev - pointer to device information
427 * bar_start - pointer to store the start address
428 * bar_len - pointer to store the length of the area
429 *
430 * Assumptions:
431 * I am sure there are some.
432 *
433 * Side Effects:
434 * None
435 */
436static void dfx_get_bars(struct device *bdev,
437 resource_size_t *bar_start, resource_size_t *bar_len)
438{
439 int dfx_bus_pci = DFX_BUS_PCI(bdev);
440 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
441 int dfx_bus_tc = DFX_BUS_TC(bdev);
442 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
443
444 if (dfx_bus_pci) {
445 int num = dfx_use_mmio ? 0 : 1;
446
447 *bar_start = pci_resource_start(to_pci_dev(bdev), num);
448 *bar_len = pci_resource_len(to_pci_dev(bdev), num);
449 }
450 if (dfx_bus_eisa) {
451 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
452 resource_size_t bar;
453
454 if (dfx_use_mmio) {
455 bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
456 bar <<= 8;
457 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
458 bar <<= 8;
459 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
460 bar <<= 16;
461 *bar_start = bar;
462 bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
463 bar <<= 8;
464 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
465 bar <<= 8;
466 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
467 bar <<= 16;
468 *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
469 } else {
470 *bar_start = base_addr;
471 *bar_len = PI_ESIC_K_CSR_IO_LEN;
472 }
473 }
474 if (dfx_bus_tc) {
475 *bar_start = to_tc_dev(bdev)->resource.start +
476 PI_TC_K_CSR_OFFSET;
477 *bar_len = PI_TC_K_CSR_LEN;
478 }
479}
480
481static const struct net_device_ops dfx_netdev_ops = {
482 .ndo_open = dfx_open,
483 .ndo_stop = dfx_close,
484 .ndo_start_xmit = dfx_xmt_queue_pkt,
485 .ndo_get_stats = dfx_ctl_get_stats,
486 .ndo_set_rx_mode = dfx_ctl_set_multicast_list,
487 .ndo_set_mac_address = dfx_ctl_set_mac_address,
488};
489
490/*
491 * ================
492 * = dfx_register =
493 * ================
494 *
495 * Overview:
496 * Initializes a supported FDDI controller
497 *
498 * Returns:
499 * Condition code
500 *
501 * Arguments:
502 * bdev - pointer to device information
503 *
504 * Functional Description:
505 *
506 * Return Codes:
507 * 0 - This device (fddi0, fddi1, etc) configured successfully
508 * -EBUSY - Failed to get resources, or dfx_driver_init failed.
509 *
510 * Assumptions:
511 * It compiles so it should work :-( (PCI cards do :-)
512 *
513 * Side Effects:
514 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
515 * initialized and the board resources are read and stored in
516 * the device structure.
517 */
518static int __devinit dfx_register(struct device *bdev)
519{
520 static int version_disp;
521 int dfx_bus_pci = DFX_BUS_PCI(bdev);
522 int dfx_bus_tc = DFX_BUS_TC(bdev);
523 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
524 const char *print_name = dev_name(bdev);
525 struct net_device *dev;
526 DFX_board_t *bp; /* board pointer */
527 resource_size_t bar_start = 0; /* pointer to port */
528 resource_size_t bar_len = 0; /* resource length */
529 int alloc_size; /* total buffer size used */
530 struct resource *region;
531 int err = 0;
532
533 if (!version_disp) { /* display version info if adapter is found */
534 version_disp = 1; /* set display flag to TRUE so that */
535 printk(version); /* we only display this string ONCE */
536 }
537
538 dev = alloc_fddidev(sizeof(*bp));
539 if (!dev) {
540 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
541 print_name);
542 return -ENOMEM;
543 }
544
545 /* Enable PCI device. */
546 if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
547 printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
548 print_name);
549 goto err_out;
550 }
551
552 SET_NETDEV_DEV(dev, bdev);
553
554 bp = netdev_priv(dev);
555 bp->bus_dev = bdev;
556 dev_set_drvdata(bdev, dev);
557
558 dfx_get_bars(bdev, &bar_start, &bar_len);
559
560 if (dfx_use_mmio)
561 region = request_mem_region(bar_start, bar_len, print_name);
562 else
563 region = request_region(bar_start, bar_len, print_name);
564 if (!region) {
565 printk(KERN_ERR "%s: Cannot reserve I/O resource "
566 "0x%lx @ 0x%lx, aborting\n",
567 print_name, (long)bar_len, (long)bar_start);
568 err = -EBUSY;
569 goto err_out_disable;
570 }
571
572 /* Set up I/O base address. */
573 if (dfx_use_mmio) {
574 bp->base.mem = ioremap_nocache(bar_start, bar_len);
575 if (!bp->base.mem) {
576 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
577 err = -ENOMEM;
578 goto err_out_region;
579 }
580 } else {
581 bp->base.port = bar_start;
582 dev->base_addr = bar_start;
583 }
584
585 /* Initialize new device structure */
586 dev->netdev_ops = &dfx_netdev_ops;
587
588 if (dfx_bus_pci)
589 pci_set_master(to_pci_dev(bdev));
590
591 if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
592 err = -ENODEV;
593 goto err_out_unmap;
594 }
595
596 err = register_netdev(dev);
597 if (err)
598 goto err_out_kfree;
599
600 printk("%s: registered as %s\n", print_name, dev->name);
601 return 0;
602
603err_out_kfree:
604 alloc_size = sizeof(PI_DESCR_BLOCK) +
605 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
606#ifndef DYNAMIC_BUFFERS
607 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
608#endif
609 sizeof(PI_CONSUMER_BLOCK) +
610 (PI_ALIGN_K_DESC_BLK - 1);
611 if (bp->kmalloced)
612 dma_free_coherent(bdev, alloc_size,
613 bp->kmalloced, bp->kmalloced_dma);
614
615err_out_unmap:
616 if (dfx_use_mmio)
617 iounmap(bp->base.mem);
618
619err_out_region:
620 if (dfx_use_mmio)
621 release_mem_region(bar_start, bar_len);
622 else
623 release_region(bar_start, bar_len);
624
625err_out_disable:
626 if (dfx_bus_pci)
627 pci_disable_device(to_pci_dev(bdev));
628
629err_out:
630 free_netdev(dev);
631 return err;
632}
633
634
635/*
636 * ================
637 * = dfx_bus_init =
638 * ================
639 *
640 * Overview:
641 * Initializes the bus-specific controller logic.
642 *
643 * Returns:
644 * None
645 *
646 * Arguments:
647 * dev - pointer to device information
648 *
649 * Functional Description:
650 * Determine and save adapter IRQ in device table,
651 * then perform bus-specific logic initialization.
652 *
653 * Return Codes:
654 * None
655 *
656 * Assumptions:
657 * bp->base has already been set with the proper
658 * base I/O address for this device.
659 *
660 * Side Effects:
661 * Interrupts are enabled at the adapter bus-specific logic.
662 * Note: Interrupts at the DMA engine (PDQ chip) are not
663 * enabled yet.
664 */
665
666static void __devinit dfx_bus_init(struct net_device *dev)
667{
668 DFX_board_t *bp = netdev_priv(dev);
669 struct device *bdev = bp->bus_dev;
670 int dfx_bus_pci = DFX_BUS_PCI(bdev);
671 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
672 int dfx_bus_tc = DFX_BUS_TC(bdev);
673 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
674 u8 val;
675
676 DBG_printk("In dfx_bus_init...\n");
677
678 /* Initialize a pointer back to the net_device struct */
679 bp->dev = dev;
680
681 /* Initialize adapter based on bus type */
682
683 if (dfx_bus_tc)
684 dev->irq = to_tc_dev(bdev)->interrupt;
685 if (dfx_bus_eisa) {
686 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
687
688 /* Get the interrupt level from the ESIC chip. */
689 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
690 val &= PI_CONFIG_STAT_0_M_IRQ;
691 val >>= PI_CONFIG_STAT_0_V_IRQ;
692
693 switch (val) {
694 case PI_CONFIG_STAT_0_IRQ_K_9:
695 dev->irq = 9;
696 break;
697
698 case PI_CONFIG_STAT_0_IRQ_K_10:
699 dev->irq = 10;
700 break;
701
702 case PI_CONFIG_STAT_0_IRQ_K_11:
703 dev->irq = 11;
704 break;
705
706 case PI_CONFIG_STAT_0_IRQ_K_15:
707 dev->irq = 15;
708 break;
709 }
710
711 /*
712 * Enable memory decoding (MEMCS0) and/or port decoding
713 * (IOCS1/IOCS0) as appropriate in Function Control
714 * Register. One of the port chip selects seems to be
715 * used for the Burst Holdoff register, but this bit of
716 * documentation is missing and as yet it has not been
717 * determined which of the two. This is also the reason
718 * the size of the decoded port range is twice as large
719 * as one required by the PDQ.
720 */
721
722 /* Set the decode range of the board. */
723 val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
724 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_1, val);
725 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_0_0, 0);
726 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_1, val);
727 outb(base_addr + PI_ESIC_K_IO_ADD_CMP_1_0, 0);
728 val = PI_ESIC_K_CSR_IO_LEN - 1;
729 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_1, (val >> 8) & 0xff);
730 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_0_0, val & 0xff);
731 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_1, (val >> 8) & 0xff);
732 outb(base_addr + PI_ESIC_K_IO_ADD_MASK_1_0, val & 0xff);
733
734 /* Enable the decoders. */
735 val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
736 if (dfx_use_mmio)
737 val |= PI_FUNCTION_CNTRL_M_MEMCS0;
738 outb(base_addr + PI_ESIC_K_FUNCTION_CNTRL, val);
739
740 /*
741 * Enable access to the rest of the module
742 * (including PDQ and packet memory).
743 */
744 val = PI_SLOT_CNTRL_M_ENB;
745 outb(base_addr + PI_ESIC_K_SLOT_CNTRL, val);
746
747 /*
748 * Map PDQ registers into memory or port space. This is
749 * done with a bit in the Burst Holdoff register.
750 */
751 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
752 if (dfx_use_mmio)
753 val |= PI_BURST_HOLDOFF_V_MEM_MAP;
754 else
755 val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
756 outb(base_addr + PI_DEFEA_K_BURST_HOLDOFF, val);
757
758 /* Enable interrupts at EISA bus interface chip (ESIC) */
759 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
760 val |= PI_CONFIG_STAT_0_M_INT_ENB;
761 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
762 }
763 if (dfx_bus_pci) {
764 struct pci_dev *pdev = to_pci_dev(bdev);
765
766 /* Get the interrupt level from the PCI Configuration Table */
767
768 dev->irq = pdev->irq;
769
770 /* Check Latency Timer and set if less than minimal */
771
772 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
773 if (val < PFI_K_LAT_TIMER_MIN) {
774 val = PFI_K_LAT_TIMER_DEF;
775 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
776 }
777
778 /* Enable interrupts at PCI bus interface chip (PFI) */
779 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
780 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
781 }
782}
783
784/*
785 * ==================
786 * = dfx_bus_uninit =
787 * ==================
788 *
789 * Overview:
790 * Uninitializes the bus-specific controller logic.
791 *
792 * Returns:
793 * None
794 *
795 * Arguments:
796 * dev - pointer to device information
797 *
798 * Functional Description:
799 * Perform bus-specific logic uninitialization.
800 *
801 * Return Codes:
802 * None
803 *
804 * Assumptions:
805 * bp->base has already been set with the proper
806 * base I/O address for this device.
807 *
808 * Side Effects:
809 * Interrupts are disabled at the adapter bus-specific logic.
810 */
811
812static void __devexit dfx_bus_uninit(struct net_device *dev)
813{
814 DFX_board_t *bp = netdev_priv(dev);
815 struct device *bdev = bp->bus_dev;
816 int dfx_bus_pci = DFX_BUS_PCI(bdev);
817 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
818 u8 val;
819
820 DBG_printk("In dfx_bus_uninit...\n");
821
822 /* Uninitialize adapter based on bus type */
823
824 if (dfx_bus_eisa) {
825 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
826
827 /* Disable interrupts at EISA bus interface chip (ESIC) */
828 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
829 val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
830 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, val);
831 }
832 if (dfx_bus_pci) {
833 /* Disable interrupts at PCI bus interface chip (PFI) */
834 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
835 }
836}
837
838
839/*
840 * ========================
841 * = dfx_bus_config_check =
842 * ========================
843 *
844 * Overview:
845 * Checks the configuration (burst size, full-duplex, etc.) If any parameters
846 * are illegal, then this routine will set new defaults.
847 *
848 * Returns:
849 * None
850 *
851 * Arguments:
852 * bp - pointer to board information
853 *
854 * Functional Description:
855 * For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or later
856 * PDQ, and all FDDI PCI controllers, all values are legal.
857 *
858 * Return Codes:
859 * None
860 *
861 * Assumptions:
862 * dfx_adap_init has NOT been called yet so burst size and other items have
863 * not been set.
864 *
865 * Side Effects:
866 * None
867 */
868
869static void __devinit dfx_bus_config_check(DFX_board_t *bp)
870{
871 struct device __maybe_unused *bdev = bp->bus_dev;
872 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
873 int status; /* return code from adapter port control call */
874 u32 host_data; /* LW data returned from port control call */
875
876 DBG_printk("In dfx_bus_config_check...\n");
877
878 /* Configuration check only valid for EISA adapter */
879
880 if (dfx_bus_eisa) {
881 /*
882 * First check if revision 2 EISA controller. Rev. 1 cards used
883 * PDQ revision B, so no workaround needed in this case. Rev. 3
884 * cards used PDQ revision E, so no workaround needed in this
885 * case, either. Only Rev. 2 cards used either Rev. D or E
886 * chips, so we must verify the chip revision on Rev. 2 cards.
887 */
888 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
889 /*
890 * Revision 2 FDDI EISA controller found,
891 * so let's check PDQ revision of adapter.
892 */
893 status = dfx_hw_port_ctrl_req(bp,
894 PI_PCTRL_M_SUB_CMD,
895 PI_SUB_CMD_K_PDQ_REV_GET,
896 0,
897 &host_data);
898 if ((status != DFX_K_SUCCESS) || (host_data == 2))
899 {
900 /*
901 * Either we couldn't determine the PDQ revision, or
902 * we determined that it is at revision D. In either case,
903 * we need to implement the workaround.
904 */
905
906 /* Ensure that the burst size is set to 8 longwords or less */
907
908 switch (bp->burst_size)
909 {
910 case PI_PDATA_B_DMA_BURST_SIZE_32:
911 case PI_PDATA_B_DMA_BURST_SIZE_16:
912 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
913 break;
914
915 default:
916 break;
917 }
918
919 /* Ensure that full-duplex mode is not enabled */
920
921 bp->full_duplex_enb = PI_SNMP_K_FALSE;
922 }
923 }
924 }
925 }
926
927
928/*
929 * ===================
930 * = dfx_driver_init =
931 * ===================
932 *
933 * Overview:
934 * Initializes remaining adapter board structure information
935 * and makes sure adapter is in a safe state prior to dfx_open().
936 *
937 * Returns:
938 * Condition code
939 *
940 * Arguments:
941 * dev - pointer to device information
942 * print_name - printable device name
943 *
944 * Functional Description:
945 * This function allocates additional resources such as the host memory
946 * blocks needed by the adapter (eg. descriptor and consumer blocks).
947 * Remaining bus initialization steps are also completed. The adapter
948 * is also reset so that it is in the DMA_UNAVAILABLE state. The OS
949 * must call dfx_open() to open the adapter and bring it on-line.
950 *
951 * Return Codes:
952 * DFX_K_SUCCESS - initialization succeeded
953 * DFX_K_FAILURE - initialization failed - could not allocate memory
954 * or read adapter MAC address
955 *
956 * Assumptions:
957 * Memory allocated from pci_alloc_consistent() call is physically
958 * contiguous, locked memory.
959 *
960 * Side Effects:
961 * Adapter is reset and should be in DMA_UNAVAILABLE state before
962 * returning from this routine.
963 */
964
965static int __devinit dfx_driver_init(struct net_device *dev,
966 const char *print_name,
967 resource_size_t bar_start)
968{
969 DFX_board_t *bp = netdev_priv(dev);
970 struct device *bdev = bp->bus_dev;
971 int dfx_bus_pci = DFX_BUS_PCI(bdev);
972 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
973 int dfx_bus_tc = DFX_BUS_TC(bdev);
974 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
975 int alloc_size; /* total buffer size needed */
976 char *top_v, *curr_v; /* virtual addrs into memory block */
977 dma_addr_t top_p, curr_p; /* physical addrs into memory block */
978 u32 data; /* host data register value */
979 __le32 le32;
980 char *board_name = NULL;
981
982 DBG_printk("In dfx_driver_init...\n");
983
984 /* Initialize bus-specific hardware registers */
985
986 dfx_bus_init(dev);
987
988 /*
989 * Initialize default values for configurable parameters
990 *
991 * Note: All of these parameters are ones that a user may
992 * want to customize. It'd be nice to break these
993 * out into Space.c or someplace else that's more
994 * accessible/understandable than this file.
995 */
996
997 bp->full_duplex_enb = PI_SNMP_K_FALSE;
998 bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */
999 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
1000 bp->rcv_bufs_to_post = RCV_BUFS_DEF;
1001
1002 /*
1003 * Ensure that HW configuration is OK
1004 *
1005 * Note: Depending on the hardware revision, we may need to modify
1006 * some of the configurable parameters to workaround hardware
1007 * limitations. We'll perform this configuration check AFTER
1008 * setting the parameters to their default values.
1009 */
1010
1011 dfx_bus_config_check(bp);
1012
1013 /* Disable PDQ interrupts first */
1014
1015 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1016
1017 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1018
1019 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1020
1021 /* Read the factory MAC address from the adapter then save it */
1022
1023 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
1024 &data) != DFX_K_SUCCESS) {
1025 printk("%s: Could not read adapter factory MAC address!\n",
1026 print_name);
1027 return DFX_K_FAILURE;
1028 }
1029 le32 = cpu_to_le32(data);
1030 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
1031
1032 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
1033 &data) != DFX_K_SUCCESS) {
1034 printk("%s: Could not read adapter factory MAC address!\n",
1035 print_name);
1036 return DFX_K_FAILURE;
1037 }
1038 le32 = cpu_to_le32(data);
1039 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
1040
1041 /*
1042 * Set current address to factory address
1043 *
1044 * Note: Node address override support is handled through
1045 * dfx_ctl_set_mac_address.
1046 */
1047
1048 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1049 if (dfx_bus_tc)
1050 board_name = "DEFTA";
1051 if (dfx_bus_eisa)
1052 board_name = "DEFEA";
1053 if (dfx_bus_pci)
1054 board_name = "DEFPA";
1055 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
1056 print_name, board_name, dfx_use_mmio ? "" : "I/O ",
1057 (long long)bar_start, dev->irq, dev->dev_addr);
1058
1059 /*
1060 * Get memory for descriptor block, consumer block, and other buffers
1061 * that need to be DMA read or written to by the adapter.
1062 */
1063
1064 alloc_size = sizeof(PI_DESCR_BLOCK) +
1065 PI_CMD_REQ_K_SIZE_MAX +
1066 PI_CMD_RSP_K_SIZE_MAX +
1067#ifndef DYNAMIC_BUFFERS
1068 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
1069#endif
1070 sizeof(PI_CONSUMER_BLOCK) +
1071 (PI_ALIGN_K_DESC_BLK - 1);
1072 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1073 &bp->kmalloced_dma,
1074 GFP_ATOMIC);
1075 if (top_v == NULL) {
1076 printk("%s: Could not allocate memory for host buffers "
1077 "and structures!\n", print_name);
1078 return DFX_K_FAILURE;
1079 }
1080 memset(top_v, 0, alloc_size); /* zero out memory before continuing */
1081 top_p = bp->kmalloced_dma; /* get physical address of buffer */
1082
1083 /*
1084 * To guarantee the 8K alignment required for the descriptor block, 8K - 1
1085 * plus the amount of memory needed was allocated. The physical address
1086 * is now 8K aligned. By carving up the memory in a specific order,
1087 * we'll guarantee the alignment requirements for all other structures.
1088 *
1089 * Note: If the assumptions change regarding the non-paged, non-cached,
1090 * physically contiguous nature of the memory block or the address
1091 * alignments, then we'll need to implement a different algorithm
1092 * for allocating the needed memory.
1093 */
1094
1095 curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
1096 curr_v = top_v + (curr_p - top_p);
1097
1098 /* Reserve space for descriptor block */
1099
1100 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
1101 bp->descr_block_phys = curr_p;
1102 curr_v += sizeof(PI_DESCR_BLOCK);
1103 curr_p += sizeof(PI_DESCR_BLOCK);
1104
1105 /* Reserve space for command request buffer */
1106
1107 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
1108 bp->cmd_req_phys = curr_p;
1109 curr_v += PI_CMD_REQ_K_SIZE_MAX;
1110 curr_p += PI_CMD_REQ_K_SIZE_MAX;
1111
1112 /* Reserve space for command response buffer */
1113
1114 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
1115 bp->cmd_rsp_phys = curr_p;
1116 curr_v += PI_CMD_RSP_K_SIZE_MAX;
1117 curr_p += PI_CMD_RSP_K_SIZE_MAX;
1118
1119 /* Reserve space for the LLC host receive queue buffers */
1120
1121 bp->rcv_block_virt = curr_v;
1122 bp->rcv_block_phys = curr_p;
1123
1124#ifndef DYNAMIC_BUFFERS
1125 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1126 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
1127#endif
1128
1129 /* Reserve space for the consumer block */
1130
1131 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
1132 bp->cons_block_phys = curr_p;
1133
1134 /* Display virtual and physical addresses if debug driver */
1135
1136 DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
1137 print_name,
1138 (long)bp->descr_block_virt, bp->descr_block_phys);
1139 DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
1140 print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
1141 DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
1142 print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
1143 DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
1144 print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
1145 DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
1146 print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
1147
1148 return DFX_K_SUCCESS;
1149}
1150
1151
1152/*
1153 * =================
1154 * = dfx_adap_init =
1155 * =================
1156 *
1157 * Overview:
1158 * Brings the adapter to the link avail/link unavailable state.
1159 *
1160 * Returns:
1161 * Condition code
1162 *
1163 * Arguments:
1164 * bp - pointer to board information
1165 * get_buffers - non-zero if buffers to be allocated
1166 *
1167 * Functional Description:
1168 * Issues the low-level firmware/hardware calls necessary to bring
1169 * the adapter up, or to properly reset and restore adapter during
1170 * run-time.
1171 *
1172 * Return Codes:
1173 * DFX_K_SUCCESS - Adapter brought up successfully
1174 * DFX_K_FAILURE - Adapter initialization failed
1175 *
1176 * Assumptions:
1177 * bp->reset_type should be set to a valid reset type value before
1178 * calling this routine.
1179 *
1180 * Side Effects:
1181 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1182 * upon a successful return of this routine.
1183 */
1184
1185static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1186 {
1187 DBG_printk("In dfx_adap_init...\n");
1188
1189 /* Disable PDQ interrupts first */
1190
1191 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1192
1193 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1194
1195 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1196 {
1197 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1198 return DFX_K_FAILURE;
1199 }
1200
1201 /*
1202 * When the PDQ is reset, some false Type 0 interrupts may be pending,
1203 * so we'll acknowledge all Type 0 interrupts now before continuing.
1204 */
1205
1206 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);
1207
1208 /*
1209 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
1210 *
1211 * Note: We only need to clear host copies of these registers. The PDQ reset
1212 * takes care of the on-board register values.
1213 */
1214
1215 bp->cmd_req_reg.lword = 0;
1216 bp->cmd_rsp_reg.lword = 0;
1217 bp->rcv_xmt_reg.lword = 0;
1218
1219 /* Clear consumer block before going to DMA_AVAILABLE state */
1220
1221 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1222
1223 /* Initialize the DMA Burst Size */
1224
1225 if (dfx_hw_port_ctrl_req(bp,
1226 PI_PCTRL_M_SUB_CMD,
1227 PI_SUB_CMD_K_BURST_SIZE_SET,
1228 bp->burst_size,
1229 NULL) != DFX_K_SUCCESS)
1230 {
1231 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1232 return DFX_K_FAILURE;
1233 }
1234
1235 /*
1236 * Set base address of Consumer Block
1237 *
1238 * Assumption: 32-bit physical address of consumer block is 64 byte
1239 * aligned. That is, bits 0-5 of the address must be zero.
1240 */
1241
1242 if (dfx_hw_port_ctrl_req(bp,
1243 PI_PCTRL_M_CONS_BLOCK,
1244 bp->cons_block_phys,
1245 0,
1246 NULL) != DFX_K_SUCCESS)
1247 {
1248 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1249 return DFX_K_FAILURE;
1250 }
1251
1252 /*
1253 * Set the base address of Descriptor Block and bring adapter
1254 * to DMA_AVAILABLE state.
1255 *
1256 * Note: We also set the literal and data swapping requirements
1257 * in this command.
1258 *
1259 * Assumption: 32-bit physical address of descriptor block
1260 * is 8Kbyte aligned.
1261 */
1262 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
1263 (u32)(bp->descr_block_phys |
1264 PI_PDATA_A_INIT_M_BSWAP_INIT),
1265 0, NULL) != DFX_K_SUCCESS) {
1266 printk("%s: Could not set descriptor block address!\n",
1267 bp->dev->name);
1268 return DFX_K_FAILURE;
1269 }
1270
1271 /* Set transmit flush timeout value */
1272
1273 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
1274 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
1275 bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */
1276 bp->cmd_req_virt->char_set.item[0].item_index = 0;
1277 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
1278 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1279 {
1280 printk("%s: DMA command request failed!\n", bp->dev->name);
1281 return DFX_K_FAILURE;
1282 }
1283
1284 /* Set the initial values for eFDXEnable and MACTReq MIB objects */
1285
1286 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
1287 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
1288 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
1289 bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
1290 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
1291 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
1292 bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
1293 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
1294 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1295 {
1296 printk("%s: DMA command request failed!\n", bp->dev->name);
1297 return DFX_K_FAILURE;
1298 }
1299
1300 /* Initialize adapter CAM */
1301
1302 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1303 {
1304 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1305 return DFX_K_FAILURE;
1306 }
1307
1308 /* Initialize adapter filters */
1309
1310 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1311 {
1312 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1313 return DFX_K_FAILURE;
1314 }
1315
1316 /*
1317 * Remove any existing dynamic buffers (i.e. if the adapter is being
1318 * reinitialized)
1319 */
1320
1321 if (get_buffers)
1322 dfx_rcv_flush(bp);
1323
1324 /* Initialize receive descriptor block and produce buffers */
1325
1326 if (dfx_rcv_init(bp, get_buffers))
1327 {
1328 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1329 if (get_buffers)
1330 dfx_rcv_flush(bp);
1331 return DFX_K_FAILURE;
1332 }
1333
1334 /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
1335
1336 bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
1337 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1338 {
1339 printk("%s: Start command failed\n", bp->dev->name);
1340 if (get_buffers)
1341 dfx_rcv_flush(bp);
1342 return DFX_K_FAILURE;
1343 }
1344
1345 /* Initialization succeeded, reenable PDQ interrupts */
1346
1347 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1348 return DFX_K_SUCCESS;
1349 }
1350
1351
1352/*
1353 * ============
1354 * = dfx_open =
1355 * ============
1356 *
1357 * Overview:
1358 * Opens the adapter
1359 *
1360 * Returns:
1361 * Condition code
1362 *
1363 * Arguments:
1364 * dev - pointer to device information
1365 *
1366 * Functional Description:
1367 * This function brings the adapter to an operational state.
1368 *
1369 * Return Codes:
1370 * 0 - Adapter was successfully opened
1371 *   -EAGAIN - Adapter initialization failed; if the IRQ could not be registered, the request_irq() error code is returned instead
1372 *
1373 * Assumptions:
1374 * This routine should only be called for a device that was
1375 * initialized successfully.
1376 *
1377 * Side Effects:
1378 * Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
1379 * if the open is successful.
1380 */
1381
1382static int dfx_open(struct net_device *dev)
1383{
1384 DFX_board_t *bp = netdev_priv(dev);
1385 int ret;
1386
1387 DBG_printk("In dfx_open...\n");
1388
1389 /* Register IRQ - support shared interrupts by passing device ptr */
1390
1391 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1392 dev);
1393 if (ret) {
1394 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1395 return ret;
1396 }
1397
1398 /*
1399 * Set current address to factory MAC address
1400 *
1401 * Note: We've already done this step in dfx_driver_init.
1402 * However, it's possible that a user has set a node
1403 * address override, then closed and reopened the
1404 * adapter. Unless we reset the device address field
1405 * now, we'll continue to use the existing modified
1406 * address.
1407 */
1408
1409 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
1410
1411 /* Clear local unicast/multicast address tables and counts */
1412
1413 memset(bp->uc_table, 0, sizeof(bp->uc_table));
1414 memset(bp->mc_table, 0, sizeof(bp->mc_table));
1415 bp->uc_count = 0;
1416 bp->mc_count = 0;
1417
1418 /* Disable promiscuous filter settings */
1419
1420 bp->ind_group_prom = PI_FSTATE_K_BLOCK;
1421 bp->group_prom = PI_FSTATE_K_BLOCK;
1422
1423 spin_lock_init(&bp->lock);
1424
1425 /* Reset and initialize adapter */
1426
1427 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */
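	/*
	 * Note: the fault-recovery paths in dfx_int_type_0_process() request a
	 *	 full reset with on-board diagnostics by setting reset_type to 0.
	 */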
1428 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
1429 {
1430 printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
1431 free_irq(dev->irq, dev);
1432 return -EAGAIN;
1433 }
1434
1435 /* Set device structure info */
1436 netif_start_queue(dev);
1437 return 0;
1438}
1439
1440
1441/*
1442 * =============
1443 * = dfx_close =
1444 * =============
1445 *
1446 * Overview:
1447 * Closes the device/module.
1448 *
1449 * Returns:
1450 * Condition code
1451 *
1452 * Arguments:
1453 * dev - pointer to device information
1454 *
1455 * Functional Description:
1456 * This routine closes the adapter and brings it to a safe state.
1457 * The interrupt service routine is deregistered with the OS.
1458 * The adapter can be opened again with another call to dfx_open().
1459 *
1460 * Return Codes:
1461 *   Always returns 0.
1462 *
1463 * Assumptions:
1464 * No further requests for this adapter are made after this routine is
1465 * called. dfx_open() can be called to reset and reinitialize the
1466 * adapter.
1467 *
1468 * Side Effects:
1469 * Adapter should be in DMA_UNAVAILABLE state upon completion of this
1470 * routine.
1471 */
1472
1473static int dfx_close(struct net_device *dev)
1474{
1475 DFX_board_t *bp = netdev_priv(dev);
1476
1477 DBG_printk("In dfx_close...\n");
1478
1479 /* Disable PDQ interrupts first */
1480
1481 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1482
1483 /* Place adapter in DMA_UNAVAILABLE state by resetting adapter */
1484
1485 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);
1486
1487 /*
1488 * Flush any pending transmit buffers
1489 *
1490 * Note: It's important that we flush the transmit buffers
1491 * BEFORE we clear our copy of the Type 2 register.
1492 * Otherwise, we'll have no idea how many buffers
1493 * we need to free.
1494 */
1495
1496 dfx_xmt_flush(bp);
1497
1498 /*
1499 * Clear Type 1 and Type 2 registers after adapter reset
1500 *
1501 * Note: Even though we're closing the adapter, it's
1502 * possible that an interrupt will occur after
1503 * dfx_close is called. Without some assurance to
1504 * the contrary we want to make sure that we don't
1505 * process receive and transmit LLC frames and update
1506 * the Type 2 register with bad information.
1507 */
1508
1509 bp->cmd_req_reg.lword = 0;
1510 bp->cmd_rsp_reg.lword = 0;
1511 bp->rcv_xmt_reg.lword = 0;
1512
1513 /* Clear consumer block for the same reason given above */
1514
1515 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));
1516
1517	/* Release all dynamically allocated skbs in the receive ring. */
1518
1519 dfx_rcv_flush(bp);
1520
1521 /* Clear device structure flags */
1522
1523 netif_stop_queue(dev);
1524
1525 /* Deregister (free) IRQ */
1526
1527 free_irq(dev->irq, dev);
1528
1529 return 0;
1530}
1531
1532
1533/*
1534 * ======================
1535 * = dfx_int_pr_halt_id =
1536 * ======================
1537 *
1538 * Overview:
1539 *   Displays halt IDs in string form.
1540 *
1541 * Returns:
1542 * None
1543 *
1544 * Arguments:
1545 * bp - pointer to board information
1546 *
1547 * Functional Description:
1548 * Determine current halt id and display appropriate string.
1549 *
1550 * Return Codes:
1551 * None
1552 *
1553 * Assumptions:
1554 * None
1555 *
1556 * Side Effects:
1557 * None
1558 */
1559
1560static void dfx_int_pr_halt_id(DFX_board_t *bp)
1561 {
1562 PI_UINT32 port_status; /* PDQ port status register value */
1563 PI_UINT32 halt_id; /* PDQ port status halt ID */
1564
1565 /* Read the latest port status */
1566
1567 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1568
1569 /* Display halt state transition information */
1570
1571 halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
1572 switch (halt_id)
1573 {
1574 case PI_HALT_ID_K_SELFTEST_TIMEOUT:
1575 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
1576 break;
1577
1578 case PI_HALT_ID_K_PARITY_ERROR:
1579 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
1580 break;
1581
1582 case PI_HALT_ID_K_HOST_DIR_HALT:
1583 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
1584 break;
1585
1586 case PI_HALT_ID_K_SW_FAULT:
1587 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
1588 break;
1589
1590 case PI_HALT_ID_K_HW_FAULT:
1591 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
1592 break;
1593
1594 case PI_HALT_ID_K_PC_TRACE:
1595 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
1596 break;
1597
1598 case PI_HALT_ID_K_DMA_ERROR:
1599 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
1600 break;
1601
1602 case PI_HALT_ID_K_IMAGE_CRC_ERROR:
1603 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
1604 break;
1605
1606 case PI_HALT_ID_K_BUS_EXCEPTION:
1607 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
1608 break;
1609
1610 default:
1611 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
1612 break;
1613 }
1614 }
1615
1616
1617/*
1618 * ==========================
1619 * = dfx_int_type_0_process =
1620 * ==========================
1621 *
1622 * Overview:
1623 * Processes Type 0 interrupts.
1624 *
1625 * Returns:
1626 * None
1627 *
1628 * Arguments:
1629 * bp - pointer to board information
1630 *
1631 * Functional Description:
1632 * Processes all enabled Type 0 interrupts. If the reason for the interrupt
1633 * is a serious fault on the adapter, then an error message is displayed
1634 * and the adapter is reset.
1635 *
1636 * One tricky potential timing window is the rapid succession of "link avail"
1637 * "link unavail" state change interrupts. The acknowledgement of the Type 0
1638 * interrupt must be done before reading the state from the Port Status
1639 * register. This is true because a state change could occur after reading
1640 * the data, but before acknowledging the interrupt. If this state change
1641 * does happen, it would be lost because the driver is using the old state,
1642 * and it will never know about the new state because it subsequently
1643 * acknowledges the state change interrupt.
1644 *
1645 * INCORRECT CORRECT
1646 * read type 0 int reasons read type 0 int reasons
1647 * read adapter state ack type 0 interrupts
1648 * ack type 0 interrupts read adapter state
1649 * ... process interrupt ... ... process interrupt ...
1650 *
1651 * Return Codes:
1652 * None
1653 *
1654 * Assumptions:
1655 * None
1656 *
1657 * Side Effects:
1658 * An adapter reset may occur if the adapter has any Type 0 error interrupts
1659 * or if the port status indicates that the adapter is halted. The driver
1660 * is responsible for reinitializing the adapter with the current CAM
1661 * contents and adapter filter settings.
1662 */
1663
1664static void dfx_int_type_0_process(DFX_board_t *bp)
1665
1666 {
1667 PI_UINT32 type_0_status; /* Host Interrupt Type 0 register */
1668 PI_UINT32 state; /* current adap state (from port status) */
1669
1670 /*
1671 * Read host interrupt Type 0 register to determine which Type 0
1672 * interrupts are pending. Immediately write it back out to clear
1673 * those interrupts.
1674 */
1675
1676 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
1677 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);
1678
1679 /* Check for Type 0 error interrupts */
1680
1681 if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
1682 PI_TYPE_0_STAT_M_PM_PAR_ERR |
1683 PI_TYPE_0_STAT_M_BUS_PAR_ERR))
1684 {
1685 /* Check for Non-Existent Memory error */
1686
1687 if (type_0_status & PI_TYPE_0_STAT_M_NXM)
1688 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);
1689
1690 /* Check for Packet Memory Parity error */
1691
1692 if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
1693 printk("%s: Packet Memory Parity Error\n", bp->dev->name);
1694
1695 /* Check for Host Bus Parity error */
1696
1697 if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
1698 printk("%s: Host Bus Parity Error\n", bp->dev->name);
1699
1700 /* Reset adapter and bring it back on-line */
1701
1702 bp->link_available = PI_K_FALSE; /* link is no longer available */
1703 bp->reset_type = 0; /* rerun on-board diagnostics */
1704 printk("%s: Resetting adapter...\n", bp->dev->name);
1705 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1706 {
1707 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1708 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1709 return;
1710 }
1711 printk("%s: Adapter reset successful!\n", bp->dev->name);
1712 return;
1713 }
1714
1715 /* Check for transmit flush interrupt */
1716
1717 if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
1718 {
1719 /* Flush any pending xmt's and acknowledge the flush interrupt */
1720
1721 bp->link_available = PI_K_FALSE; /* link is no longer available */
1722 dfx_xmt_flush(bp); /* flush any outstanding packets */
1723 (void) dfx_hw_port_ctrl_req(bp,
1724 PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
1725 0,
1726 0,
1727 NULL);
1728 }
1729
1730 /* Check for adapter state change */
1731
1732 if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
1733 {
1734 /* Get latest adapter state */
1735
1736 state = dfx_hw_adap_state_rd(bp); /* get adapter state */
1737 if (state == PI_STATE_K_HALTED)
1738 {
1739 /*
1740 * Adapter has transitioned to HALTED state, try to reset
1741 * adapter to bring it back on-line. If reset fails,
1742 * leave the adapter in the broken state.
1743 */
1744
1745 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
1746 dfx_int_pr_halt_id(bp); /* display halt id as string */
1747
1748 /* Reset adapter and bring it back on-line */
1749
1750 bp->link_available = PI_K_FALSE; /* link is no longer available */
1751 bp->reset_type = 0; /* rerun on-board diagnostics */
1752 printk("%s: Resetting adapter...\n", bp->dev->name);
1753 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
1754 {
1755 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name);
1756 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
1757 return;
1758 }
1759 printk("%s: Adapter reset successful!\n", bp->dev->name);
1760 }
1761 else if (state == PI_STATE_K_LINK_AVAIL)
1762 {
1763 bp->link_available = PI_K_TRUE; /* set link available flag */
1764 }
1765 }
1766 }
1767
1768
1769/*
1770 * ==================
1771 * = dfx_int_common =
1772 * ==================
1773 *
1774 * Overview:
1775 * Interrupt service routine (ISR)
1776 *
1777 * Returns:
1778 * None
1779 *
1780 * Arguments:
1781 * bp - pointer to board information
1782 *
1783 * Functional Description:
1784 * This is the ISR which processes incoming adapter interrupts.
1785 *
1786 * Return Codes:
1787 * None
1788 *
1789 * Assumptions:
1790 * This routine assumes PDQ interrupts have not been disabled.
1791 * When interrupts are disabled at the PDQ, the Port Status register
1792 * is automatically cleared. This routine uses the Port Status
1793 * register value to determine whether a Type 0 interrupt occurred,
1794 * so it's important that adapter interrupts are not normally
1795 * enabled/disabled at the PDQ.
1796 *
1797 * It's vital that this routine is NOT reentered for the
1798 * same board and that the OS is not in another section of
1799 * code (eg. dfx_xmt_queue_pkt) for the same board on a
1800 * different thread.
1801 *
1802 * Side Effects:
1803 * Pending interrupts are serviced. Depending on the type of
1804 * interrupt, acknowledging and clearing the interrupt at the
1805 * PDQ involves writing a register to clear the interrupt bit
1806 * or updating completion indices.
1807 */
1808
1809static void dfx_int_common(struct net_device *dev)
1810{
1811 DFX_board_t *bp = netdev_priv(dev);
1812 PI_UINT32 port_status; /* Port Status register */
1813
1814 /* Process xmt interrupts - frequent case, so always call this routine */
1815
1816 if(dfx_xmt_done(bp)) /* free consumed xmt packets */
1817 netif_wake_queue(dev);
1818
1819 /* Process rcv interrupts - frequent case, so always call this routine */
1820
1821 dfx_rcv_queue_process(bp); /* service received LLC frames */
1822
1823 /*
1824 * Transmit and receive producer and completion indices are updated on the
1825 * adapter by writing to the Type 2 Producer register. Since the frequent
1826 * case is that we'll be processing either LLC transmit or receive buffers,
1827 * we'll optimize I/O writes by doing a single register write here.
1828 */
1829
1830 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
1831
1832 /* Read PDQ Port Status register to find out which interrupts need processing */
1833
1834 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
1835
1836 /* Process Type 0 interrupts (if any) - infrequent, so only call when needed */
1837
1838 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
1839 dfx_int_type_0_process(bp); /* process Type 0 interrupts */
1840 }
1841
1842
1843/*
1844 * =================
1845 * = dfx_interrupt =
1846 * =================
1847 *
1848 * Overview:
1849 * Interrupt processing routine
1850 *
1851 * Returns:
1852 * Whether a valid interrupt was seen.
1853 *
1854 * Arguments:
1855 * irq - interrupt vector
1856 * dev_id - pointer to device information
1857 *
1858 * Functional Description:
1859 * This routine calls the interrupt processing routine for this adapter. It
1860 * disables and reenables adapter interrupts, as appropriate. We can support
1861 * shared interrupts since the incoming dev_id pointer provides our device
1862 * structure context.
1863 *
1864 * Return Codes:
1865 * IRQ_HANDLED - an IRQ was handled.
1866 * IRQ_NONE - no IRQ was handled.
1867 *
1868 * Assumptions:
1869 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
1870 * on Intel-based systems) is done by the operating system outside this
1871 * routine.
1872 *
1873 * System interrupts are enabled through this call.
1874 *
1875 * Side Effects:
1876 * Interrupts are disabled, then reenabled at the adapter.
1877 */
1878
1879static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1880{
1881 struct net_device *dev = dev_id;
1882 DFX_board_t *bp = netdev_priv(dev);
1883 struct device *bdev = bp->bus_dev;
1884 int dfx_bus_pci = DFX_BUS_PCI(bdev);
1885 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1886 int dfx_bus_tc = DFX_BUS_TC(bdev);
1887
1888 /* Service adapter interrupts */
1889
1890 if (dfx_bus_pci) {
1891 u32 status;
1892
1893 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
1894 if (!(status & PFI_STATUS_M_PDQ_INT))
1895 return IRQ_NONE;
1896
1897 spin_lock(&bp->lock);
1898
1899 /* Disable PDQ-PFI interrupts at PFI */
1900 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1901 PFI_MODE_M_DMA_ENB);
1902
1903 /* Call interrupt service routine for this adapter */
1904 dfx_int_common(dev);
1905
1906 /* Clear PDQ interrupt status bit and reenable interrupts */
1907 dfx_port_write_long(bp, PFI_K_REG_STATUS,
1908 PFI_STATUS_M_PDQ_INT);
1909 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL,
1910 (PFI_MODE_M_PDQ_INT_ENB |
1911 PFI_MODE_M_DMA_ENB));
1912
1913 spin_unlock(&bp->lock);
1914 }
1915 if (dfx_bus_eisa) {
1916 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1917 u8 status;
1918
1919 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1920 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1921 return IRQ_NONE;
1922
1923 spin_lock(&bp->lock);
1924
1925 /* Disable interrupts at the ESIC */
1926 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
1927		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1928
1929 /* Call interrupt service routine for this adapter */
1930 dfx_int_common(dev);
1931
1932 /* Reenable interrupts at the ESIC */
1933 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1934 status |= PI_CONFIG_STAT_0_M_INT_ENB;
1935		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1936
1937 spin_unlock(&bp->lock);
1938 }
1939 if (dfx_bus_tc) {
1940 u32 status;
1941
1942 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1943 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1944 PI_PSTATUS_M_XMT_DATA_PENDING |
1945 PI_PSTATUS_M_SMT_HOST_PENDING |
1946 PI_PSTATUS_M_UNSOL_PENDING |
1947 PI_PSTATUS_M_CMD_RSP_PENDING |
1948 PI_PSTATUS_M_CMD_REQ_PENDING |
1949 PI_PSTATUS_M_TYPE_0_PENDING)))
1950 return IRQ_NONE;
1951
1952 spin_lock(&bp->lock);
1953
1954 /* Call interrupt service routine for this adapter */
1955 dfx_int_common(dev);
1956
1957 spin_unlock(&bp->lock);
1958 }
1959
1960 return IRQ_HANDLED;
1961}
1962
1963
1964/*
1965 * =====================
1966 * = dfx_ctl_get_stats =
1967 * =====================
1968 *
1969 * Overview:
1970 * Get statistics for FDDI adapter
1971 *
1972 * Returns:
1973 * Pointer to FDDI statistics structure
1974 *
1975 * Arguments:
1976 * dev - pointer to device information
1977 *
1978 * Functional Description:
1979 * Gets current MIB objects from adapter, then
1980 * returns FDDI statistics structure as defined
1981 * in if_fddi.h.
1982 *
1983 * Note: Since the FDDI statistics structure is
1984 * still new and the device structure doesn't
1985 * have an FDDI-specific get statistics handler,
1986 * we'll return the FDDI statistics structure as
1987 * a pointer to an Ethernet statistics structure.
1988 * That way, at least the first part of the statistics
1989 * structure can be decoded properly, and it allows
1990 * "smart" applications to perform a second cast to
1991 * decode the FDDI-specific statistics.
1992 *
1993 * We'll have to pay attention to this routine as the
1994 * device structure becomes more mature and LAN media
1995 * independent.
1996 *
1997 * Return Codes:
1998 * None
1999 *
2000 * Assumptions:
2001 * None
2002 *
2003 * Side Effects:
2004 * None
2005 */
2006
2007static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2008 {
2009 DFX_board_t *bp = netdev_priv(dev);
2010
2011 /* Fill the bp->stats structure with driver-maintained counters */
2012
2013 bp->stats.gen.rx_packets = bp->rcv_total_frames;
2014 bp->stats.gen.tx_packets = bp->xmt_total_frames;
2015 bp->stats.gen.rx_bytes = bp->rcv_total_bytes;
2016 bp->stats.gen.tx_bytes = bp->xmt_total_bytes;
2017 bp->stats.gen.rx_errors = bp->rcv_crc_errors +
2018 bp->rcv_frame_status_errors +
2019 bp->rcv_length_errors;
2020 bp->stats.gen.tx_errors = bp->xmt_length_errors;
2021 bp->stats.gen.rx_dropped = bp->rcv_discards;
2022 bp->stats.gen.tx_dropped = bp->xmt_discards;
2023 bp->stats.gen.multicast = bp->rcv_multicast_frames;
2024 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */
2025
2026 /* Get FDDI SMT MIB objects */
2027
2028 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2029 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2030 return (struct net_device_stats *)&bp->stats;
2031
2032 /* Fill the bp->stats structure with the SMT MIB object values */
2033
2034 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
2035 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
2036 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
2037 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
2038 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
2039 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
2040 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
2041 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
2042 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
2043 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
2044 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
2045 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
2046 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
2047 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
2048 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
2049 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
2050 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
2051 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
2052 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
2053 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
2054 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
2055 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
2056 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
2057 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
2058 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
2059 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
2060 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
2061 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
2062 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
2063 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
2064 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
2065 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
2066 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
2067 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
2068 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
2069 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
2070 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
2071 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
2072 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
2073 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
2074 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
2075 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
2076 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
2077 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
2078 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
2079 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
2080 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
2081 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
2082 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
2083 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
2084 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
2085 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
2086 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
2087 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
2088 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
2089 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
2090 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
2091 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
2092 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
2093 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
2094 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
2095 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
2096 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
2097 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
2098 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
2099 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
2100 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
2101 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
2102 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
2103 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
2104 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
2105 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
2106 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
2107 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
2108 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
2109 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
2110 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
2111 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
2112 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
2113 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
2114 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
2115 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
2116 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
2117 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
2118 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
2119 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
2120 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
2121 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
2122 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
2123 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
2124 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
2125 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
2126
2127 /* Get FDDI counters */
2128
2129 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2130 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2131 return (struct net_device_stats *)&bp->stats;
2132
2133 /* Fill the bp->stats structure with the FDDI counter values */
2134
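	/*
	 * Note: only the low longword (.ls) of each PDQ counter is copied into
	 *	 the 32-bit statistics fields below.
	 */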
2135 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
2136 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
2137 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
2138 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
2139 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
2140 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
2141 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
2142 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
2143 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
2144 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2145 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2146
2147 return (struct net_device_stats *)&bp->stats;
2148 }
2149
2150
2151/*
2152 * ==============================
2153 * = dfx_ctl_set_multicast_list =
2154 * ==============================
2155 *
2156 * Overview:
2157 * Enable/Disable LLC frame promiscuous mode reception
2158 * on the adapter and/or update multicast address table.
2159 *
2160 * Returns:
2161 * None
2162 *
2163 * Arguments:
2164 * dev - pointer to device information
2165 *
2166 * Functional Description:
2167 * This routine follows a fairly simple algorithm for setting the
2168 * adapter filters and CAM:
2169 *
2170 * if IFF_PROMISC flag is set
2171 * enable LLC individual/group promiscuous mode
2172 * else
2173 * disable LLC individual/group promiscuous mode
2174 * if number of incoming multicast addresses >
2175 * (CAM max size - number of unicast addresses in CAM)
2176 * enable LLC group promiscuous mode
2177 * set driver-maintained multicast address count to zero
2178 * else
2179 * disable LLC group promiscuous mode
2180 * set driver-maintained multicast address count to incoming count
2181 * update adapter CAM
2182 * update adapter filters
2183 *
2184 * Return Codes:
2185 * None
2186 *
2187 * Assumptions:
2188 * Multicast addresses are presented in canonical (LSB) format.
2189 *
2190 * Side Effects:
2191 * On-board adapter CAM and filters are updated.
2192 */
2193
2194static void dfx_ctl_set_multicast_list(struct net_device *dev)
2195{
2196 DFX_board_t *bp = netdev_priv(dev);
2197 int i; /* used as index in for loop */
2198 struct netdev_hw_addr *ha;
2199
2200 /* Enable LLC frame promiscuous mode, if necessary */
2201
2202 if (dev->flags & IFF_PROMISC)
2203 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */
2204
2205 /* Else, update multicast address table */
2206
2207 else
2208 {
2209 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */
2210 /*
2211 * Check whether incoming multicast address count exceeds table size
2212 *
2213 * Note: The adapters utilize an on-board 64 entry CAM for
2214 * supporting perfect filtering of multicast packets
2215 * and bridge functions when adding unicast addresses.
2216 * There is no hash function available. To support
2217 * additional multicast addresses, the all multicast
2218 * filter (LLC group promiscuous mode) must be enabled.
2219 *
2220 * The firmware reserves two CAM entries for SMT-related
2221 * multicast addresses, which leaves 62 entries available.
2222 * The following code ensures that we're not being asked
2223 *		 to add more addresses than the CAM can hold (62 entries, less any unicast overrides).  If we are,
2224 * the driver will enable the all multicast filter.
2225 * Should the number of multicast addresses drop below
2226 * the high water mark, the filter will be disabled and
2227 * perfect filtering will be used.
2228 */
2229
2230 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
2231 {
2232 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2233 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2234 }
2235 else
2236 {
2237 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
2238 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
2239 }
2240
2241 /* Copy addresses to multicast address table, then update adapter CAM */
2242
2243 i = 0;
2244 netdev_for_each_mc_addr(ha, dev)
2245 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2246 ha->addr, FDDI_K_ALEN);
2247
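		/*
		 * Note: dfx_ctl_update_cam() only pushes the first bp->mc_count
		 *	 of these table entries to the adapter.
		 */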
2248 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2249 {
2250 DBG_printk("%s: Could not update multicast address table!\n", dev->name);
2251 }
2252 else
2253 {
2254 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2255 }
2256 }
2257
2258 /* Update adapter filters */
2259
2260 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2261 {
2262 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2263 }
2264 else
2265 {
2266 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2267 }
2268 }
2269
2270
2271/*
2272 * ===========================
2273 * = dfx_ctl_set_mac_address =
2274 * ===========================
2275 *
2276 * Overview:
2277 * Add node address override (unicast address) to adapter
2278 * CAM and update dev_addr field in device table.
2279 *
2280 * Returns:
2281 * None
2282 *
2283 * Arguments:
2284 * dev - pointer to device information
2285 * addr - pointer to sockaddr structure containing unicast address to add
2286 *
2287 * Functional Description:
2288 * The adapter supports node address overrides by adding one or more
2289 * unicast addresses to the adapter CAM. This is similar to adding
2290 * multicast addresses. In this routine we'll update the driver and
2291 * device structures with the new address, then update the adapter CAM
2292 * to ensure that the adapter will copy and strip frames destined and
2293 * sourced by that address.
2294 *
2295 * Return Codes:
2296 * Always returns zero.
2297 *
2298 * Assumptions:
2299 * The address pointed to by addr->sa_data is a valid unicast
2300 * address and is presented in canonical (LSB) format.
2301 *
2302 * Side Effects:
2303 * On-board adapter CAM is updated. On-board adapter filters
2304 * may be updated.
2305 */
2306
2307static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2308 {
2309 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2310 DFX_board_t *bp = netdev_priv(dev);
2311
2312 /* Copy unicast address to driver-maintained structs and update count */
2313
2314 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */
2315 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
2316 bp->uc_count = 1;
2317
2318 /*
2319	 * Verify that adding the unicast address does not exceed the CAM size
2320 *
2321 * Note: It's possible that before entering this routine we've
2322 * already filled the CAM with 62 multicast addresses.
2323 * Since we need to place the node address override into
2324 * the CAM, we have to check to see that we're not
2325 * exceeding the CAM size. If we are, we have to enable
2326 * the LLC group (multicast) promiscuous mode filter as
2327 * in dfx_ctl_set_multicast_list.
2328 */
2329
2330 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
2331 {
2332 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
2333 bp->mc_count = 0; /* Don't add mc addrs to CAM */
2334
2335 /* Update adapter filters */
2336
2337 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
2338 {
2339 DBG_printk("%s: Could not update adapter filters!\n", dev->name);
2340 }
2341 else
2342 {
2343 DBG_printk("%s: Adapter filters updated!\n", dev->name);
2344 }
2345 }
2346
2347 /* Update adapter CAM with new unicast address */
2348
2349 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2350 {
2351 DBG_printk("%s: Could not set new MAC address!\n", dev->name);
2352 }
2353 else
2354 {
2355 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2356 }
2357 return 0; /* always return zero */
2358 }
2359
2360
2361/*
2362 * ======================
2363 * = dfx_ctl_update_cam =
2364 * ======================
2365 *
2366 * Overview:
2367 * Procedure to update adapter CAM (Content Addressable Memory)
2368 * with desired unicast and multicast address entries.
2369 *
2370 * Returns:
2371 * Condition code
2372 *
2373 * Arguments:
2374 * bp - pointer to board information
2375 *
2376 * Functional Description:
2377 * Updates adapter CAM with current contents of board structure
2378 * unicast and multicast address tables. Since there are only 62
2379 * free entries in CAM, this routine ensures that the command
2380 * request buffer is not overrun.
2381 *
2382 * Return Codes:
2383 * DFX_K_SUCCESS - Request succeeded
2384 * DFX_K_FAILURE - Request failed
2385 *
2386 * Assumptions:
2387 * All addresses being added (unicast and multicast) are in canonical
2388 * order.
2389 *
2390 * Side Effects:
2391 * On-board adapter CAM is updated.
2392 */
2393
2394static int dfx_ctl_update_cam(DFX_board_t *bp)
2395 {
2396 int i; /* used as index */
2397 PI_LAN_ADDR *p_addr; /* pointer to CAM entry */
2398
2399 /*
2400 * Fill in command request information
2401 *
2402 * Note: Even though both the unicast and multicast address
2403 * table entries are stored as contiguous 6 byte entries,
2404 * the firmware address filter set command expects each
2405 * entry to be two longwords (8 bytes total). We must be
2406 * careful to only copy the six bytes of each unicast and
2407 * multicast table entry into each command entry. This
2408 * is also why we must first clear the entire command
2409 * request buffer.
2410 */
2411
2412 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */
2413 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
2414 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];
2415
2416 /* Now add unicast addresses to command request buffer, if any */
2417
2418 for (i=0; i < (int)bp->uc_count; i++)
2419 {
2420 if (i < PI_CMD_ADDR_FILTER_K_SIZE)
2421 {
2422 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2423 p_addr++; /* point to next command entry */
2424 }
2425 }
2426
2427 /* Now add multicast addresses to command request buffer, if any */
2428
2429 for (i=0; i < (int)bp->mc_count; i++)
2430 {
2431 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
2432 {
2433 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
2434 p_addr++; /* point to next command entry */
2435 }
2436 }
2437
2438 /* Issue command to update adapter CAM, then return */
2439
2440 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2441 return DFX_K_FAILURE;
2442 return DFX_K_SUCCESS;
2443 }
2444
2445
2446/*
2447 * ==========================
2448 * = dfx_ctl_update_filters =
2449 * ==========================
2450 *
2451 * Overview:
2452 * Procedure to update adapter filters with desired
2453 * filter settings.
2454 *
2455 * Returns:
2456 * Condition code
2457 *
2458 * Arguments:
2459 * bp - pointer to board information
2460 *
2461 * Functional Description:
2462 * Enables or disables filter using current filter settings.
2463 *
2464 * Return Codes:
2465 * DFX_K_SUCCESS - Request succeeded.
2466 * DFX_K_FAILURE - Request failed.
2467 *
2468 * Assumptions:
2469 * We must always pass up packets destined to the broadcast
2470 * address (FF-FF-FF-FF-FF-FF), so we'll always keep the
2471 * broadcast filter enabled.
2472 *
2473 * Side Effects:
2474 * On-board adapter filters are updated.
2475 */
2476
2477static int dfx_ctl_update_filters(DFX_board_t *bp)
2478 {
2479 int i = 0; /* used as index */
2480
2481 /* Fill in command request information */
2482
2483 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;
2484
2485 /* Initialize Broadcast filter - * ALWAYS ENABLED * */
2486
2487 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST;
2488 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS;
2489
2490 /* Initialize LLC Individual/Group Promiscuous filter */
2491
2492 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM;
2493 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom;
2494
2495 /* Initialize LLC Group Promiscuous filter */
2496
2497 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM;
2498 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom;
2499
2500 /* Terminate the item code list */
2501
2502 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL;
2503
2504 /* Issue command to update adapter filters, then return */
2505
2506 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2507 return DFX_K_FAILURE;
2508 return DFX_K_SUCCESS;
2509 }
2510
2511
2512/*
2513 * ======================
2514 * = dfx_hw_dma_cmd_req =
2515 * ======================
2516 *
2517 * Overview:
2518 * Sends PDQ DMA command to adapter firmware
2519 *
2520 * Returns:
2521 * Condition code
2522 *
2523 * Arguments:
2524 * bp - pointer to board information
2525 *
2526 * Functional Description:
2527 * The command request and response buffers are posted to the adapter in the manner
2528 * described in the PDQ Port Specification:
2529 *
2530 * 1. Command Response Buffer is posted to adapter.
2531 * 2. Command Request Buffer is posted to adapter.
2532 * 3. Command Request consumer index is polled until it indicates that request
2533 * buffer has been DMA'd to adapter.
2534 * 4. Command Response consumer index is polled until it indicates that response
2535 * buffer has been DMA'd from adapter.
2536 *
2537 * This ordering ensures that a response buffer is already available for the firmware
2538 * to use once it's done processing the request buffer.
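 *
 * Callers typically fill in bp->cmd_req_virt first (e.g. PI_CMD_K_CHARS_SET
 * plus an item list terminated by PI_ITEM_K_EOL, as in dfx_adap_init()); on
 * return, any command response is left in bp->cmd_rsp_virt for the caller to
 * parse.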
2539 *
2540 * Return Codes:
2541 * DFX_K_SUCCESS - DMA command succeeded
2542 * DFX_K_OUTSTATE - Adapter is NOT in proper state
2543 * DFX_K_HW_TIMEOUT - DMA command timed out
2544 *
2545 * Assumptions:
2546 * Command request buffer has already been filled with desired DMA command.
2547 *
2548 * Side Effects:
2549 * None
2550 */
2551
2552static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2553 {
2554 int status; /* adapter status */
2555 int timeout_cnt; /* used in for loops */
2556
2557 /* Make sure the adapter is in a state that we can issue the DMA command in */
2558
2559 status = dfx_hw_adap_state_rd(bp);
2560 if ((status == PI_STATE_K_RESET) ||
2561 (status == PI_STATE_K_HALTED) ||
2562 (status == PI_STATE_K_DMA_UNAVAIL) ||
2563 (status == PI_STATE_K_UPGRADE))
2564 return DFX_K_OUTSTATE;
2565
2566 /* Put response buffer on the command response queue */
2567
2568 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2569 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2570 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;
2571
2572 /* Bump (and wrap) the producer index and write out to register */
2573
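	/*
	 * Note: the "& (NUM_ENTRIES - 1)" wrap used on the command request and
	 *	 response indices below relies on both ring sizes being powers
	 *	 of two.
	 */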
2574 bp->cmd_rsp_reg.index.prod += 1;
2575 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2576 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2577
2578 /* Put request buffer on the command request queue */
2579
2580 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
2581 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
2582 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;
2583
2584 /* Bump (and wrap) the producer index and write out to register */
2585
2586 bp->cmd_req_reg.index.prod += 1;
2587 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2588 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2589
2590 /*
2591 * Here we wait for the command request consumer index to be equal
2592 * to the producer, indicating that the adapter has DMAed the request.
2593 */
2594
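	/* Poll for up to 20000 * 100 us = 2 seconds before giving up. */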
2595 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2596 {
2597 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
2598 break;
2599 udelay(100); /* wait for 100 microseconds */
2600 }
2601 if (timeout_cnt == 0)
2602 return DFX_K_HW_TIMEOUT;
2603
2604 /* Bump (and wrap) the completion index and write out to register */
2605
2606 bp->cmd_req_reg.index.comp += 1;
2607 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
2608 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);
2609
2610 /*
2611 * Here we wait for the command response consumer index to be equal
2612 * to the producer, indicating that the adapter has DMAed the response.
2613 */
2614
2615 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
2616 {
2617 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
2618 break;
2619 udelay(100); /* wait for 100 microseconds */
2620 }
2621 if (timeout_cnt == 0)
2622 return DFX_K_HW_TIMEOUT;
2623
2624 /* Bump (and wrap) the completion index and write out to register */
2625
2626 bp->cmd_rsp_reg.index.comp += 1;
2627 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2628 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2629 return DFX_K_SUCCESS;
2630 }
2631
2632
2633/*
2634 * ========================
2635 * = dfx_hw_port_ctrl_req =
2636 * ========================
2637 *
2638 * Overview:
2639 * Sends PDQ port control command to adapter firmware
2640 *
2641 * Returns:
2642 * Host data register value in host_data if ptr is not NULL
2643 *
2644 * Arguments:
2645 * bp - pointer to board information
2646 * command - port control command
2647 * data_a - port data A register value
2648 * data_b - port data B register value
2649 * host_data - ptr to host data register value
2650 *
2651 * Functional Description:
2652 * Send generic port control command to adapter by writing
2653 * to various PDQ port registers, then polling for completion.
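 *   For example, dfx_adap_init() issues
 *   dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT, descr_block_phys | swap flags,
 *   0, NULL) to hand the descriptor block address to the adapter.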
2654 *
2655 * Return Codes:
2656 * DFX_K_SUCCESS - port control command succeeded
2657 * DFX_K_HW_TIMEOUT - port control command timed out
2658 *
2659 * Assumptions:
2660 * None
2661 *
2662 * Side Effects:
2663 * None
2664 */
2665
2666static int dfx_hw_port_ctrl_req(
2667 DFX_board_t *bp,
2668 PI_UINT32 command,
2669 PI_UINT32 data_a,
2670 PI_UINT32 data_b,
2671 PI_UINT32 *host_data
2672 )
2673
2674 {
2675 PI_UINT32 port_cmd; /* Port Control command register value */
2676 int timeout_cnt; /* used in for loops */
2677
2678 /* Set Command Error bit in command longword */
2679
2680 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);
2681
2682 /* Issue port command to the adapter */
2683
2684 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
2685 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
2686 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);
2687
2688 /* Now wait for command to complete */
2689
2690 if (command == PI_PCTRL_M_BLAST_FLASH)
2691 timeout_cnt = 600000; /* set command timeout count to 60 seconds */
2692 else
2693 timeout_cnt = 20000; /* set command timeout count to 2 seconds */
2694
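	/*
	 * The loop below treats the adapter clearing the Command Error bit in
	 * the Port Control register as the completion indication; each pass
	 * waits 100 us, giving the 2 and 60 second totals noted above.
	 */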
2695 for (; timeout_cnt > 0; timeout_cnt--)
2696 {
2697 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
2698 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
2699 break;
2700 udelay(100); /* wait for 100 microseconds */
2701 }
2702 if (timeout_cnt == 0)
2703 return DFX_K_HW_TIMEOUT;
2704
2705 /*
2706	 * If the caller supplied a non-NULL host_data pointer, return the
2707	 * contents of the HOST_DATA register in it.
2708	 *
2709 */
2710
2711 if (host_data != NULL)
2712 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2713 return DFX_K_SUCCESS;
2714 }
2715
2716
2717/*
2718 * =====================
2719 * = dfx_hw_adap_reset =
2720 * =====================
2721 *
2722 * Overview:
2723 * Resets adapter
2724 *
2725 * Returns:
2726 * None
2727 *
2728 * Arguments:
2729 * bp - pointer to board information
2730 * type - type of reset to perform
2731 *
2732 * Functional Description:
2733 * Issue soft reset to adapter by writing to PDQ Port Reset
2734 * register. Use incoming reset type to tell adapter what
2735 * kind of reset operation to perform.
2736 *
2737 * Return Codes:
2738 * None
2739 *
2740 * Assumptions:
2741 * This routine merely issues a soft reset to the adapter.
2742 * It is expected that after this routine returns, the caller
2743 * will appropriately poll the Port Status register for the
2744 * adapter to enter the proper state.
2745 *
2746 * Side Effects:
2747 * Internal adapter registers are cleared.
2748 */
2749
2750static void dfx_hw_adap_reset(
2751 DFX_board_t *bp,
2752 PI_UINT32 type
2753 )
2754
2755 {
2756 /* Set Reset type and assert reset */
2757
2758 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */
2759 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);
2760
2761	/* Wait for at least 1 microsecond according to the spec; we wait 20 just to be safe */
2762
2763 udelay(20);
2764
2765 /* Deassert reset */
2766
2767 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
2768 }
2769
2770
2771/*
2772 * ========================
2773 * = dfx_hw_adap_state_rd =
2774 * ========================
2775 *
2776 * Overview:
2777 * Returns current adapter state
2778 *
2779 * Returns:
2780 * Adapter state per PDQ Port Specification
2781 *
2782 * Arguments:
2783 * bp - pointer to board information
2784 *
2785 * Functional Description:
2786 * Reads PDQ Port Status register and returns adapter state.
2787 *
2788 * Return Codes:
2789 * None
2790 *
2791 * Assumptions:
2792 * None
2793 *
2794 * Side Effects:
2795 * None
2796 */
2797
2798static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2799 {
2800 PI_UINT32 port_status; /* Port Status register value */
2801
2802 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2803 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2804 }
2805
2806
2807/*
2808 * =====================
2809 * = dfx_hw_dma_uninit =
2810 * =====================
2811 *
2812 * Overview:
2813 * Brings adapter to DMA_UNAVAILABLE state
2814 *
2815 * Returns:
2816 * Condition code
2817 *
2818 * Arguments:
2819 * bp - pointer to board information
2820 * type - type of reset to perform
2821 *
2822 * Functional Description:
2823 * Bring adapter to DMA_UNAVAILABLE state by performing the following:
2824 * 1. Set reset type bit in Port Data A Register then reset adapter.
2825 * 2. Check that adapter is in DMA_UNAVAILABLE state.
2826 *
2827 * Return Codes:
2828 * DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state
2829 * DFX_K_HW_TIMEOUT - adapter did not reset properly
2830 *
2831 * Assumptions:
2832 * None
2833 *
2834 * Side Effects:
2835 * Internal adapter registers are cleared.
2836 */
2837
2838static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2839 {
2840 int timeout_cnt; /* used in for loops */
2841
2842 /* Set reset type bit and reset adapter */
2843
2844 dfx_hw_adap_reset(bp, type);
2845
2846 /* Now wait for adapter to enter DMA_UNAVAILABLE state */
2847
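	/* Poll for up to 100000 * 100 us = 10 seconds. */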
2848 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
2849 {
2850 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
2851 break;
2852 udelay(100); /* wait for 100 microseconds */
2853 }
2854 if (timeout_cnt == 0)
2855 return DFX_K_HW_TIMEOUT;
2856 return DFX_K_SUCCESS;
2857 }
2858
2859/*
2860 * Align an sk_buff's data to a power-of-2 boundary.
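 * For example, with n = 128 and skb->data at an address ending in 0x03,
 * ALIGN() rounds up to the next 0x80 boundary and skb_reserve() advances
 * the data pointer by the 0x7d-byte difference.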
2861 *
2862 */
2863
2864static void my_skb_align(struct sk_buff *skb, int n)
2865{
2866 unsigned long x = (unsigned long)skb->data;
2867 unsigned long v;
2868
2869 v = ALIGN(x, n); /* Where we want to be */
2870
2871 skb_reserve(skb, v - x);
2872}
2873
2874
2875/*
2876 * ================
2877 * = dfx_rcv_init =
2878 * ================
2879 *
2880 * Overview:
2881 * Produces buffers to adapter LLC Host receive descriptor block
2882 *
2883 * Returns:
2884 *   Condition code
2885 *
2886 * Arguments:
2887 * bp - pointer to board information
2888 * get_buffers - non-zero if buffers to be allocated
2889 *
2890 * Functional Description:
2891 * This routine can be called during dfx_adap_init() or during an adapter
2892 * reset. It initializes the descriptor block and produces all allocated
2893 * LLC Host queue receive buffers.
2894 *
2895 * Return Codes:
2896 * Return 0 on success or -ENOMEM if buffer allocation failed (when using
2897 * dynamic buffer allocation). If the buffer allocation failed, the
2898 * already allocated buffers will not be released and the caller should do
2899 * this.
2900 *
2901 * Assumptions:
2902 * The PDQ has been reset and the adapter and driver maintained Type 2
2903 * register indices are cleared.
2904 *
2905 * Side Effects:
2906 * Receive buffers are posted to the adapter LLC queue and the adapter
2907 * is notified.
2908 */
2909
2910static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2911 {
2912 int i, j; /* used in for loop */
2913
2914 /*
2915 * Since each receive buffer is a single fragment of same length, initialize
2916 * first longword in each receive descriptor for entire LLC Host descriptor
2917 * block. Also initialize second longword in each receive descriptor with
2918 * physical address of receive buffer. We'll always allocate receive
2919 * buffers in powers of 2 so that we can easily fill the 256 entry descriptor
2920 * block and produce new receive buffers by simply updating the receive
2921 * producer index.
2922 *
2923 * Assumptions:
2924 * To support all shipping versions of PDQ, the receive buffer size
2925 *	 must be a multiple of 128 bytes in length and the physical address must be 128 byte
2926 * aligned. In other words, bits 0-6 of the length and address must
2927 * be zero for the following descriptor field entries to be correct on
2928 * all PDQ-based boards. We guaranteed both requirements during
2929 * driver initialization when we allocated memory for the receive buffers.
2930 */
2931
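	/*
	 * The nested loops below visit every one of the
	 * PI_RCV_DATA_K_NUM_ENTRIES descriptor entries (i + j covers the whole
	 * ring).  With DYNAMIC_BUFFERS each entry gets its own freshly
	 * allocated skb; otherwise the rcv_bufs_to_post statically allocated
	 * buffers are reused for every rcv_bufs_to_post'th entry around the
	 * ring.
	 */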
2932 if (get_buffers) {
2933#ifdef DYNAMIC_BUFFERS
2934 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
2935 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2936 {
2937 struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
2938 if (!newskb)
2939 return -ENOMEM;
2940 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2941 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2942 /*
2943 * align to 128 bytes for compatibility with
2944 * the old EISA boards.
2945 */
2946
2947 my_skb_align(newskb, 128);
2948 bp->descr_block_virt->rcv_data[i + j].long_1 =
2949 (u32)dma_map_single(bp->bus_dev, newskb->data,
2950 NEW_SKB_SIZE,
2951 DMA_FROM_DEVICE);
2952 /*
2953 * p_rcv_buff_va is only used inside the
2954 * kernel so we put the skb pointer here.
2955 */
2956 bp->p_rcv_buff_va[i+j] = (char *) newskb;
2957 }
2958#else
2959 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
2960 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
2961 {
2962 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
2963 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
2964 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
2965 bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
2966 }
2967#endif
2968 }
2969
2970 /* Update receive producer and Type 2 register */
2971
2972 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
2973 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
2974 return 0;
2975 }
2976
2977
2978/*
2979 * =========================
2980 * = dfx_rcv_queue_process =
2981 * =========================
2982 *
2983 * Overview:
2984 * Process received LLC frames.
2985 *
2986 * Returns:
2987 * None
2988 *
2989 * Arguments:
2990 * bp - pointer to board information
2991 *
2992 * Functional Description:
2993 *   Received LLC frames are processed until there are no more consumed frames.
2994 *   Once all frames are processed, the receive buffers are returned to the
2995 *   adapter.  Note that this algorithm bounds the time spent in this routine,
2996 *   because there is a fixed number of receive buffers to process and no new
2997 *   buffers are produced until this routine exits and returns to the ISR (the
2998 *   FMC descriptor fields used below are restated in the sketch that follows).
2999 *
3000 * Return Codes:
3001 * None
3002 *
3003 * Assumptions:
3004 * None
3005 *
3006 * Side Effects:
3007 * None
3008 */
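
/*
 * Illustrative sketch, not called anywhere in the driver: in the routine
 * below, the adapter-written FMC descriptor at the head of each receive
 * buffer carries both the receive status and the on-the-wire length.  The
 * helper restates how the LLC length is recovered from it; only the
 * PI_FMC_* names are taken from defxx.h, the helper itself is an example.
 */
static inline u32 dfx_example_fmc_pkt_len(u32 descr)
{
	/* length field from the descriptor, less the 4-byte CRC */
	return ((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN) - 4;
}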
3009
3010static void dfx_rcv_queue_process(
3011 DFX_board_t *bp
3012 )
3013
3014 {
3015 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3016 char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */
3017 u32 descr, pkt_len; /* FMC descriptor field and packet length */
3018 struct sk_buff *skb; /* pointer to a sk_buff to hold incoming packet data */
3019
3020 /* Service all consumed LLC receive frames */
3021
3022 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3023 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
3024 {
3025 /* Process any errors */
3026
3027 int entry;
3028
3029 entry = bp->rcv_xmt_reg.index.rcv_comp;
3030#ifdef DYNAMIC_BUFFERS
3031 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
3032#else
3033 p_buff = (char *) bp->p_rcv_buff_va[entry];
3034#endif
3035 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
3036
3037 if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
3038 {
3039 if (descr & PI_FMC_DESCR_M_RCC_CRC)
3040 bp->rcv_crc_errors++;
3041 else
3042 bp->rcv_frame_status_errors++;
3043 }
3044 else
3045 {
3046 int rx_in_place = 0;
3047
3048 /* The frame was received without errors - verify packet length */
3049
3050 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
3051 pkt_len -= 4; /* subtract 4 byte CRC */
3052 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3053 bp->rcv_length_errors++;
3054 else{
3055#ifdef DYNAMIC_BUFFERS
3056 if (pkt_len > SKBUFF_RX_COPYBREAK) {
3057 struct sk_buff *newskb;
3058
3059 newskb = dev_alloc_skb(NEW_SKB_SIZE);
3060 if (newskb){
3061 rx_in_place = 1;
3062
3063 my_skb_align(newskb, 128);
3064 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
3065 dma_unmap_single(bp->bus_dev,
3066 bp->descr_block_virt->rcv_data[entry].long_1,
3067 NEW_SKB_SIZE,
3068 DMA_FROM_DEVICE);
3069 skb_reserve(skb, RCV_BUFF_K_PADDING);
3070 bp->p_rcv_buff_va[entry] = (char *)newskb;
3071 bp->descr_block_virt->rcv_data[entry].long_1 =
3072 (u32)dma_map_single(bp->bus_dev,
3073 newskb->data,
3074 NEW_SKB_SIZE,
3075 DMA_FROM_DEVICE);
3076 } else
3077 skb = NULL;
3078 } else
3079#endif
3080 skb = dev_alloc_skb(pkt_len+3); /* alloc new buffer to pass up, add room for PRH */
3081 if (skb == NULL)
3082 {
3083 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
3084 bp->rcv_discards++;
3085 break;
3086 }
3087 else {
3088#ifndef DYNAMIC_BUFFERS
3089 if (! rx_in_place)
3090#endif
3091 {
3092 /* Receive buffer allocated, pass receive packet up */
3093
3094 skb_copy_to_linear_data(skb,
3095 p_buff + RCV_BUFF_K_PADDING,
3096 pkt_len + 3);
3097 }
3098
3099 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */
3100 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */
3101 skb->protocol = fddi_type_trans(skb, bp->dev);
3102 bp->rcv_total_bytes += skb->len;
3103 netif_rx(skb);
3104
3105 /* Update the rcv counters */
3106 bp->rcv_total_frames++;
3107 if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3108 bp->rcv_multicast_frames++;
3109 }
3110 }
3111 }
3112
3113	/*
3114	 * Advance the producer (for recycling) and the completion index
3115	 * (for servicing received frames).  Note that it is okay to
3116	 * advance the producer without checking whether it overtakes the
3117	 * completion index, because both are advanced at the same
3118	 * rate (see the sketch following this routine).
3119	 */
3120
3121 bp->rcv_xmt_reg.index.rcv_prod += 1;
3122 bp->rcv_xmt_reg.index.rcv_comp += 1;
3123 }
3124 }
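
/*
 * Illustrative sketch, not called anywhere in the driver: as noted above,
 * dfx_rcv_queue_process() advances the receive producer and completion
 * indices in lockstep, so the number of buffers owned by the adapter never
 * changes while frames are being serviced, and the Type 2 producer register
 * is only rewritten once control returns to the ISR.  The helper assumes the
 * indices wrap modulo the 256-entry ring (PI_RCV_DATA_K_NUM_ENTRIES).
 */
static inline int dfx_example_advance_rcv_index(int index)
{
	return (index + 1) % PI_RCV_DATA_K_NUM_ENTRIES;
}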
3125
3126
3127/*
3128 * =====================
3129 * = dfx_xmt_queue_pkt =
3130 * =====================
3131 *
3132 * Overview:
3133 * Queues packets for transmission
3134 *
3135 * Returns:
3136 * Condition code
3137 *
3138 * Arguments:
3139 * skb - pointer to sk_buff to queue for transmission
3140 * dev - pointer to device information
3141 *
3142 * Functional Description:
3143 *   Here we assume that an incoming skb transmit request
3144 *   is contained in a single physically contiguous buffer
3145 *   in which the virtual address of the start of packet
3146 *   (skb->data) can be converted to a bus address
3147 *   by using dma_map_single().
3148 *
3149 *   Since the adapter architecture requires a three-byte
3150 *   packet request header (PRH) to be prepended to the start
3151 *   of the packet, we write those three bytes immediately
3152 *   before the FC byte (see the sketch following this header).
3153 *   This works because we've ensured that dev->hard_header_len
3154 *   includes three pad bytes.  By posting a single fragment
3155 *   to the adapter, we reduce the number of descriptor
3156 *   fetches and the bus traffic needed to send the request.
3157 *
3158 *   Also, we can't free the skb until after it has been DMA'd
3159 *   out by the adapter, so we queue it in the driver and
3160 *   return it to the OS in dfx_xmt_done.
3161 *
3162 * Return Codes:
3163 *   NETDEV_TX_OK - packet was queued, or was dropped (link unavailable or bad skbuff)
3164 *   NETDEV_TX_BUSY - caller should requeue the sk_buff for later transmission
3165 *
3166 * Assumptions:
3167 * First and foremost, we assume the incoming skb pointer
3168 * is NOT NULL and is pointing to a valid sk_buff structure.
3169 *
3170 * The outgoing packet is complete, starting with the
3171 * frame control byte including the last byte of data,
3172 * but NOT including the 4 byte CRC. We'll let the
3173 * adapter hardware generate and append the CRC.
3174 *
3175 * The entire packet is stored in one physically
3176 * contiguous buffer which is not cached and whose
3177 * 32-bit physical address can be determined.
3178 *
3179 * It's vital that this routine is NOT reentered for the
3180 * same board and that the OS is not in another section of
3181 * code (eg. dfx_int_common) for the same board on a
3182 * different thread.
3183 *
3184 * Side Effects:
3185 * None
3186 */
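
/*
 * Illustrative sketch, not called anywhere in the driver: the three-byte
 * packet request header (PRH) is written into the headroom immediately in
 * front of the FC byte before the frame is handed to the adapter, which is
 * why dev->hard_header_len reserves three pad bytes.  The DFX_PRH*_BYTE
 * values are the ones used by this driver; the helper itself is an example.
 */
static inline void dfx_example_prepend_prh(struct sk_buff *skb)
{
	skb_push(skb, 3);		/* step back over the reserved headroom */
	skb->data[0] = DFX_PRH0_BYTE;	/* PRH bytes per the MAC chip spec */
	skb->data[1] = DFX_PRH1_BYTE;
	skb->data[2] = DFX_PRH2_BYTE;
}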
3187
3188static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
3189 struct net_device *dev)
3190 {
3191 DFX_board_t *bp = netdev_priv(dev);
3192 u8 prod; /* local transmit producer index */
3193 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
3194 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3195 unsigned long flags;
3196
3197 netif_stop_queue(dev);
3198
3199 /*
3200 * Verify that incoming transmit request is OK
3201 *
3202 * Note: The packet size check is consistent with other
3203 * Linux device drivers, although the correct packet
3204 * size should be verified before calling the
3205 * transmit routine.
3206 */
3207
3208 if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
3209 {
3210 printk("%s: Invalid packet length - %u bytes\n",
3211 dev->name, skb->len);
3212 bp->xmt_length_errors++; /* bump error counter */
3213 netif_wake_queue(dev);
3214 dev_kfree_skb(skb);
3215 return NETDEV_TX_OK; /* return "success" */
3216 }
3217	/*
3218	 * See if the adapter link is available; if not, free the buffer
3219	 *
3220	 * Note: If the link isn't available, free the buffer and return
3221	 *		 NETDEV_TX_OK rather than telling the upper layer to requeue
3222	 *		 the packet.  The reasoning is that by the time the link
3223	 *		 becomes available, the packet to be sent will be fairly
3224	 *		 stale.  By simply dropping the packet, the higher layer
3225	 *		 protocols will eventually time out waiting for response
3226	 *		 packets that will never arrive.
3227	 */
3228
3229 if (bp->link_available == PI_K_FALSE)
3230 {
3231 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */
3232 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */
3233 else
3234 {
3235 bp->xmt_discards++; /* bump error counter */
3236 dev_kfree_skb(skb); /* free sk_buff now */
3237 netif_wake_queue(dev);
3238 return NETDEV_TX_OK; /* return "success" */
3239 }
3240 }
3241
3242 spin_lock_irqsave(&bp->lock, flags);
3243
3244 /* Get the current producer and the next free xmt data descriptor */
3245
3246 prod = bp->rcv_xmt_reg.index.xmt_prod;
3247 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]);
3248
3249 /*
3250 * Get pointer to auxiliary queue entry to contain information
3251 * for this packet.
3252 *
3253 * Note: The current xmt producer index will become the
3254 * current xmt completion index when we complete this
3255 * packet later on. So, we'll get the pointer to the
3256 * next auxiliary queue entry now before we bump the
3257 * producer index.
3258 */
3259
3260 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */
3261
3262 /* Write the three PRH bytes immediately before the FC byte */
3263
3264 skb_push(skb,3);
3265 skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */
3266 skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */
3267 skb->data[2] = DFX_PRH2_BYTE; /* specification */
3268
3269 /*
3270 * Write the descriptor with buffer info and bump producer
3271 *
3272 * Note: Since we need to start DMA from the packet request
3273 * header, we'll add 3 bytes to the DMA buffer length,
3274 * and we'll determine the physical address of the
3275 * buffer from the PRH, not skb->data.
3276 *
3277 * Assumptions:
3278 * 1. Packet starts with the frame control (FC) byte
3279 * at skb->data.
3280 * 2. The 4-byte CRC is not appended to the buffer or
3281 * included in the length.
3282 * 3. Packet length (skb->len) is from FC to end of
3283 * data, inclusive.
3284 * 4. The packet length does not exceed the maximum
3285 * FDDI LLC frame length of 4491 bytes.
3286 * 5. The entire packet is contained in a physically
3287 * contiguous, non-cached, locked memory space
3288 * comprised of a single buffer pointed to by
3289 * skb->data.
3290	 *			6. The bus address of the start of packet
3291	 *			   can be determined from the virtual address
3292	 *			   by using dma_map_single() and is only 32 bits
3293	 *			   wide (see the sketch following this routine).
3294	 */
3295
3296 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3297 p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
3298 skb->len, DMA_TO_DEVICE);
3299
3300	/*
3301	 * Verify that the descriptor is actually available
3302	 *
3303	 * Note: If no descriptor is available, return NETDEV_TX_BUSY,
3304	 *		 which tells the upper layer to requeue the packet for later
3305	 *		 transmission.
3306	 *
3307	 *		 We must ensure that the producer never catches up with the
3308	 *		 completion index; the two are equal only when the queue is empty.
3309	 */
3310
3311 if (prod == bp->rcv_xmt_reg.index.xmt_comp)
3312 {
3313 skb_pull(skb,3);
3314 spin_unlock_irqrestore(&bp->lock, flags);
3315 return NETDEV_TX_BUSY; /* requeue packet for later */
3316 }
3317
3318 /*
3319 * Save info for this packet for xmt done indication routine
3320 *
3321 * Normally, we'd save the producer index in the p_xmt_drv_descr
3322 * structure so that we'd have it handy when we complete this
3323 * packet later (in dfx_xmt_done). However, since the current
3324 * transmit architecture guarantees a single fragment for the
3325 * entire packet, we can simply bump the completion index by
3326 * one (1) for each completed packet.
3327 *
3328 * Note: If this assumption changes and we're presented with
3329 * an inconsistent number of transmit fragments for packet
3330 * data, we'll need to modify this code to save the current
3331 * transmit producer index.
3332 */
3333
3334 p_xmt_drv_descr->p_skb = skb;
3335
3336 /* Update Type 2 register */
3337
3338 bp->rcv_xmt_reg.index.xmt_prod = prod;
3339 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
3340 spin_unlock_irqrestore(&bp->lock, flags);
3341 netif_wake_queue(dev);
3342 return NETDEV_TX_OK; /* packet queued to adapter */
3343 }
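
/*
 * Illustrative sketch, not called anywhere in the driver: for the
 * single-fragment transmits queued above, the descriptor's first longword
 * packs SOP, EOP and the segment length (PRH included), and the ring is
 * treated as full when bumping the producer would make it equal to the
 * completion index, since equality is reserved for "queue empty".  Only the
 * PI_XMT_* names are taken from defxx.h; the helpers are examples.
 */
static inline u32 dfx_example_xmt_long_0(u32 seg_len)
{
	return (u32)(PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP |
		     (seg_len << PI_XMT_DESCR_V_SEG_LEN));
}

static inline int dfx_example_xmt_ring_full(u8 next_prod, u8 comp)
{
	return next_prod == comp;	/* the last free slot is never used */
}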
3344
3345
3346/*
3347 * ================
3348 * = dfx_xmt_done =
3349 * ================
3350 *
3351 * Overview:
3352 * Processes all frames that have been transmitted.
3353 *
3354 * Returns:
3355 *   Number of transmit buffers freed
3356 *
3357 * Arguments:
3358 * bp - pointer to board information
3359 *
3360 * Functional Description:
3361 *   For all consumed transmit descriptors that have not
3362 *   yet been completed, bump the appropriate counters, unmap
3363 *   the DMA buffer, and free the skb we were holding onto with
3364 *   dev_kfree_skb_irq (see the sketch following this routine).
3365 *
3366 * Return Codes:
3367 * None
3368 *
3369 * Assumptions:
3370 * The Type 2 register is not updated in this routine. It is
3371 * assumed that it will be updated in the ISR when dfx_xmt_done
3372 * returns.
3373 *
3374 * Side Effects:
3375 * None
3376 */
3377
3378static int dfx_xmt_done(DFX_board_t *bp)
3379 {
3380 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3381 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */
3382 u8 comp; /* local transmit completion index */
3383 int freed = 0; /* buffers freed */
3384
3385 /* Service all consumed transmit frames */
3386
3387 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
3388 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
3389 {
3390 /* Get pointer to the transmit driver descriptor block information */
3391
3392 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3393
3394 /* Increment transmit counters */
3395
3396 bp->xmt_total_frames++;
3397 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;
3398
3399 /* Return skb to operating system */
3400 comp = bp->rcv_xmt_reg.index.xmt_comp;
3401 dma_unmap_single(bp->bus_dev,
3402 bp->descr_block_virt->xmt_data[comp].long_1,
3403 p_xmt_drv_descr->p_skb->len,
3404 DMA_TO_DEVICE);
3405 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3406
3407 /*
3408 * Move to start of next packet by updating completion index
3409 *
3410 * Here we assume that a transmit packet request is always
3411 * serviced by posting one fragment. We can therefore
3412 * simplify the completion code by incrementing the
3413 * completion index by one. This code will need to be
3414 * modified if this assumption changes. See comments
3415 * in dfx_xmt_queue_pkt for more details.
3416 */
3417
3418 bp->rcv_xmt_reg.index.xmt_comp += 1;
3419 freed++;
3420 }
3421 return freed;
3422 }
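
/*
 * Illustrative sketch, not called anywhere in the driver: each transmit
 * buffer completed above is unmapped with the same bus address, length and
 * direction that were passed to dma_map_single() in dfx_xmt_queue_pkt(), and
 * only then is the skb returned to the stack; dev_kfree_skb_irq() is used
 * because dfx_xmt_done() runs from interrupt context.
 */
static inline void dfx_example_xmt_complete(DFX_board_t *bp, u8 comp,
					    struct sk_buff *skb)
{
	dma_unmap_single(bp->bus_dev,
			 bp->descr_block_virt->xmt_data[comp].long_1,
			 skb->len, DMA_TO_DEVICE);	/* match the original mapping */
	dev_kfree_skb_irq(skb);				/* safe in interrupt context */
}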
3423
3424
3425/*
3426 * =================
3427 * = dfx_rcv_flush =
3428 * =================
3429 *
3430 * Overview:
3431 * Remove all skb's in the receive ring.
3432 *
3433 * Returns:
3434 * None
3435 *
3436 * Arguments:
3437 * bp - pointer to board information
3438 *
3439 * Functional Description:
3440 *   Frees all the dynamically allocated skb's that are
3441 * currently attached to the device receive ring. This
3442 * function is typically only used when the device is
3443 * initialized or reinitialized.
3444 *
3445 * Return Codes:
3446 * None
3447 *
3448 * Side Effects:
3449 * None
3450 */
3451#ifdef DYNAMIC_BUFFERS
3452static void dfx_rcv_flush( DFX_board_t *bp )
3453 {
3454 int i, j;
3455
3456 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
3457 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
3458 {
3459 struct sk_buff *skb;
3460 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
3461 if (skb)
3462 dev_kfree_skb(skb);
3463 bp->p_rcv_buff_va[i+j] = NULL;
3464 }
3465
3466 }
3467#else
3468static inline void dfx_rcv_flush( DFX_board_t *bp )
3469{
3470}
3471#endif /* DYNAMIC_BUFFERS */
3472
3473/*
3474 * =================
3475 * = dfx_xmt_flush =
3476 * =================
3477 *
3478 * Overview:
3479 * Processes all frames whether they've been transmitted
3480 * or not.
3481 *
3482 * Returns:
3483 * None
3484 *
3485 * Arguments:
3486 * bp - pointer to board information
3487 *
3488 * Functional Description:
3489 * For all produced transmit descriptors that have not
3490 * yet been completed, we'll free the skb we were holding
3491 * onto using dev_kfree_skb and bump the appropriate
3492 * counters. Of course, it's possible that some of
3493 * these transmit requests actually did go out, but we
3494 * won't make that distinction here. Finally, we'll
3495 * update the consumer index to match the producer.
3496 *
3497 * Return Codes:
3498 * None
3499 *
3500 * Assumptions:
3501 * This routine does NOT update the Type 2 register. It
3502 * is assumed that this routine is being called during a
3503 * transmit flush interrupt, or a shutdown or close routine.
3504 *
3505 * Side Effects:
3506 * None
3507 */
3508
3509static void dfx_xmt_flush( DFX_board_t *bp )
3510 {
3511 u32 prod_cons; /* rcv/xmt consumer block longword */
3512 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
3513 u8 comp; /* local transmit completion index */
3514
3515 /* Flush all outstanding transmit frames */
3516
3517 while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
3518 {
3519 /* Get pointer to the transmit driver descriptor block information */
3520
3521 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);
3522
3523 /* Return skb to operating system */
3524 comp = bp->rcv_xmt_reg.index.xmt_comp;
3525 dma_unmap_single(bp->bus_dev,
3526 bp->descr_block_virt->xmt_data[comp].long_1,
3527 p_xmt_drv_descr->p_skb->len,
3528 DMA_TO_DEVICE);
3529 dev_kfree_skb(p_xmt_drv_descr->p_skb);
3530
3531 /* Increment transmit error counter */
3532
3533 bp->xmt_discards++;
3534
3535 /*
3536 * Move to start of next packet by updating completion index
3537 *
3538 * Here we assume that a transmit packet request is always
3539 * serviced by posting one fragment. We can therefore
3540 * simplify the completion code by incrementing the
3541 * completion index by one. This code will need to be
3542 * modified if this assumption changes. See comments
3543 * in dfx_xmt_queue_pkt for more details.
3544 */
3545
3546 bp->rcv_xmt_reg.index.xmt_comp += 1;
3547 }
3548
3549	/* Update the transmit consumer index in the consumer block (see the sketch following this routine) */
3550
3551 prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
3552 prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
3553 bp->cons_block_virt->xmt_rcv_data = prod_cons;
3554 }
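
/*
 * Illustrative sketch, not called anywhere in the driver: the
 * transmit-consumer update at the end of dfx_xmt_flush() above is a
 * read-modify-write of a single field in the shared consumer block: clear
 * the transmit index bits, then merge in the new value.  Only the PI_CONS_*
 * names are taken from defxx.h; the helper itself is an example.
 */
static inline u32 dfx_example_set_xmt_cons(u32 prod_cons, u32 xmt_index)
{
	prod_cons &= ~PI_CONS_M_XMT_INDEX;		/* clear the old index */
	prod_cons |= xmt_index << PI_CONS_V_XMT_INDEX;	/* insert the new one */
	return prod_cons;
}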
3555
3556/*
3557 * ==================
3558 * = dfx_unregister =
3559 * ==================
3560 *
3561 * Overview:
3562 * Shuts down an FDDI controller
3563 *
3564 * Returns:
3565 *   None
3566 *
3567 * Arguments:
3568 * bdev - pointer to device information
3569 *
3570 * Functional Description:
3571 *   Unregisters the net device and releases all driver-held DMA, I/O and bus resources.
3572 * Return Codes:
3573 * None
3574 *
3575 * Assumptions:
3576 * It compiles so it should work :-( (PCI cards do :-)
3577 *
3578 * Side Effects:
3579 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
3580 * freed.
3581 */
3582static void __devexit dfx_unregister(struct device *bdev)
3583{
3584 struct net_device *dev = dev_get_drvdata(bdev);
3585 DFX_board_t *bp = netdev_priv(dev);
3586 int dfx_bus_pci = DFX_BUS_PCI(bdev);
3587 int dfx_bus_tc = DFX_BUS_TC(bdev);
3588 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
3589 resource_size_t bar_start = 0; /* pointer to port */
3590 resource_size_t bar_len = 0; /* resource length */
3591 int alloc_size; /* total buffer size used */
3592
3593 unregister_netdev(dev);
3594
3595 alloc_size = sizeof(PI_DESCR_BLOCK) +
3596 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
3597#ifndef DYNAMIC_BUFFERS
3598 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
3599#endif
3600 sizeof(PI_CONSUMER_BLOCK) +
3601 (PI_ALIGN_K_DESC_BLK - 1);
3602 if (bp->kmalloced)
3603 dma_free_coherent(bdev, alloc_size,
3604 bp->kmalloced, bp->kmalloced_dma);
3605
3606 dfx_bus_uninit(dev);
3607
3608 dfx_get_bars(bdev, &bar_start, &bar_len);
3609 if (dfx_use_mmio) {
3610 iounmap(bp->base.mem);
3611 release_mem_region(bar_start, bar_len);
3612 } else
3613 release_region(bar_start, bar_len);
3614
3615 if (dfx_bus_pci)
3616 pci_disable_device(to_pci_dev(bdev));
3617
3618 free_netdev(dev);
3619}
3620
3621
3622static int __devinit __maybe_unused dfx_dev_register(struct device *);
3623static int __devexit __maybe_unused dfx_dev_unregister(struct device *);
3624
3625#ifdef CONFIG_PCI
3626static int __devinit dfx_pci_register(struct pci_dev *,
3627 const struct pci_device_id *);
3628static void __devexit dfx_pci_unregister(struct pci_dev *);
3629
3630static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
3631 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3632 { }
3633};
3634MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3635
3636static struct pci_driver dfx_pci_driver = {
3637 .name = "defxx",
3638 .id_table = dfx_pci_table,
3639 .probe = dfx_pci_register,
3640 .remove = __devexit_p(dfx_pci_unregister),
3641};
3642
3643static __devinit int dfx_pci_register(struct pci_dev *pdev,
3644 const struct pci_device_id *ent)
3645{
3646 return dfx_register(&pdev->dev);
3647}
3648
3649static void __devexit dfx_pci_unregister(struct pci_dev *pdev)
3650{
3651 dfx_unregister(&pdev->dev);
3652}
3653#endif /* CONFIG_PCI */
3654
3655#ifdef CONFIG_EISA
3656static struct eisa_device_id dfx_eisa_table[] = {
3657 { "DEC3001", DEFEA_PROD_ID_1 },
3658 { "DEC3002", DEFEA_PROD_ID_2 },
3659 { "DEC3003", DEFEA_PROD_ID_3 },
3660 { "DEC3004", DEFEA_PROD_ID_4 },
3661 { }
3662};
3663MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3664
3665static struct eisa_driver dfx_eisa_driver = {
3666 .id_table = dfx_eisa_table,
3667 .driver = {
3668 .name = "defxx",
3669 .bus = &eisa_bus_type,
3670 .probe = dfx_dev_register,
3671 .remove = __devexit_p(dfx_dev_unregister),
3672 },
3673};
3674#endif /* CONFIG_EISA */
3675
3676#ifdef CONFIG_TC
3677static struct tc_device_id const dfx_tc_table[] = {
3678 { "DEC ", "PMAF-FA " },
3679 { "DEC ", "PMAF-FD " },
3680 { "DEC ", "PMAF-FS " },
3681 { "DEC ", "PMAF-FU " },
3682 { }
3683};
3684MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3685
3686static struct tc_driver dfx_tc_driver = {
3687 .id_table = dfx_tc_table,
3688 .driver = {
3689 .name = "defxx",
3690 .bus = &tc_bus_type,
3691 .probe = dfx_dev_register,
3692 .remove = __devexit_p(dfx_dev_unregister),
3693 },
3694};
3695#endif /* CONFIG_TC */
3696
3697static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
3698{
3699 int status;
3700
3701 status = dfx_register(dev);
3702 if (!status)
3703 get_device(dev);
3704 return status;
3705}
3706
3707static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
3708{
3709 put_device(dev);
3710 dfx_unregister(dev);
3711 return 0;
3712}
3713
3714
3715static int __devinit dfx_init(void)
3716{
3717 int status;
3718
3719 status = pci_register_driver(&dfx_pci_driver);
3720 if (!status)
3721 status = eisa_driver_register(&dfx_eisa_driver);
3722 if (!status)
3723 status = tc_register_driver(&dfx_tc_driver);
3724 return status;
3725}
3726
3727static void __devexit dfx_cleanup(void)
3728{
3729 tc_unregister_driver(&dfx_tc_driver);
3730 eisa_driver_unregister(&dfx_eisa_driver);
3731 pci_unregister_driver(&dfx_pci_driver);
3732}
3733
3734module_init(dfx_init);
3735module_exit(dfx_cleanup);
3736MODULE_AUTHOR("Lawrence V. Stefani");
3737MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3738 DRV_VERSION " " DRV_RELDATE);
3739MODULE_LICENSE("GPL");