/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

56#include <linux/module.h>
57#include <linux/kernel.h>
58#include <linux/skbuff.h>
59#include <linux/pci.h>
60#include <linux/errno.h>
61#include <linux/types.h>
62#include <linux/string.h>
63#include <linux/delay.h>
64#include <linux/init.h>
65#include <linux/mm.h>
66#include <linux/sched.h>
67#include <linux/timer.h>
68#include <linux/interrupt.h>
69#include <linux/dma-mapping.h>
70#include <linux/bitmap.h>
71#include <linux/slab.h>
72#include <asm/io.h>
73#include <asm/byteorder.h>
74#include <asm/uaccess.h>
75
76#include <linux/atmdev.h>
77#include <linux/atm.h>
78#include <linux/sonet.h>
79
80#undef USE_SCATTERGATHER
81#undef USE_CHECKSUM_HW /* still confused about this */
82/* #undef HE_DEBUG */
83
84#include "he.h"
85#include "suni.h"
86#include <linux/atm_he.h>
87
88#define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89
90#ifdef HE_DEBUG
91#define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92#else /* !HE_DEBUG */
93#define HPRINTK(fmt,args...) do { } while (0)
94#endif /* HE_DEBUG */
95
96/* declarations */
97
98static int he_open(struct atm_vcc *vcc);
99static void he_close(struct atm_vcc *vcc);
100static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102static irqreturn_t he_irq_handler(int irq, void *dev_id);
103static void he_tasklet(unsigned long data);
104static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105static int he_start(struct atm_dev *dev);
106static void he_stop(struct he_dev *dev);
107static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109
110static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111
112/* globals */
113
114static struct he_dev *he_devs;
115static int disable64;
116static short nvpibits = -1;
117static short nvcibits = -1;
118static short rx_skb_reserve = 16;
119static int irq_coalesce = 1;
120static int sdh = 0;
121
122/* Read from EEPROM = 0000 0011b */
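/*
 * the table below bit-bangs the eeprom read opcode msb first (six zero
 * bits followed by two one bits, i.e. 0000 0011b): SI is set up while the
 * clock is low and the bit is latched on the following rising clock edge.
 */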
123static unsigned int readtab[] = {
124 CS_HIGH | CLK_HIGH,
125 CS_LOW | CLK_LOW,
126 CLK_HIGH, /* 0 */
127 CLK_LOW,
128 CLK_HIGH, /* 0 */
129 CLK_LOW,
130 CLK_HIGH, /* 0 */
131 CLK_LOW,
132 CLK_HIGH, /* 0 */
133 CLK_LOW,
134 CLK_HIGH, /* 0 */
135 CLK_LOW,
136 CLK_HIGH, /* 0 */
137 CLK_LOW | SI_HIGH,
138 CLK_HIGH | SI_HIGH, /* 1 */
139 CLK_LOW | SI_HIGH,
140 CLK_HIGH | SI_HIGH /* 1 */
141};
142
143/* Clock to read from/write to the EEPROM */
144static unsigned int clocktab[] = {
145 CLK_LOW,
146 CLK_HIGH,
147 CLK_LOW,
148 CLK_HIGH,
149 CLK_LOW,
150 CLK_HIGH,
151 CLK_LOW,
152 CLK_HIGH,
153 CLK_LOW,
154 CLK_HIGH,
155 CLK_LOW,
156 CLK_HIGH,
157 CLK_LOW,
158 CLK_HIGH,
159 CLK_LOW,
160 CLK_HIGH,
161 CLK_LOW
162};
163
164static struct atmdev_ops he_ops =
165{
166 .open = he_open,
167 .close = he_close,
168 .ioctl = he_ioctl,
169 .send = he_send,
170 .phy_put = he_phy_put,
171 .phy_get = he_phy_get,
172 .proc_read = he_proc_read,
173 .owner = THIS_MODULE
174};
175
176#define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177#define he_readl(dev, reg) readl((dev)->membase + (reg))
178
179/* section 2.12 connection memory access */
180
181static __inline__ void
182he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 unsigned flags)
184{
185 he_writel(he_dev, val, CON_DAT);
186 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
187 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189}
190
191#define he_writel_rcm(dev, val, reg) \
192 he_writel_internal(dev, val, reg, CON_CTL_RCM)
193
194#define he_writel_tcm(dev, val, reg) \
195 he_writel_internal(dev, val, reg, CON_CTL_TCM)
196
197#define he_writel_mbox(dev, val, reg) \
198 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199
200static unsigned
201he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202{
203 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 return he_readl(he_dev, CON_DAT);
206}
207
208#define he_readl_rcm(dev, reg) \
209 he_readl_internal(dev, reg, CON_CTL_RCM)
210
211#define he_readl_tcm(dev, reg) \
212 he_readl_internal(dev, reg, CON_CTL_TCM)
213
214#define he_readl_mbox(dev, reg) \
215 he_readl_internal(dev, reg, CON_CTL_MBOX)
216
217
218/* figure 2.2 connection id */
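/* the cid packs the vpi above the vci field and is masked down to the
   13-bit connection id width used by the adapter */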
219
220#define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
221
222/* 2.5.1 per connection transmit state registers */
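/*
 * tsr0..7 live in the TSRA region at eight words per connection (hence
 * cid << 3 below), tsr8..11 in TSRB at four words per connection
 * (cid << 2), tsr12..13 in TSRC at two words (cid << 1), and tsr14 in
 * TSRD at one word per connection.
 */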
223
224#define he_writel_tsr0(dev, val, cid) \
225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226#define he_readl_tsr0(dev, cid) \
227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228
229#define he_writel_tsr1(dev, val, cid) \
230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231
232#define he_writel_tsr2(dev, val, cid) \
233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234
235#define he_writel_tsr3(dev, val, cid) \
236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237
238#define he_writel_tsr4(dev, val, cid) \
239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240
241 /* from page 2-20
242 *
243 * NOTE While the transmit connection is active, bits 23 through 0
244 * of this register must not be written by the host. Byte
245 * enables should be used during normal operation when writing
246 * the most significant byte.
247 */
248
249#define he_writel_tsr4_upper(dev, val, cid) \
250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 CON_CTL_TCM \
252 | CON_BYTE_DISABLE_2 \
253 | CON_BYTE_DISABLE_1 \
254 | CON_BYTE_DISABLE_0)
255
256#define he_readl_tsr4(dev, cid) \
257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258
259#define he_writel_tsr5(dev, val, cid) \
260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261
262#define he_writel_tsr6(dev, val, cid) \
263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264
265#define he_writel_tsr7(dev, val, cid) \
266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267
268
269#define he_writel_tsr8(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271
272#define he_writel_tsr9(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274
275#define he_writel_tsr10(dev, val, cid) \
276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277
278#define he_writel_tsr11(dev, val, cid) \
279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280
281
282#define he_writel_tsr12(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284
285#define he_writel_tsr13(dev, val, cid) \
286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287
288
289#define he_writel_tsr14(dev, val, cid) \
290 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291
292#define he_writel_tsr14_upper(dev, val, cid) \
293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 CON_CTL_TCM \
295 | CON_BYTE_DISABLE_2 \
296 | CON_BYTE_DISABLE_1 \
297 | CON_BYTE_DISABLE_0)
298
299/* 2.7.1 per connection receive state registers */
300
301#define he_writel_rsr0(dev, val, cid) \
302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303#define he_readl_rsr0(dev, cid) \
304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305
306#define he_writel_rsr1(dev, val, cid) \
307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308
309#define he_writel_rsr2(dev, val, cid) \
310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311
312#define he_writel_rsr3(dev, val, cid) \
313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314
315#define he_writel_rsr4(dev, val, cid) \
316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317
318#define he_writel_rsr5(dev, val, cid) \
319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320
321#define he_writel_rsr6(dev, val, cid) \
322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323
324#define he_writel_rsr7(dev, val, cid) \
325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326
327static __inline__ struct atm_vcc*
328__find_vcc(struct he_dev *he_dev, unsigned cid)
329{
330 struct hlist_head *head;
331 struct atm_vcc *vcc;
332 struct hlist_node *node;
333 struct sock *s;
334 short vpi;
335 int vci;
336
337 vpi = cid >> he_dev->vcibits;
338 vci = cid & ((1 << he_dev->vcibits) - 1);
339 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
340
341 sk_for_each(s, node, head) {
342 vcc = atm_sk(s);
343 if (vcc->dev == he_dev->atm_dev &&
344 vcc->vci == vci && vcc->vpi == vpi &&
345 vcc->qos.rxtp.traffic_class != ATM_NONE) {
346 return vcc;
347 }
348 }
349 return NULL;
350}
351
352static int __devinit
353he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
354{
355 struct atm_dev *atm_dev = NULL;
356 struct he_dev *he_dev = NULL;
357 int err = 0;
358
359 printk(KERN_INFO "ATM he driver\n");
360
361 if (pci_enable_device(pci_dev))
362 return -EIO;
363 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
364 printk(KERN_WARNING "he: no suitable dma available\n");
365 err = -EIO;
366 goto init_one_failure;
367 }
368
369 atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
370 if (!atm_dev) {
371 err = -ENODEV;
372 goto init_one_failure;
373 }
374 pci_set_drvdata(pci_dev, atm_dev);
375
376 he_dev = kzalloc(sizeof(struct he_dev),
377 GFP_KERNEL);
378 if (!he_dev) {
379 err = -ENOMEM;
380 goto init_one_failure;
381 }
382 he_dev->pci_dev = pci_dev;
383 he_dev->atm_dev = atm_dev;
384 he_dev->atm_dev->dev_data = he_dev;
385 atm_dev->dev_data = he_dev;
386 he_dev->number = atm_dev->number;
387 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
388 spin_lock_init(&he_dev->global_lock);
389
390 if (he_start(atm_dev)) {
391 he_stop(he_dev);
392 err = -ENODEV;
393 goto init_one_failure;
394 }
395 he_dev->next = NULL;
396 if (he_devs)
397 he_dev->next = he_devs;
398 he_devs = he_dev;
399 return 0;
400
401init_one_failure:
402 if (atm_dev)
403 atm_dev_deregister(atm_dev);
404 kfree(he_dev);
405 pci_disable_device(pci_dev);
406 return err;
407}
408
409static void __devexit
410he_remove_one (struct pci_dev *pci_dev)
411{
412 struct atm_dev *atm_dev;
413 struct he_dev *he_dev;
414
415 atm_dev = pci_get_drvdata(pci_dev);
416 he_dev = HE_DEV(atm_dev);
417
418 /* need to remove from he_devs */
419
420 he_stop(he_dev);
421 atm_dev_deregister(atm_dev);
422 kfree(he_dev);
423
424 pci_set_drvdata(pci_dev, NULL);
425 pci_disable_device(pci_dev);
426}
427
428
429static unsigned
430rate_to_atmf(unsigned rate) /* cps to atm forum format */
431{
432#define NONZERO (1 << 14)
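	/*
	 * atm forum rate format, as decoded again in he_init_cs_block_rcm():
	 * bit 14 = non-zero flag, bits 13..9 = 5-bit exponent, bits 8..0 =
	 * 9-bit mantissa, i.e. rate ~= 2^exp * (1 + mantissa/512) cells/sec
	 */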
433
434 unsigned exp = 0;
435
436 if (rate == 0)
437 return 0;
438
439 rate <<= 9;
440 while (rate > 0x3ff) {
441 ++exp;
442 rate >>= 1;
443 }
444
445 return (NONZERO | (exp << 9) | (rate & 0x1ff));
446}
447
448static void __devinit
449he_init_rx_lbfp0(struct he_dev *he_dev)
450{
451 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
452 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
453 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
454 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
455
456 lbufd_index = 0;
457 lbm_offset = he_readl(he_dev, RCMLBM_BA);
458
459 he_writel(he_dev, lbufd_index, RLBF0_H);
460
461 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
462 lbufd_index += 2;
463 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
464
465 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
466 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
467
468 if (++lbuf_count == lbufs_per_row) {
469 lbuf_count = 0;
470 row_offset += he_dev->bytes_per_row;
471 }
472 lbm_offset += 4;
473 }
474
475 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
476 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
477}
478
479static void __devinit
480he_init_rx_lbfp1(struct he_dev *he_dev)
481{
482 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
483 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
484 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
485 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
486
487 lbufd_index = 1;
488 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
489
490 he_writel(he_dev, lbufd_index, RLBF1_H);
491
492 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
493 lbufd_index += 2;
494 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
495
496 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
497 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
498
499 if (++lbuf_count == lbufs_per_row) {
500 lbuf_count = 0;
501 row_offset += he_dev->bytes_per_row;
502 }
503 lbm_offset += 4;
504 }
505
506 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
507 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
508}
509
510static void __devinit
511he_init_tx_lbfp(struct he_dev *he_dev)
512{
513 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
514 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
515 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
516 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
517
518 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
519 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
520
521 he_writel(he_dev, lbufd_index, TLBF_H);
522
523 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
524 lbufd_index += 1;
525 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
526
527 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
528 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
529
530 if (++lbuf_count == lbufs_per_row) {
531 lbuf_count = 0;
532 row_offset += he_dev->bytes_per_row;
533 }
534 lbm_offset += 2;
535 }
536
537 he_writel(he_dev, lbufd_index - 1, TLBF_T);
538}
539
540static int __devinit
541he_init_tpdrq(struct he_dev *he_dev)
542{
543 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
544 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
545 if (he_dev->tpdrq_base == NULL) {
546 hprintk("failed to alloc tpdrq\n");
547 return -ENOMEM;
548 }
549 memset(he_dev->tpdrq_base, 0,
550 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
551
552 he_dev->tpdrq_tail = he_dev->tpdrq_base;
553 he_dev->tpdrq_head = he_dev->tpdrq_base;
554
555 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
556 he_writel(he_dev, 0, TPDRQ_T);
557 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
558
559 return 0;
560}
561
562static void __devinit
563he_init_cs_block(struct he_dev *he_dev)
564{
565 unsigned clock, rate, delta;
566 int reg;
567
568 /* 5.1.7 cs block initialization */
569
570 for (reg = 0; reg < 0x20; ++reg)
571 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
572
573 /* rate grid timer reload values */
574
575 clock = he_is622(he_dev) ? 66667000 : 50000000;
576 rate = he_dev->atm_dev->link_rate;
577 delta = rate / 16 / 2;
578
579 for (reg = 0; reg < 0x10; ++reg) {
580 /* 2.4 internal transmit function
581 *
582 * we initialize the first row in the rate grid.
583 * values are period (in clock cycles) of timer
584 */
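		/* e.g. on a 155 card: 50000000 / 353207 (ATM_OC3_PCR) ~= 141
		   clock cycles for the full link rate entry */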
585 unsigned period = clock / rate;
586
587 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
588 rate -= delta;
589 }
590
591 if (he_is622(he_dev)) {
592 /* table 5.2 (4 cells per lbuf) */
593 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
594 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
595 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
596 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
597 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
598
599 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
600 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
601 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
602 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
603 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
604 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
605 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
606
607 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
608
609 /* table 5.8 */
610 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
611 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
612 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
613 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
614 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
615 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
616
617 /* table 5.9 */
618 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
619 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
620 } else {
621 /* table 5.1 (4 cells per lbuf) */
622 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
623 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
624 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
625 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
626 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
627
628 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
629 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
630 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
631 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
632 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
633 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
634 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
635
636 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
637
638 /* table 5.8 */
639 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
640 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
641 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
642 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
643 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
644 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
645
646 /* table 5.9 */
647 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
648 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
649 }
650
651 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
652
653 for (reg = 0; reg < 0x8; ++reg)
654 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
655
656}
657
658static int __devinit
659he_init_cs_block_rcm(struct he_dev *he_dev)
660{
661 unsigned (*rategrid)[16][16];
662 unsigned rate, delta;
663 int i, j, reg;
664
665 unsigned rate_atmf, exp, man;
666 unsigned long long rate_cps;
667 int mult, buf, buf_limit = 4;
668
669 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
670 if (!rategrid)
671 return -ENOMEM;
672
673 /* initialize rate grid group table */
674
675 for (reg = 0x0; reg < 0xff; ++reg)
676 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
677
678 /* initialize rate controller groups */
679
680 for (reg = 0x100; reg < 0x1ff; ++reg)
681 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
682
683 /* initialize tNrm lookup table */
684
	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */
688
689 /* initialize rate to group table */
690
691 rate = he_dev->atm_dev->link_rate;
692 delta = rate / 32;
693
694 /*
695 * 2.4 transmit internal functions
696 *
697 * we construct a copy of the rate grid used by the scheduler
698 * in order to construct the rate to group table below
699 */
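	/*
	 * row 0 steps down from the full link rate by link_rate/32 per column;
	 * each subsequent row halves the row above (the final row is quartered),
	 * so the grid spans line rate down to a handful of cells per second.
	 */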
700
701 for (j = 0; j < 16; j++) {
702 (*rategrid)[0][j] = rate;
703 rate -= delta;
704 }
705
706 for (i = 1; i < 16; i++)
707 for (j = 0; j < 16; j++)
708 if (i > 14)
709 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
710 else
711 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
712
713 /*
714 * 2.4 transmit internal function
715 *
716 * this table maps the upper 5 bits of exponent and mantissa
717 * of the atm forum representation of the rate into an index
718 * on rate grid
719 */
720
721 rate_atmf = 0;
722 while (rate_atmf < 0x400) {
723 man = (rate_atmf & 0x1f) << 4;
724 exp = rate_atmf >> 5;
725
726 /*
727 instead of '/ 512', use '>> 9' to prevent a call
728 to divdu3 on x86 platforms
729 */
730 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
731
732 if (rate_cps < 10)
733 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
734
735 for (i = 255; i > 0; i--)
736 if ((*rategrid)[i/16][i%16] >= rate_cps)
737 break; /* pick nearest rate instead? */
738
739 /*
740 * each table entry is 16 bits: (rate grid index (8 bits)
741 * and a buffer limit (8 bits)
742 * there are two table entries in each 32-bit register
743 */
744
745#ifdef notdef
746 buf = rate_cps * he_dev->tx_numbuffs /
747 (he_dev->atm_dev->link_rate * 2);
748#else
749 /* this is pretty, but avoids _divdu3 and is mostly correct */
750 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
751 if (rate_cps > (272 * mult))
752 buf = 4;
753 else if (rate_cps > (204 * mult))
754 buf = 3;
755 else if (rate_cps > (136 * mult))
756 buf = 2;
757 else if (rate_cps > (68 * mult))
758 buf = 1;
759 else
760 buf = 0;
761#endif
762 if (buf > buf_limit)
763 buf = buf_limit;
764 reg = (reg << 16) | ((i << 8) | buf);
765
766#define RTGTBL_OFFSET 0x400
767
768 if (rate_atmf & 0x1)
769 he_writel_rcm(he_dev, reg,
770 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
771
772 ++rate_atmf;
773 }
774
775 kfree(rategrid);
776 return 0;
777}
778
779static int __devinit
780he_init_group(struct he_dev *he_dev, int group)
781{
782 struct he_buff *heb, *next;
783 dma_addr_t mapping;
784 int i;
785
786 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
787 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
788 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
789 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
790 G0_RBPS_BS + (group * 32));
791
792 /* bitmap table */
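	/*
	 * rbpl_table tracks which pool indices currently hold a buffer owned
	 * by the adapter's free list; rbpl_virt maps the index returned in an
	 * rbrq entry back to its host he_buff.
	 */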
793 he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
794 * sizeof(unsigned long), GFP_KERNEL);
795 if (!he_dev->rbpl_table) {
796 hprintk("unable to allocate rbpl bitmap table\n");
797 return -ENOMEM;
798 }
799 bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
800
801 /* rbpl_virt 64-bit pointers */
802 he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
803 * sizeof(struct he_buff *), GFP_KERNEL);
804 if (!he_dev->rbpl_virt) {
805 hprintk("unable to allocate rbpl virt table\n");
806 goto out_free_rbpl_table;
807 }
808
809 /* large buffer pool */
810 he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
811 CONFIG_RBPL_BUFSIZE, 64, 0);
812 if (he_dev->rbpl_pool == NULL) {
813 hprintk("unable to create rbpl pool\n");
814 goto out_free_rbpl_virt;
815 }
816
817 he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
818 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
819 if (he_dev->rbpl_base == NULL) {
820 hprintk("failed to alloc rbpl_base\n");
821 goto out_destroy_rbpl_pool;
822 }
823 memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
824
825 INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
826
827 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
828
829 heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
830 if (!heb)
831 goto out_free_rbpl;
832 heb->mapping = mapping;
833 list_add(&heb->entry, &he_dev->rbpl_outstanding);
834
835 set_bit(i, he_dev->rbpl_table);
836 he_dev->rbpl_virt[i] = heb;
837 he_dev->rbpl_hint = i + 1;
838 he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
839 he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
840 }
841 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
842
843 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
844 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
845 G0_RBPL_T + (group * 32));
846 he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
847 G0_RBPL_BS + (group * 32));
848 he_writel(he_dev,
849 RBP_THRESH(CONFIG_RBPL_THRESH) |
850 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
851 RBP_INT_ENB,
852 G0_RBPL_QI + (group * 32));
853
854 /* rx buffer ready queue */
855
856 he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
857 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
858 if (he_dev->rbrq_base == NULL) {
859 hprintk("failed to allocate rbrq\n");
860 goto out_free_rbpl;
861 }
862 memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
863
864 he_dev->rbrq_head = he_dev->rbrq_base;
865 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
866 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
867 he_writel(he_dev,
868 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
869 G0_RBRQ_Q + (group * 16));
870 if (irq_coalesce) {
871 hprintk("coalescing interrupts\n");
872 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
873 G0_RBRQ_I + (group * 16));
874 } else
875 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
876 G0_RBRQ_I + (group * 16));
877
878 /* tx buffer ready queue */
879
880 he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
881 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
882 if (he_dev->tbrq_base == NULL) {
883 hprintk("failed to allocate tbrq\n");
884 goto out_free_rbpq_base;
885 }
886 memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
887
888 he_dev->tbrq_head = he_dev->tbrq_base;
889
890 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
891 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
892 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
893 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
894
895 return 0;
896
897out_free_rbpq_base:
898 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
899 sizeof(struct he_rbrq), he_dev->rbrq_base,
900 he_dev->rbrq_phys);
901out_free_rbpl:
902 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
903 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
904
905 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
906 sizeof(struct he_rbp), he_dev->rbpl_base,
907 he_dev->rbpl_phys);
908out_destroy_rbpl_pool:
909 pci_pool_destroy(he_dev->rbpl_pool);
910out_free_rbpl_virt:
911 kfree(he_dev->rbpl_virt);
912out_free_rbpl_table:
913 kfree(he_dev->rbpl_table);
914
915 return -ENOMEM;
916}
917
918static int __devinit
919he_init_irq(struct he_dev *he_dev)
920{
921 int i;
922
923 /* 2.9.3.5 tail offset for each interrupt queue is located after the
924 end of the interrupt queue */
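	/* the adapter updates this extra word with the current tail index;
	   he_irq_handler() reads it back to locate the queue tail */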
925
926 he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
927 (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
928 if (he_dev->irq_base == NULL) {
929 hprintk("failed to allocate irq\n");
930 return -ENOMEM;
931 }
932 he_dev->irq_tailoffset = (unsigned *)
933 &he_dev->irq_base[CONFIG_IRQ_SIZE];
934 *he_dev->irq_tailoffset = 0;
935 he_dev->irq_head = he_dev->irq_base;
936 he_dev->irq_tail = he_dev->irq_base;
937
938 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
939 he_dev->irq_base[i].isw = ITYPE_INVALID;
940
941 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
942 he_writel(he_dev,
943 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
944 IRQ0_HEAD);
945 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
946 he_writel(he_dev, 0x0, IRQ0_DATA);
947
948 he_writel(he_dev, 0x0, IRQ1_BASE);
949 he_writel(he_dev, 0x0, IRQ1_HEAD);
950 he_writel(he_dev, 0x0, IRQ1_CNTL);
951 he_writel(he_dev, 0x0, IRQ1_DATA);
952
953 he_writel(he_dev, 0x0, IRQ2_BASE);
954 he_writel(he_dev, 0x0, IRQ2_HEAD);
955 he_writel(he_dev, 0x0, IRQ2_CNTL);
956 he_writel(he_dev, 0x0, IRQ2_DATA);
957
958 he_writel(he_dev, 0x0, IRQ3_BASE);
959 he_writel(he_dev, 0x0, IRQ3_HEAD);
960 he_writel(he_dev, 0x0, IRQ3_CNTL);
961 he_writel(he_dev, 0x0, IRQ3_DATA);
962
963 /* 2.9.3.2 interrupt queue mapping registers */
964
965 he_writel(he_dev, 0x0, GRP_10_MAP);
966 he_writel(he_dev, 0x0, GRP_32_MAP);
967 he_writel(he_dev, 0x0, GRP_54_MAP);
968 he_writel(he_dev, 0x0, GRP_76_MAP);
969
970 if (request_irq(he_dev->pci_dev->irq,
971 he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
972 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
973 return -EINVAL;
974 }
975
976 he_dev->irq = he_dev->pci_dev->irq;
977
978 return 0;
979}
980
981static int __devinit
982he_start(struct atm_dev *dev)
983{
984 struct he_dev *he_dev;
985 struct pci_dev *pci_dev;
986 unsigned long membase;
987
988 u16 command;
989 u32 gen_cntl_0, host_cntl, lb_swap;
990 u8 cache_size, timer;
991
992 unsigned err;
993 unsigned int status, reg;
994 int i, group;
995
996 he_dev = HE_DEV(dev);
997 pci_dev = he_dev->pci_dev;
998
999 membase = pci_resource_start(pci_dev, 0);
1000 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
1001
1002 /*
1003 * pci bus controller initialization
1004 */
1005
1006 /* 4.3 pci bus controller-specific initialization */
1007 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1008 hprintk("can't read GEN_CNTL_0\n");
1009 return -EINVAL;
1010 }
1011 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1012 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1013 hprintk("can't write GEN_CNTL_0.\n");
1014 return -EINVAL;
1015 }
1016
1017 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1018 hprintk("can't read PCI_COMMAND.\n");
1019 return -EINVAL;
1020 }
1021
1022 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1023 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1024 hprintk("can't enable memory.\n");
1025 return -EINVAL;
1026 }
1027
1028 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1029 hprintk("can't read cache line size?\n");
1030 return -EINVAL;
1031 }
1032
1033 if (cache_size < 16) {
1034 cache_size = 16;
1035 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1036 hprintk("can't set cache line size to %d\n", cache_size);
1037 }
1038
1039 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1040 hprintk("can't read latency timer?\n");
1041 return -EINVAL;
1042 }
1043
1044 /* from table 3.9
1045 *
1046 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1047 *
1048 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1049 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1050 *
1051 */
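/* e.g. for the 622 card: 1 + 16 + 192 = 209 */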
1052#define LAT_TIMER 209
1053 if (timer < LAT_TIMER) {
1054 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1055 timer = LAT_TIMER;
1056 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1057 hprintk("can't set latency timer to %d\n", timer);
1058 }
1059
1060 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1061 hprintk("can't set up page mapping\n");
1062 return -EINVAL;
1063 }
1064
1065 /* 4.4 card reset */
1066 he_writel(he_dev, 0x0, RESET_CNTL);
1067 he_writel(he_dev, 0xff, RESET_CNTL);
1068
	mdelay(16);	/* 16 ms */
1070 status = he_readl(he_dev, RESET_CNTL);
1071 if ((status & BOARD_RST_STATUS) == 0) {
1072 hprintk("reset failed\n");
1073 return -EINVAL;
1074 }
1075
1076 /* 4.5 set bus width */
1077 host_cntl = he_readl(he_dev, HOST_CNTL);
1078 if (host_cntl & PCI_BUS_SIZE64)
1079 gen_cntl_0 |= ENBL_64;
1080 else
1081 gen_cntl_0 &= ~ENBL_64;
1082
1083 if (disable64 == 1) {
1084 hprintk("disabling 64-bit pci bus transfers\n");
1085 gen_cntl_0 &= ~ENBL_64;
1086 }
1087
1088 if (gen_cntl_0 & ENBL_64)
1089 hprintk("64-bit transfers enabled\n");
1090
1091 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1092
1093 /* 4.7 read prom contents */
1094 for (i = 0; i < PROD_ID_LEN; ++i)
1095 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1096
1097 he_dev->media = read_prom_byte(he_dev, MEDIA);
1098
1099 for (i = 0; i < 6; ++i)
1100 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1101
1102 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1103 he_dev->prod_id,
1104 he_dev->media & 0x40 ? "SM" : "MM",
1105 dev->esi[0],
1106 dev->esi[1],
1107 dev->esi[2],
1108 dev->esi[3],
1109 dev->esi[4],
1110 dev->esi[5]);
1111 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1112 ATM_OC12_PCR : ATM_OC3_PCR;
1113
	/* 4.6 set host endianness */
1115 lb_swap = he_readl(he_dev, LB_SWAP);
1116 if (he_is622(he_dev))
1117 lb_swap &= ~XFER_SIZE; /* 4 cells */
1118 else
1119 lb_swap |= XFER_SIZE; /* 8 cells */
1120#ifdef __BIG_ENDIAN
1121 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1122#else
1123 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1124 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1125#endif /* __BIG_ENDIAN */
1126 he_writel(he_dev, lb_swap, LB_SWAP);
1127
1128 /* 4.8 sdram controller initialization */
1129 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1130
1131 /* 4.9 initialize rnum value */
1132 lb_swap |= SWAP_RNUM_MAX(0xf);
1133 he_writel(he_dev, lb_swap, LB_SWAP);
1134
1135 /* 4.10 initialize the interrupt queues */
1136 if ((err = he_init_irq(he_dev)) != 0)
1137 return err;
1138
1139 /* 4.11 enable pci bus controller state machines */
1140 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1141 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1142 he_writel(he_dev, host_cntl, HOST_CNTL);
1143
1144 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1145 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1146
1147 /*
1148 * atm network controller initialization
1149 */
1150
1151 /* 5.1.1 generic configuration state */
1152
1153 /*
1154 * local (cell) buffer memory map
1155 *
1156 * HE155 HE622
1157 *
1158 * 0 ____________1023 bytes 0 _______________________2047 bytes
1159 * | | | | |
1160 * | utility | | rx0 | |
1161 * 5|____________| 255|___________________| u |
1162 * 6| | 256| | t |
1163 * | | | | i |
1164 * | rx0 | row | tx | l |
1165 * | | | | i |
1166 * | | 767|___________________| t |
1167 * 517|____________| 768| | y |
1168 * row 518| | | rx1 | |
1169 * | | 1023|___________________|___|
1170 * | |
1171 * | tx |
1172 * | |
1173 * | |
1174 * 1535|____________|
1175 * 1536| |
1176 * | rx1 |
1177 * 2047|____________|
1178 *
1179 */
1180
1181 /* total 4096 connections */
1182 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1183 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1184
1185 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1186 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1187 return -ENODEV;
1188 }
1189
1190 if (nvpibits != -1) {
1191 he_dev->vpibits = nvpibits;
1192 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1193 }
1194
1195 if (nvcibits != -1) {
1196 he_dev->vcibits = nvcibits;
1197 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1198 }
1199
1200
1201 if (he_is622(he_dev)) {
1202 he_dev->cells_per_row = 40;
1203 he_dev->bytes_per_row = 2048;
1204 he_dev->r0_numrows = 256;
1205 he_dev->tx_numrows = 512;
1206 he_dev->r1_numrows = 256;
1207 he_dev->r0_startrow = 0;
1208 he_dev->tx_startrow = 256;
1209 he_dev->r1_startrow = 768;
1210 } else {
1211 he_dev->cells_per_row = 20;
1212 he_dev->bytes_per_row = 1024;
1213 he_dev->r0_numrows = 512;
1214 he_dev->tx_numrows = 1018;
1215 he_dev->r1_numrows = 512;
1216 he_dev->r0_startrow = 6;
1217 he_dev->tx_startrow = 518;
1218 he_dev->r1_startrow = 1536;
1219 }
1220
1221 he_dev->cells_per_lbuf = 4;
1222 he_dev->buffer_limit = 4;
1223 he_dev->r0_numbuffs = he_dev->r0_numrows *
1224 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1225 if (he_dev->r0_numbuffs > 2560)
1226 he_dev->r0_numbuffs = 2560;
1227
1228 he_dev->r1_numbuffs = he_dev->r1_numrows *
1229 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1230 if (he_dev->r1_numbuffs > 2560)
1231 he_dev->r1_numbuffs = 2560;
1232
1233 he_dev->tx_numbuffs = he_dev->tx_numrows *
1234 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1235 if (he_dev->tx_numbuffs > 5120)
1236 he_dev->tx_numbuffs = 5120;
1237
1238 /* 5.1.2 configure hardware dependent registers */
1239
1240 he_writel(he_dev,
1241 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1242 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1243 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1244 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1245 LBARB);
1246
1247 he_writel(he_dev, BANK_ON |
1248 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1249 SDRAMCON);
1250
1251 he_writel(he_dev,
1252 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1253 RM_RW_WAIT(1), RCMCONFIG);
1254 he_writel(he_dev,
1255 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1256 TM_RW_WAIT(1), TCMCONFIG);
1257
1258 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1259
1260 he_writel(he_dev,
1261 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1262 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1263 RX_VALVP(he_dev->vpibits) |
1264 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1265
1266 he_writel(he_dev, DRF_THRESH(0x20) |
1267 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1268 TX_VCI_MASK(he_dev->vcibits) |
1269 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1270
1271 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1272
1273 he_writel(he_dev, PHY_INT_ENB |
1274 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1275 RH_CONFIG);
1276
1277 /* 5.1.3 initialize connection memory */
1278
1279 for (i = 0; i < TCM_MEM_SIZE; ++i)
1280 he_writel_tcm(he_dev, 0, i);
1281
1282 for (i = 0; i < RCM_MEM_SIZE; ++i)
1283 he_writel_rcm(he_dev, 0, i);
1284
1285 /*
1286 * transmit connection memory map
1287 *
1288 * tx memory
1289 * 0x0 ___________________
1290 * | |
1291 * | |
1292 * | TSRa |
1293 * | |
1294 * | |
1295 * 0x8000|___________________|
1296 * | |
1297 * | TSRb |
1298 * 0xc000|___________________|
1299 * | |
1300 * | TSRc |
1301 * 0xe000|___________________|
1302 * | TSRd |
1303 * 0xf000|___________________|
1304 * | tmABR |
1305 * 0x10000|___________________|
1306 * | |
1307 * | tmTPD |
1308 * |___________________|
1309 * | |
1310 * ....
1311 * 0x1ffff|___________________|
1312 *
1313 *
1314 */
1315
1316 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1317 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1318 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1319 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1320 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1321
1322
1323 /*
1324 * receive connection memory map
1325 *
1326 * 0x0 ___________________
1327 * | |
1328 * | |
1329 * | RSRa |
1330 * | |
1331 * | |
1332 * 0x8000|___________________|
1333 * | |
1334 * | rx0/1 |
1335 * | LBM | link lists of local
1336 * | tx | buffer memory
1337 * | |
1338 * 0xd000|___________________|
1339 * | |
1340 * | rmABR |
1341 * 0xe000|___________________|
1342 * | |
1343 * | RSRb |
1344 * |___________________|
1345 * | |
1346 * ....
1347 * 0xffff|___________________|
1348 */
1349
1350 he_writel(he_dev, 0x08000, RCMLBM_BA);
1351 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1352 he_writel(he_dev, 0x0d800, RCMABR_BA);
1353
1354 /* 5.1.4 initialize local buffer free pools linked lists */
1355
1356 he_init_rx_lbfp0(he_dev);
1357 he_init_rx_lbfp1(he_dev);
1358
1359 he_writel(he_dev, 0x0, RLBC_H);
1360 he_writel(he_dev, 0x0, RLBC_T);
1361 he_writel(he_dev, 0x0, RLBC_H2);
1362
1363 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1364 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1365
1366 he_init_tx_lbfp(he_dev);
1367
1368 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1369
1370 /* 5.1.5 initialize intermediate receive queues */
1371
1372 if (he_is622(he_dev)) {
1373 he_writel(he_dev, 0x000f, G0_INMQ_S);
1374 he_writel(he_dev, 0x200f, G0_INMQ_L);
1375
1376 he_writel(he_dev, 0x001f, G1_INMQ_S);
1377 he_writel(he_dev, 0x201f, G1_INMQ_L);
1378
1379 he_writel(he_dev, 0x002f, G2_INMQ_S);
1380 he_writel(he_dev, 0x202f, G2_INMQ_L);
1381
1382 he_writel(he_dev, 0x003f, G3_INMQ_S);
1383 he_writel(he_dev, 0x203f, G3_INMQ_L);
1384
1385 he_writel(he_dev, 0x004f, G4_INMQ_S);
1386 he_writel(he_dev, 0x204f, G4_INMQ_L);
1387
1388 he_writel(he_dev, 0x005f, G5_INMQ_S);
1389 he_writel(he_dev, 0x205f, G5_INMQ_L);
1390
1391 he_writel(he_dev, 0x006f, G6_INMQ_S);
1392 he_writel(he_dev, 0x206f, G6_INMQ_L);
1393
1394 he_writel(he_dev, 0x007f, G7_INMQ_S);
1395 he_writel(he_dev, 0x207f, G7_INMQ_L);
1396 } else {
1397 he_writel(he_dev, 0x0000, G0_INMQ_S);
1398 he_writel(he_dev, 0x0008, G0_INMQ_L);
1399
1400 he_writel(he_dev, 0x0001, G1_INMQ_S);
1401 he_writel(he_dev, 0x0009, G1_INMQ_L);
1402
1403 he_writel(he_dev, 0x0002, G2_INMQ_S);
1404 he_writel(he_dev, 0x000a, G2_INMQ_L);
1405
1406 he_writel(he_dev, 0x0003, G3_INMQ_S);
1407 he_writel(he_dev, 0x000b, G3_INMQ_L);
1408
1409 he_writel(he_dev, 0x0004, G4_INMQ_S);
1410 he_writel(he_dev, 0x000c, G4_INMQ_L);
1411
1412 he_writel(he_dev, 0x0005, G5_INMQ_S);
1413 he_writel(he_dev, 0x000d, G5_INMQ_L);
1414
1415 he_writel(he_dev, 0x0006, G6_INMQ_S);
1416 he_writel(he_dev, 0x000e, G6_INMQ_L);
1417
1418 he_writel(he_dev, 0x0007, G7_INMQ_S);
1419 he_writel(he_dev, 0x000f, G7_INMQ_L);
1420 }
1421
1422 /* 5.1.6 application tunable parameters */
1423
1424 he_writel(he_dev, 0x0, MCC);
1425 he_writel(he_dev, 0x0, OEC);
1426 he_writel(he_dev, 0x0, DCC);
1427 he_writel(he_dev, 0x0, CEC);
1428
1429 /* 5.1.7 cs block initialization */
1430
1431 he_init_cs_block(he_dev);
1432
1433 /* 5.1.8 cs block connection memory initialization */
1434
1435 if (he_init_cs_block_rcm(he_dev) < 0)
1436 return -ENOMEM;
1437
1438 /* 5.1.10 initialize host structures */
1439
1440 he_init_tpdrq(he_dev);
1441
1442 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1443 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1444 if (he_dev->tpd_pool == NULL) {
1445 hprintk("unable to create tpd pci_pool\n");
1446 return -ENOMEM;
1447 }
1448
1449 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1450
1451 if (he_init_group(he_dev, 0) != 0)
1452 return -ENOMEM;
1453
1454 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1455 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1456 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1457 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1458 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1459 G0_RBPS_BS + (group * 32));
1460
1461 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1462 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1463 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1464 G0_RBPL_QI + (group * 32));
1465 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1466
1467 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1468 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1469 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1470 G0_RBRQ_Q + (group * 16));
1471 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1472
1473 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1474 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1475 he_writel(he_dev, TBRQ_THRESH(0x1),
1476 G0_TBRQ_THRESH + (group * 16));
1477 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1478 }
1479
1480 /* host status page */
1481
1482 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1483 sizeof(struct he_hsp), &he_dev->hsp_phys);
1484 if (he_dev->hsp == NULL) {
1485 hprintk("failed to allocate host status page\n");
1486 return -ENOMEM;
1487 }
1488 memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1489 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1490
1491 /* initialize framer */
1492
1493#ifdef CONFIG_ATM_HE_USE_SUNI
1494 if (he_isMM(he_dev))
1495 suni_init(he_dev->atm_dev);
1496 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1497 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1498#endif /* CONFIG_ATM_HE_USE_SUNI */
1499
1500 if (sdh) {
1501 /* this really should be in suni.c but for now... */
1502 int val;
1503
1504 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1505 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1506 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1507 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1508 }
1509
1510 /* 5.1.12 enable transmit and receive */
1511
1512 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1513 reg |= TX_ENABLE|ER_ENABLE;
1514 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1515
1516 reg = he_readl(he_dev, RC_CONFIG);
1517 reg |= RX_ENABLE;
1518 he_writel(he_dev, reg, RC_CONFIG);
1519
1520 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1521 he_dev->cs_stper[i].inuse = 0;
1522 he_dev->cs_stper[i].pcr = -1;
1523 }
1524 he_dev->total_bw = 0;
1525
1526
1527 /* atm linux initialization */
1528
1529 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1530 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1531
1532 he_dev->irq_peak = 0;
1533 he_dev->rbrq_peak = 0;
1534 he_dev->rbpl_peak = 0;
1535 he_dev->tbrq_peak = 0;
1536
1537 HPRINTK("hell bent for leather!\n");
1538
1539 return 0;
1540}
1541
1542static void
1543he_stop(struct he_dev *he_dev)
1544{
1545 struct he_buff *heb, *next;
1546 struct pci_dev *pci_dev;
1547 u32 gen_cntl_0, reg;
1548 u16 command;
1549
1550 pci_dev = he_dev->pci_dev;
1551
1552 /* disable interrupts */
1553
1554 if (he_dev->membase) {
1555 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1556 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1557 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1558
1559 tasklet_disable(&he_dev->tasklet);
1560
1561 /* disable recv and transmit */
1562
1563 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1564 reg &= ~(TX_ENABLE|ER_ENABLE);
1565 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1566
1567 reg = he_readl(he_dev, RC_CONFIG);
1568 reg &= ~(RX_ENABLE);
1569 he_writel(he_dev, reg, RC_CONFIG);
1570 }
1571
1572#ifdef CONFIG_ATM_HE_USE_SUNI
1573 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1574 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1575#endif /* CONFIG_ATM_HE_USE_SUNI */
1576
1577 if (he_dev->irq)
1578 free_irq(he_dev->irq, he_dev);
1579
1580 if (he_dev->irq_base)
1581 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1582 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1583
1584 if (he_dev->hsp)
1585 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1586 he_dev->hsp, he_dev->hsp_phys);
1587
1588 if (he_dev->rbpl_base) {
1589 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1590 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1591
1592 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1593 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1594 }
1595
1596 kfree(he_dev->rbpl_virt);
1597 kfree(he_dev->rbpl_table);
1598
1599 if (he_dev->rbpl_pool)
1600 pci_pool_destroy(he_dev->rbpl_pool);
1601
1602 if (he_dev->rbrq_base)
1603 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1604 he_dev->rbrq_base, he_dev->rbrq_phys);
1605
1606 if (he_dev->tbrq_base)
1607 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1608 he_dev->tbrq_base, he_dev->tbrq_phys);
1609
1610 if (he_dev->tpdrq_base)
1611 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1612 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1613
1614 if (he_dev->tpd_pool)
1615 pci_pool_destroy(he_dev->tpd_pool);
1616
1617 if (he_dev->pci_dev) {
1618 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1619 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1620 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1621 }
1622
1623 if (he_dev->membase)
1624 iounmap(he_dev->membase);
1625}
1626
1627static struct he_tpd *
1628__alloc_tpd(struct he_dev *he_dev)
1629{
1630 struct he_tpd *tpd;
1631 dma_addr_t mapping;
1632
1633 tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1634 if (tpd == NULL)
1635 return NULL;
1636
1637 tpd->status = TPD_ADDR(mapping);
1638 tpd->reserved = 0;
1639 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1640 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1641 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1642
1643 return tpd;
1644}
1645
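/*
 * the aal5 cpcs-pdu trailer occupies the last eight bytes of the
 * reassembled pdu (uu, cpi, 16-bit length, 32-bit crc), so the length
 * field sits six and five bytes from the end of the buffer.
 */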
1646#define AAL5_LEN(buf,len) \
1647 ((((unsigned char *)(buf))[(len)-6] << 8) | \
1648 (((unsigned char *)(buf))[(len)-5]))
1649
1650/* 2.10.1.2 receive
1651 *
1652 * aal5 packets can optionally return the tcp checksum in the lower
1653 * 16 bits of the crc (RSR0_TCP_CKSUM)
1654 */
1655
1656#define TCP_CKSUM(buf,len) \
1657 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1658 (((unsigned char *)(buf))[(len-1)]))
1659
1660static int
1661he_service_rbrq(struct he_dev *he_dev, int group)
1662{
1663 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1664 ((unsigned long)he_dev->rbrq_base |
1665 he_dev->hsp->group[group].rbrq_tail);
1666 unsigned cid, lastcid = -1;
1667 struct sk_buff *skb;
1668 struct atm_vcc *vcc = NULL;
1669 struct he_vcc *he_vcc;
1670 struct he_buff *heb, *next;
1671 int i;
1672 int pdus_assembled = 0;
1673 int updated = 0;
1674
1675 read_lock(&vcc_sklist_lock);
1676 while (he_dev->rbrq_head != rbrq_tail) {
1677 ++updated;
1678
1679 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1680 he_dev->rbrq_head, group,
1681 RBRQ_ADDR(he_dev->rbrq_head),
1682 RBRQ_BUFLEN(he_dev->rbrq_head),
1683 RBRQ_CID(he_dev->rbrq_head),
1684 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1685 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1686 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1687 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1688 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1689 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1690
1691 i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1692 heb = he_dev->rbpl_virt[i];
1693
1694 cid = RBRQ_CID(he_dev->rbrq_head);
1695 if (cid != lastcid)
1696 vcc = __find_vcc(he_dev, cid);
1697 lastcid = cid;
1698
1699 if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1700 hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
1701 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1702 clear_bit(i, he_dev->rbpl_table);
1703 list_del(&heb->entry);
1704 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1705 }
1706
1707 goto next_rbrq_entry;
1708 }
1709
1710 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1711 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1712 atomic_inc(&vcc->stats->rx_drop);
1713 goto return_host_buffers;
1714 }
1715
1716 heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1717 clear_bit(i, he_dev->rbpl_table);
1718 list_move_tail(&heb->entry, &he_vcc->buffers);
1719 he_vcc->pdu_len += heb->len;
1720
1721 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1722 lastcid = -1;
1723 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1724 wake_up(&he_vcc->rx_waitq);
1725 goto return_host_buffers;
1726 }
1727
1728 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1729 goto next_rbrq_entry;
1730
1731 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1732 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1733 HPRINTK("%s%s (%d.%d)\n",
1734 RBRQ_CRC_ERR(he_dev->rbrq_head)
1735 ? "CRC_ERR " : "",
1736 RBRQ_LEN_ERR(he_dev->rbrq_head)
1737 ? "LEN_ERR" : "",
1738 vcc->vpi, vcc->vci);
1739 atomic_inc(&vcc->stats->rx_err);
1740 goto return_host_buffers;
1741 }
1742
1743 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1744 GFP_ATOMIC);
1745 if (!skb) {
1746 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1747 goto return_host_buffers;
1748 }
1749
1750 if (rx_skb_reserve > 0)
1751 skb_reserve(skb, rx_skb_reserve);
1752
1753 __net_timestamp(skb);
1754
1755 list_for_each_entry(heb, &he_vcc->buffers, entry)
1756 memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1757
1758 switch (vcc->qos.aal) {
1759 case ATM_AAL0:
1760 /* 2.10.1.5 raw cell receive */
1761 skb->len = ATM_AAL0_SDU;
1762 skb_set_tail_pointer(skb, skb->len);
1763 break;
1764 case ATM_AAL5:
1765 /* 2.10.1.2 aal5 receive */
1766
1767 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1768 skb_set_tail_pointer(skb, skb->len);
1769#ifdef USE_CHECKSUM_HW
1770 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1771 skb->ip_summed = CHECKSUM_COMPLETE;
1772 skb->csum = TCP_CKSUM(skb->data,
1773 he_vcc->pdu_len);
1774 }
1775#endif
1776 break;
1777 }
1778
1779#ifdef should_never_happen
1780 if (skb->len > vcc->qos.rxtp.max_sdu)
1781 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1782#endif
1783
1784#ifdef notdef
1785 ATM_SKB(skb)->vcc = vcc;
1786#endif
1787 spin_unlock(&he_dev->global_lock);
1788 vcc->push(vcc, skb);
1789 spin_lock(&he_dev->global_lock);
1790
1791 atomic_inc(&vcc->stats->rx);
1792
1793return_host_buffers:
1794 ++pdus_assembled;
1795
1796 list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1797 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1798 INIT_LIST_HEAD(&he_vcc->buffers);
1799 he_vcc->pdu_len = 0;
1800
1801next_rbrq_entry:
1802 he_dev->rbrq_head = (struct he_rbrq *)
1803 ((unsigned long) he_dev->rbrq_base |
1804 RBRQ_MASK(he_dev->rbrq_head + 1));
1805
1806 }
1807 read_unlock(&vcc_sklist_lock);
1808
1809 if (updated) {
1810 if (updated > he_dev->rbrq_peak)
1811 he_dev->rbrq_peak = updated;
1812
1813 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1814 G0_RBRQ_H + (group * 16));
1815 }
1816
1817 return pdus_assembled;
1818}
1819
1820static void
1821he_service_tbrq(struct he_dev *he_dev, int group)
1822{
1823 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1824 ((unsigned long)he_dev->tbrq_base |
1825 he_dev->hsp->group[group].tbrq_tail);
1826 struct he_tpd *tpd;
1827 int slot, updated = 0;
1828 struct he_tpd *__tpd;
1829
1830 /* 2.1.6 transmit buffer return queue */
1831
1832 while (he_dev->tbrq_head != tbrq_tail) {
1833 ++updated;
1834
1835 HPRINTK("tbrq%d 0x%x%s%s\n",
1836 group,
1837 TBRQ_TPD(he_dev->tbrq_head),
1838 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1839 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1840 tpd = NULL;
1841 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1842 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1843 tpd = __tpd;
1844 list_del(&__tpd->entry);
1845 break;
1846 }
1847 }
1848
1849 if (tpd == NULL) {
1850 hprintk("unable to locate tpd for dma buffer %x\n",
1851 TBRQ_TPD(he_dev->tbrq_head));
1852 goto next_tbrq_entry;
1853 }
1854
1855 if (TBRQ_EOS(he_dev->tbrq_head)) {
1856 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1857 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1858 if (tpd->vcc)
1859 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1860
1861 goto next_tbrq_entry;
1862 }
1863
1864 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1865 if (tpd->iovec[slot].addr)
1866 pci_unmap_single(he_dev->pci_dev,
1867 tpd->iovec[slot].addr,
1868 tpd->iovec[slot].len & TPD_LEN_MASK,
1869 PCI_DMA_TODEVICE);
1870 if (tpd->iovec[slot].len & TPD_LST)
1871 break;
1872
1873 }
1874
1875 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1876 if (tpd->vcc && tpd->vcc->pop)
1877 tpd->vcc->pop(tpd->vcc, tpd->skb);
1878 else
1879 dev_kfree_skb_any(tpd->skb);
1880 }
1881
1882next_tbrq_entry:
1883 if (tpd)
1884 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1885 he_dev->tbrq_head = (struct he_tbrq *)
1886 ((unsigned long) he_dev->tbrq_base |
1887 TBRQ_MASK(he_dev->tbrq_head + 1));
1888 }
1889
1890 if (updated) {
1891 if (updated > he_dev->tbrq_peak)
1892 he_dev->tbrq_peak = updated;
1893
1894 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1895 G0_TBRQ_H + (group * 16));
1896 }
1897}
1898
1899static void
1900he_service_rbpl(struct he_dev *he_dev, int group)
1901{
1902 struct he_rbp *new_tail;
1903 struct he_rbp *rbpl_head;
1904 struct he_buff *heb;
1905 dma_addr_t mapping;
1906 int i;
1907 int moved = 0;
1908
1909 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1910 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1911
1912 for (;;) {
1913 new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1914 RBPL_MASK(he_dev->rbpl_tail+1));
1915
1916 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1917 if (new_tail == rbpl_head)
1918 break;
1919
1920 i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1921 if (i > (RBPL_TABLE_SIZE - 1)) {
1922 i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1923 if (i > (RBPL_TABLE_SIZE - 1))
1924 break;
1925 }
1926 he_dev->rbpl_hint = i + 1;
1927
1928 heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1929 if (!heb)
1930 break;
1931 heb->mapping = mapping;
1932 list_add(&heb->entry, &he_dev->rbpl_outstanding);
1933 he_dev->rbpl_virt[i] = heb;
1934 set_bit(i, he_dev->rbpl_table);
1935 new_tail->idx = i << RBP_IDX_OFFSET;
1936 new_tail->phys = mapping + offsetof(struct he_buff, data);
1937
1938 he_dev->rbpl_tail = new_tail;
1939 ++moved;
1940 }
1941
1942 if (moved)
1943 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1944}
1945
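/*
 * he_tasklet -- bottom half: walk the interrupt event queue.  The type
 * and group of each entry select the service routine (rbrq/rbpl, tbrq,
 * phy, error reporting); consumed entries are marked ITYPE_INVALID and
 * the IRQ0 head register is rewritten once processing is done.
 */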
1946static void
1947he_tasklet(unsigned long data)
1948{
1949 unsigned long flags;
1950 struct he_dev *he_dev = (struct he_dev *) data;
1951 int group, type;
1952 int updated = 0;
1953
1954 HPRINTK("tasklet (0x%lx)\n", data);
1955 spin_lock_irqsave(&he_dev->global_lock, flags);
1956
1957 while (he_dev->irq_head != he_dev->irq_tail) {
1958 ++updated;
1959
1960 type = ITYPE_TYPE(he_dev->irq_head->isw);
1961 group = ITYPE_GROUP(he_dev->irq_head->isw);
1962
1963 switch (type) {
1964 case ITYPE_RBRQ_THRESH:
1965 HPRINTK("rbrq%d threshold\n", group);
1966 /* fall through */
1967 case ITYPE_RBRQ_TIMER:
1968 if (he_service_rbrq(he_dev, group))
1969 he_service_rbpl(he_dev, group);
1970 break;
1971 case ITYPE_TBRQ_THRESH:
1972 HPRINTK("tbrq%d threshold\n", group);
1973 /* fall through */
1974 case ITYPE_TPD_COMPLETE:
1975 he_service_tbrq(he_dev, group);
1976 break;
1977 case ITYPE_RBPL_THRESH:
1978 he_service_rbpl(he_dev, group);
1979 break;
1980 case ITYPE_RBPS_THRESH:
1981 /* shouldn't happen unless small buffers enabled */
1982 break;
1983 case ITYPE_PHY:
1984 HPRINTK("phy interrupt\n");
1985#ifdef CONFIG_ATM_HE_USE_SUNI
1986 spin_unlock_irqrestore(&he_dev->global_lock, flags);
1987 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1988 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1989 spin_lock_irqsave(&he_dev->global_lock, flags);
1990#endif
1991 break;
1992 case ITYPE_OTHER:
1993 switch (type|group) {
1994 case ITYPE_PARITY:
1995 hprintk("parity error\n");
1996 break;
1997 case ITYPE_ABORT:
1998 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1999 break;
2000 }
2001 break;
2002 case ITYPE_TYPE(ITYPE_INVALID):
2003 /* see 8.1.1 -- check all queues */
2004
2005 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2006
2007 he_service_rbrq(he_dev, 0);
2008 he_service_rbpl(he_dev, 0);
2009 he_service_tbrq(he_dev, 0);
2010 break;
2011 default:
2012 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2013 }
2014
2015 he_dev->irq_head->isw = ITYPE_INVALID;
2016
2017 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2018 }
2019
2020 if (updated) {
2021 if (updated > he_dev->irq_peak)
2022 he_dev->irq_peak = updated;
2023
2024 he_writel(he_dev,
2025 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2026 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2027 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2028 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2029 }
2030 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2031}
2032
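/*
 * he_irq_handler -- top half: latch the interrupt queue tail written by
 * the adapter and, if new events are pending, clear the interrupt and
 * defer the actual event processing to the tasklet.
 */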
2033static irqreturn_t
2034he_irq_handler(int irq, void *dev_id)
2035{
2036 unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *)dev_id;
2038 int handled = 0;
2039
2040 if (he_dev == NULL)
2041 return IRQ_NONE;
2042
2043 spin_lock_irqsave(&he_dev->global_lock, flags);
2044
2045 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2046 (*he_dev->irq_tailoffset << 2));
2047
2048 if (he_dev->irq_tail == he_dev->irq_head) {
2049 HPRINTK("tailoffset not updated?\n");
2050 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2051 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2052 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2053 }
2054
2055#ifdef DEBUG
2056 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2057 hprintk("spurious (or shared) interrupt?\n");
2058#endif
2059
2060 if (he_dev->irq_head != he_dev->irq_tail) {
2061 handled = 1;
2062 tasklet_schedule(&he_dev->tasklet);
2063 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2064 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2065 }
2066 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2067 return IRQ_RETVAL(handled);
2068
2069}
2070
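/*
 * __enqueue_tpd -- place a TPD on the transmit packet descriptor ready
 * queue for the given connection.  Called with global_lock held.  If the
 * ring still looks full after re-reading the head from the adapter, the
 * PDU is dropped (see the FIXME about a transmit backlog below).
 */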
2071static __inline__ void
2072__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2073{
2074 struct he_tpdrq *new_tail;
2075
2076 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2077 tpd, cid, he_dev->tpdrq_tail);
2078
2079 /* new_tail = he_dev->tpdrq_tail; */
2080 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2081 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2082
2083 /*
2084 * check to see if we are about to set the tail == head
2085 * if true, update the head pointer from the adapter
2086 * to see if this is really the case (reading the queue
2087 * head for every enqueue would be unnecessarily slow)
2088 */
2089
2090 if (new_tail == he_dev->tpdrq_head) {
2091 he_dev->tpdrq_head = (struct he_tpdrq *)
2092 (((unsigned long)he_dev->tpdrq_base) |
2093 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2094
2095 if (new_tail == he_dev->tpdrq_head) {
2096 int slot;
2097
2098 hprintk("tpdrq full (cid 0x%x)\n", cid);
2099 /*
2100 * FIXME
2101 * push tpd onto a transmit backlog queue
2102 * after service_tbrq, service the backlog
2103 * for now, we just drop the pdu
2104 */
2105 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2106 if (tpd->iovec[slot].addr)
2107 pci_unmap_single(he_dev->pci_dev,
2108 tpd->iovec[slot].addr,
2109 tpd->iovec[slot].len & TPD_LEN_MASK,
2110 PCI_DMA_TODEVICE);
2111 }
2112 if (tpd->skb) {
2113 if (tpd->vcc->pop)
2114 tpd->vcc->pop(tpd->vcc, tpd->skb);
2115 else
2116 dev_kfree_skb_any(tpd->skb);
2117 atomic_inc(&tpd->vcc->stats->tx_err);
2118 }
2119 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2120 return;
2121 }
2122 }
2123
2124 /* 2.1.5 transmit packet descriptor ready queue */
2125 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2126 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2127 he_dev->tpdrq_tail->cid = cid;
2128 wmb();
2129
2130 he_dev->tpdrq_tail = new_tail;
2131
2132 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2133 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2134}
2135
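/*
 * he_open -- open a vcc: allocate the per-connection state, then program
 * the transmit (TSR0-14) and/or receive (RSR0/1/4) connection registers
 * for the requested traffic class and AAL.  CBR connections also claim a
 * cs_stper rate register and are admitted against 90% of the link rate.
 */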
2136static int
2137he_open(struct atm_vcc *vcc)
2138{
2139 unsigned long flags;
2140 struct he_dev *he_dev = HE_DEV(vcc->dev);
2141 struct he_vcc *he_vcc;
2142 int err = 0;
2143 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2144 short vpi = vcc->vpi;
2145 int vci = vcc->vci;
2146
2147 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2148 return 0;
2149
2150 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2151
2152 set_bit(ATM_VF_ADDR, &vcc->flags);
2153
2154 cid = he_mkcid(he_dev, vpi, vci);
2155
2156 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2157 if (he_vcc == NULL) {
2158 hprintk("unable to allocate he_vcc during open\n");
2159 return -ENOMEM;
2160 }
2161
2162 INIT_LIST_HEAD(&he_vcc->buffers);
2163 he_vcc->pdu_len = 0;
2164 he_vcc->rc_index = -1;
2165
2166 init_waitqueue_head(&he_vcc->rx_waitq);
2167 init_waitqueue_head(&he_vcc->tx_waitq);
2168
2169 vcc->dev_data = he_vcc;
2170
2171 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2172 int pcr_goal;
2173
2174 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2175 if (pcr_goal == 0)
2176 pcr_goal = he_dev->atm_dev->link_rate;
2177 if (pcr_goal < 0) /* means round down, technically */
2178 pcr_goal = -pcr_goal;
2179
2180 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2181
2182 switch (vcc->qos.aal) {
2183 case ATM_AAL5:
2184 tsr0_aal = TSR0_AAL5;
2185 tsr4 = TSR4_AAL5;
2186 break;
2187 case ATM_AAL0:
2188 tsr0_aal = TSR0_AAL0_SDU;
2189 tsr4 = TSR4_AAL0_SDU;
2190 break;
2191 default:
2192 err = -EINVAL;
2193 goto open_failed;
2194 }
2195
2196 spin_lock_irqsave(&he_dev->global_lock, flags);
2197 tsr0 = he_readl_tsr0(he_dev, cid);
2198 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2199
2200 if (TSR0_CONN_STATE(tsr0) != 0) {
2201 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2202 err = -EBUSY;
2203 goto open_failed;
2204 }
2205
2206 switch (vcc->qos.txtp.traffic_class) {
2207 case ATM_UBR:
2208 /* 2.3.3.1 open connection ubr */
2209
2210 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2211 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2212 break;
2213
2214 case ATM_CBR:
2215 /* 2.3.3.2 open connection cbr */
2216
2217 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2218 if ((he_dev->total_bw + pcr_goal)
2219 > (he_dev->atm_dev->link_rate * 9 / 10))
2220 {
2221 err = -EBUSY;
2222 goto open_failed;
2223 }
2224
2225 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2226
2227 /* find an unused cs_stper register */
2228 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2229 if (he_dev->cs_stper[reg].inuse == 0 ||
2230 he_dev->cs_stper[reg].pcr == pcr_goal)
2231 break;
2232
2233 if (reg == HE_NUM_CS_STPER) {
2234 err = -EBUSY;
2235 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2236 goto open_failed;
2237 }
2238
2239 he_dev->total_bw += pcr_goal;
2240
2241 he_vcc->rc_index = reg;
2242 ++he_dev->cs_stper[reg].inuse;
2243 he_dev->cs_stper[reg].pcr = pcr_goal;
2244
2245 clock = he_is622(he_dev) ? 66667000 : 50000000;
2246 period = clock / pcr_goal;
2247
2248 HPRINTK("rc_index = %d period = %d\n",
2249 reg, period);
2250
2251 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2252 CS_STPER0 + reg);
2253 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2254
2255 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2256 TSR0_RC_INDEX(reg);
2257
2258 break;
2259 default:
2260 err = -EINVAL;
2261 goto open_failed;
2262 }
2263
2264 spin_lock_irqsave(&he_dev->global_lock, flags);
2265
2266 he_writel_tsr0(he_dev, tsr0, cid);
2267 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2268 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2269 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2270 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2271 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2272
2273 he_writel_tsr3(he_dev, 0x0, cid);
2274 he_writel_tsr5(he_dev, 0x0, cid);
2275 he_writel_tsr6(he_dev, 0x0, cid);
2276 he_writel_tsr7(he_dev, 0x0, cid);
2277 he_writel_tsr8(he_dev, 0x0, cid);
2278 he_writel_tsr10(he_dev, 0x0, cid);
2279 he_writel_tsr11(he_dev, 0x0, cid);
2280 he_writel_tsr12(he_dev, 0x0, cid);
2281 he_writel_tsr13(he_dev, 0x0, cid);
2282 he_writel_tsr14(he_dev, 0x0, cid);
2283 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2284 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2285 }
2286
2287 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2288 unsigned aal;
2289
2290 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2291 &HE_VCC(vcc)->rx_waitq);
2292
2293 switch (vcc->qos.aal) {
2294 case ATM_AAL5:
2295 aal = RSR0_AAL5;
2296 break;
2297 case ATM_AAL0:
2298 aal = RSR0_RAWCELL;
2299 break;
2300 default:
2301 err = -EINVAL;
2302 goto open_failed;
2303 }
2304
2305 spin_lock_irqsave(&he_dev->global_lock, flags);
2306
2307 rsr0 = he_readl_rsr0(he_dev, cid);
2308 if (rsr0 & RSR0_OPEN_CONN) {
2309 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2310
2311 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2312 err = -EBUSY;
2313 goto open_failed;
2314 }
2315
2316 rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2317 rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2318 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2319 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2320
2321#ifdef USE_CHECKSUM_HW
2322 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2323 rsr0 |= RSR0_TCP_CKSUM;
2324#endif
2325
2326 he_writel_rsr4(he_dev, rsr4, cid);
2327 he_writel_rsr1(he_dev, rsr1, cid);
2328 /* 5.1.11 last parameter initialized should be
2329 the open/closed indication in rsr0 */
2330 he_writel_rsr0(he_dev,
2331 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2332 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2333
2334 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2335 }
2336
2337open_failed:
2338
2339 if (err) {
2340 kfree(he_vcc);
2341 clear_bit(ATM_VF_ADDR, &vcc->flags);
2342 }
2343 else
2344 set_bit(ATM_VF_READY, &vcc->flags);
2345
2346 return err;
2347}
2348
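/*
 * he_close -- tear down a vcc.  The receive side issues the close command
 * and waits (with a timeout) for the rx path to signal rx_waitq; the
 * transmit side lets outstanding skbs drain, flushes the connection,
 * queues an end-of-stream TPD and waits for it to come back on the TBRQ,
 * then releases any CBR rate-register reservation.
 */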
2349static void
2350he_close(struct atm_vcc *vcc)
2351{
2352 unsigned long flags;
2353 DECLARE_WAITQUEUE(wait, current);
2354 struct he_dev *he_dev = HE_DEV(vcc->dev);
2355 struct he_tpd *tpd;
2356 unsigned cid;
2357 struct he_vcc *he_vcc = HE_VCC(vcc);
2358#define MAX_RETRY 30
2359 int retry = 0, sleep = 1, tx_inuse;
2360
2361 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2362
2363 clear_bit(ATM_VF_READY, &vcc->flags);
2364 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2365
2366 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2367 int timeout;
2368
2369 HPRINTK("close rx cid 0x%x\n", cid);
2370
2371 /* 2.7.2.2 close receive operation */
2372
2373 /* wait for previous close (if any) to finish */
2374
2375 spin_lock_irqsave(&he_dev->global_lock, flags);
2376 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2377 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2378 udelay(250);
2379 }
2380
2381 set_current_state(TASK_UNINTERRUPTIBLE);
2382 add_wait_queue(&he_vcc->rx_waitq, &wait);
2383
2384 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2385 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2386 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2387 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2388
2389 timeout = schedule_timeout(30*HZ);
2390
2391 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2392 set_current_state(TASK_RUNNING);
2393
2394 if (timeout == 0)
2395 hprintk("close rx timeout cid 0x%x\n", cid);
2396
2397 HPRINTK("close rx cid 0x%x complete\n", cid);
2398
2399 }
2400
2401 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2402 volatile unsigned tsr4, tsr0;
2403 int timeout;
2404
2405 HPRINTK("close tx cid 0x%x\n", cid);
2406
2407 /* 2.1.2
2408 *
2409 * ... the host must first stop queueing packets to the TPDRQ
2410 * on the connection to be closed, then wait for all outstanding
2411 * packets to be transmitted and their buffers returned to the
2412 * TBRQ. When the last packet on the connection arrives in the
2413 * TBRQ, the host issues the close command to the adapter.
2414 */
2415
2416 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2417 (retry < MAX_RETRY)) {
2418 msleep(sleep);
2419 if (sleep < 250)
2420 sleep = sleep * 2;
2421
2422 ++retry;
2423 }
2424
2425 if (tx_inuse > 1)
2426 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2427
2428 /* 2.3.1.1 generic close operations with flush */
2429
2430 spin_lock_irqsave(&he_dev->global_lock, flags);
2431 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2432 /* also clears TSR4_SESSION_ENDED */
2433
2434 switch (vcc->qos.txtp.traffic_class) {
2435 case ATM_UBR:
2436 he_writel_tsr1(he_dev,
2437 TSR1_MCR(rate_to_atmf(200000))
2438 | TSR1_PCR(0), cid);
2439 break;
2440 case ATM_CBR:
2441 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2442 break;
2443 }
2444 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2445
2446 tpd = __alloc_tpd(he_dev);
2447 if (tpd == NULL) {
2448 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2449 goto close_tx_incomplete;
2450 }
2451 tpd->status |= TPD_EOS | TPD_INT;
2452 tpd->skb = NULL;
2453 tpd->vcc = vcc;
2454 wmb();
2455
2456 set_current_state(TASK_UNINTERRUPTIBLE);
2457 add_wait_queue(&he_vcc->tx_waitq, &wait);
2458 __enqueue_tpd(he_dev, tpd, cid);
2459 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2460
2461 timeout = schedule_timeout(30*HZ);
2462
2463 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2464 set_current_state(TASK_RUNNING);
2465
2466 spin_lock_irqsave(&he_dev->global_lock, flags);
2467
2468 if (timeout == 0) {
2469 hprintk("close tx timeout cid 0x%x\n", cid);
2470 goto close_tx_incomplete;
2471 }
2472
2473 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2474 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2475 udelay(250);
2476 }
2477
2478 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2479 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2480 udelay(250);
2481 }
2482
2483close_tx_incomplete:
2484
2485 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2486 int reg = he_vcc->rc_index;
2487
2488 HPRINTK("cs_stper reg = %d\n", reg);
2489
2490 if (he_dev->cs_stper[reg].inuse == 0)
2491 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2492 else
2493 --he_dev->cs_stper[reg].inuse;
2494
2495 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2496 }
2497 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2498
2499 HPRINTK("close tx cid 0x%x complete\n", cid);
2500 }
2501
2502 kfree(he_vcc);
2503
2504 clear_bit(ATM_VF_ADDR, &vcc->flags);
2505}
2506
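/*
 * he_send -- transmit one skb: build a TPD for the packet (for AAL0 the
 * PTI/CLP bits are copied from the cell header, which is then stripped),
 * map the data for DMA and enqueue the TPD on the TPDRQ.  Without
 * USE_SCATTERGATHER only linear skbs are accepted.
 */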
2507static int
2508he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2509{
2510 unsigned long flags;
2511 struct he_dev *he_dev = HE_DEV(vcc->dev);
2512 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2513 struct he_tpd *tpd;
2514#ifdef USE_SCATTERGATHER
2515 int i, slot = 0;
2516#endif
2517
2518#define HE_TPD_BUFSIZE 0xffff
2519
2520 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2521
2522 if ((skb->len > HE_TPD_BUFSIZE) ||
2523 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2524 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2525 if (vcc->pop)
2526 vcc->pop(vcc, skb);
2527 else
2528 dev_kfree_skb_any(skb);
2529 atomic_inc(&vcc->stats->tx_err);
2530 return -EINVAL;
2531 }
2532
2533#ifndef USE_SCATTERGATHER
2534 if (skb_shinfo(skb)->nr_frags) {
2535 hprintk("no scatter/gather support\n");
2536 if (vcc->pop)
2537 vcc->pop(vcc, skb);
2538 else
2539 dev_kfree_skb_any(skb);
2540 atomic_inc(&vcc->stats->tx_err);
2541 return -EINVAL;
2542 }
2543#endif
2544 spin_lock_irqsave(&he_dev->global_lock, flags);
2545
2546 tpd = __alloc_tpd(he_dev);
2547 if (tpd == NULL) {
2548 if (vcc->pop)
2549 vcc->pop(vcc, skb);
2550 else
2551 dev_kfree_skb_any(skb);
2552 atomic_inc(&vcc->stats->tx_err);
2553 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2554 return -ENOMEM;
2555 }
2556
2557 if (vcc->qos.aal == ATM_AAL5)
2558 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2559 else {
2560 char *pti_clp = (void *) (skb->data + 3);
2561 int clp, pti;
2562
2563 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2564 clp = (*pti_clp & ATM_HDR_CLP);
2565 tpd->status |= TPD_CELLTYPE(pti);
2566 if (clp)
2567 tpd->status |= TPD_CLP;
2568
2569 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2570 }
2571
2572#ifdef USE_SCATTERGATHER
2573 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2574 skb_headlen(skb), PCI_DMA_TODEVICE);
2575 tpd->iovec[slot].len = skb_headlen(skb);
2576 ++slot;
2577
2578 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2579 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2580
2581 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2582 tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment,
						   so don't complete (->pop) the skb yet */
2585 wmb();
2586
2587 __enqueue_tpd(he_dev, tpd, cid);
2588 tpd = __alloc_tpd(he_dev);
2589 if (tpd == NULL) {
2590 if (vcc->pop)
2591 vcc->pop(vcc, skb);
2592 else
2593 dev_kfree_skb_any(skb);
2594 atomic_inc(&vcc->stats->tx_err);
2595 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2596 return -ENOMEM;
2597 }
2598 tpd->status |= TPD_USERCELL;
2599 slot = 0;
2600 }
2601
2602 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2603 (void *) page_address(frag->page) + frag->page_offset,
2604 frag->size, PCI_DMA_TODEVICE);
2605 tpd->iovec[slot].len = frag->size;
2606 ++slot;
2607
2608 }
2609
2610 tpd->iovec[slot - 1].len |= TPD_LST;
2611#else
2612 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2613 tpd->length0 = skb->len | TPD_LST;
2614#endif
2615 tpd->status |= TPD_INT;
2616
2617 tpd->vcc = vcc;
2618 tpd->skb = skb;
2619 wmb();
2620 ATM_SKB(skb)->vcc = vcc;
2621
2622 __enqueue_tpd(he_dev, tpd, cid);
2623 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2624
2625 atomic_inc(&vcc->stats->tx);
2626
2627 return 0;
2628}
2629
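/*
 * he_ioctl -- HE_GET_REG lets a privileged user read PCI, RCM, TCM and
 * mailbox registers for debugging; anything else is passed through to
 * the PHY driver's ioctl when SUNI support is configured.
 */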
2630static int
2631he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2632{
2633 unsigned long flags;
2634 struct he_dev *he_dev = HE_DEV(atm_dev);
2635 struct he_ioctl_reg reg;
2636 int err = 0;
2637
2638 switch (cmd) {
2639 case HE_GET_REG:
2640 if (!capable(CAP_NET_ADMIN))
2641 return -EPERM;
2642
		if (copy_from_user(&reg, arg,
				   sizeof(struct he_ioctl_reg)))
2645 return -EFAULT;
2646
2647 spin_lock_irqsave(&he_dev->global_lock, flags);
2648 switch (reg.type) {
2649 case HE_REGTYPE_PCI:
2650 if (reg.addr >= HE_REGMAP_SIZE) {
2651 err = -EINVAL;
2652 break;
2653 }
2654
2655 reg.val = he_readl(he_dev, reg.addr);
2656 break;
2657 case HE_REGTYPE_RCM:
2658 reg.val =
2659 he_readl_rcm(he_dev, reg.addr);
2660 break;
2661 case HE_REGTYPE_TCM:
2662 reg.val =
2663 he_readl_tcm(he_dev, reg.addr);
2664 break;
2665 case HE_REGTYPE_MBOX:
2666 reg.val =
2667 he_readl_mbox(he_dev, reg.addr);
2668 break;
2669 default:
2670 err = -EINVAL;
2671 break;
2672 }
2673 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2674 if (err == 0)
			if (copy_to_user(arg, &reg,
					 sizeof(struct he_ioctl_reg)))
2677 return -EFAULT;
2678 break;
2679 default:
2680#ifdef CONFIG_ATM_HE_USE_SUNI
2681 if (atm_dev->phy && atm_dev->phy->ioctl)
2682 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2683#else /* CONFIG_ATM_HE_USE_SUNI */
2684 err = -EINVAL;
2685#endif /* CONFIG_ATM_HE_USE_SUNI */
2686 break;
2687 }
2688
2689 return err;
2690}
2691
2692static void
2693he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2694{
2695 unsigned long flags;
2696 struct he_dev *he_dev = HE_DEV(atm_dev);
2697
2698 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2699
2700 spin_lock_irqsave(&he_dev->global_lock, flags);
2701 he_writel(he_dev, val, FRAMER + (addr*4));
2702 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2703 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2704}
2705
2706
2707static unsigned char
2708he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2709{
2710 unsigned long flags;
2711 struct he_dev *he_dev = HE_DEV(atm_dev);
2712 unsigned reg;
2713
2714 spin_lock_irqsave(&he_dev->global_lock, flags);
2715 reg = he_readl(he_dev, FRAMER + (addr*4));
2716 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2717
2718 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2719 return reg;
2720}
2721
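/*
 * he_proc_read -- emit one line of /proc output per call: cell error
 * counters (accumulated across reads), queue sizes with peak occupancy,
 * and the CBR rate controller (cs_stper) usage.
 */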
2722static int
2723he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2724{
2725 unsigned long flags;
2726 struct he_dev *he_dev = HE_DEV(dev);
2727 int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
	int inuse;
#endif
2733 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2734
2735
2736 left = *pos;
2737 if (!left--)
2738 return sprintf(page, "ATM he driver\n");
2739
2740 if (!left--)
2741 return sprintf(page, "%s%s\n\n",
2742 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2743
2744 if (!left--)
2745 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2746
2747 spin_lock_irqsave(&he_dev->global_lock, flags);
2748 mcc += he_readl(he_dev, MCC);
2749 oec += he_readl(he_dev, OEC);
2750 dcc += he_readl(he_dev, DCC);
2751 cec += he_readl(he_dev, CEC);
2752 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2753
2754 if (!left--)
2755 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2756 mcc, oec, dcc, cec);
2757
2758 if (!left--)
2759 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2760 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2761
2762 if (!left--)
2763 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2764 CONFIG_TPDRQ_SIZE);
2765
2766 if (!left--)
2767 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2768 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2769
2770 if (!left--)
2771 return sprintf(page, "tbrq_size = %d peak = %d\n",
2772 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2773
2774
2775#ifdef notdef
2776 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2777 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2778
2779 inuse = rbpl_head - rbpl_tail;
2780 if (inuse < 0)
2781 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2782 inuse /= sizeof(struct he_rbp);
2783
2784 if (!left--)
2785 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2786 CONFIG_RBPL_SIZE, inuse);
2787#endif
2788
2789 if (!left--)
2790 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2791
2792 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2793 if (!left--)
2794 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2795 he_dev->cs_stper[i].pcr,
2796 he_dev->cs_stper[i].inuse);
2797
2798 if (!left--)
		return sprintf(page, "total bw (cbr): %d (limit %d)\n",
			       he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2801
2802 return 0;
2803}
2804
2805/* eeprom routines -- see 4.7 */
2806
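/*
 * read_prom_byte -- bit-bang one byte out of the serial EEPROM through
 * the HOST_CNTL register: shift out the READ opcode and the 8-bit
 * address, then clock in the eight data bits.
 */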
2807static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2808{
2809 u32 val = 0, tmp_read = 0;
2810 int i, j = 0;
2811 u8 byte_read = 0;
2812
2813 val = readl(he_dev->membase + HOST_CNTL);
2814 val &= 0xFFFFE0FF;
2815
2816 /* Turn on write enable */
2817 val |= 0x800;
2818 he_writel(he_dev, val, HOST_CNTL);
2819
2820 /* Send READ instruction */
2821 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2822 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2823 udelay(EEPROM_DELAY);
2824 }
2825
2826 /* Next, we need to send the byte address to read from */
2827 for (i = 7; i >= 0; i--) {
2828 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2829 udelay(EEPROM_DELAY);
2830 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2831 udelay(EEPROM_DELAY);
2832 }
2833
2834 j = 0;
2835
2836 val &= 0xFFFFF7FF; /* Turn off write enable */
2837 he_writel(he_dev, val, HOST_CNTL);
2838
2839 /* Now, we can read data from the EEPROM by clocking it in */
2840 for (i = 7; i >= 0; i--) {
2841 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2842 udelay(EEPROM_DELAY);
2843 tmp_read = he_readl(he_dev, HOST_CNTL);
2844 byte_read |= (unsigned char)
2845 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2846 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2847 udelay(EEPROM_DELAY);
2848 }
2849
2850 he_writel(he_dev, val | ID_CS, HOST_CNTL);
2851 udelay(EEPROM_DELAY);
2852
2853 return byte_read;
2854}
2855
2856MODULE_LICENSE("GPL");
2857MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2858MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2859module_param(disable64, bool, 0);
2860MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2861module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2865module_param(rx_skb_reserve, short, 0);
2866MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2867module_param(irq_coalesce, bool, 0);
2868MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2869module_param(sdh, bool, 0);
2870MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2871
2872static struct pci_device_id he_pci_tbl[] = {
2873 { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2874 { 0, }
2875};
2876
2877MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2878
2879static struct pci_driver he_driver = {
2880 .name = "he",
2881 .probe = he_init_one,
2882 .remove = __devexit_p(he_remove_one),
2883 .id_table = he_pci_tbl,
2884};
2885
2886static int __init he_init(void)
2887{
2888 return pci_register_driver(&he_driver);
2889}
2890
2891static void __exit he_cleanup(void)
2892{
2893 pci_unregister_driver(&he_driver);
2894}
2895
2896module_init(he_init);
2897module_exit(he_cleanup);
56#include <linux/module.h>
57#include <linux/kernel.h>
58#include <linux/skbuff.h>
59#include <linux/pci.h>
60#include <linux/errno.h>
61#include <linux/types.h>
62#include <linux/string.h>
63#include <linux/delay.h>
64#include <linux/init.h>
65#include <linux/mm.h>
66#include <linux/sched.h>
67#include <linux/timer.h>
68#include <linux/interrupt.h>
69#include <linux/dma-mapping.h>
70#include <linux/bitmap.h>
71#include <linux/slab.h>
72#include <asm/io.h>
73#include <asm/byteorder.h>
74#include <linux/uaccess.h>
75
76#include <linux/atmdev.h>
77#include <linux/atm.h>
78#include <linux/sonet.h>
79
80#undef USE_SCATTERGATHER
81#undef USE_CHECKSUM_HW /* still confused about this */
82/* #undef HE_DEBUG */
83
84#include "he.h"
85#include "suni.h"
86#include <linux/atm_he.h>
87
88#define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89
90#ifdef HE_DEBUG
91#define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92#else /* !HE_DEBUG */
93#define HPRINTK(fmt,args...) do { } while (0)
94#endif /* HE_DEBUG */
95
96/* declarations */
97
98static int he_open(struct atm_vcc *vcc);
99static void he_close(struct atm_vcc *vcc);
100static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102static irqreturn_t he_irq_handler(int irq, void *dev_id);
103static void he_tasklet(unsigned long data);
104static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105static int he_start(struct atm_dev *dev);
106static void he_stop(struct he_dev *dev);
107static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109
110static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111
112/* globals */
113
114static struct he_dev *he_devs;
115static bool disable64;
116static short nvpibits = -1;
117static short nvcibits = -1;
118static short rx_skb_reserve = 16;
119static bool irq_coalesce = true;
120static bool sdh;
121
122/* Read from EEPROM = 0000 0011b */
123static unsigned int readtab[] = {
124 CS_HIGH | CLK_HIGH,
125 CS_LOW | CLK_LOW,
126 CLK_HIGH, /* 0 */
127 CLK_LOW,
128 CLK_HIGH, /* 0 */
129 CLK_LOW,
130 CLK_HIGH, /* 0 */
131 CLK_LOW,
132 CLK_HIGH, /* 0 */
133 CLK_LOW,
134 CLK_HIGH, /* 0 */
135 CLK_LOW,
136 CLK_HIGH, /* 0 */
137 CLK_LOW | SI_HIGH,
138 CLK_HIGH | SI_HIGH, /* 1 */
139 CLK_LOW | SI_HIGH,
140 CLK_HIGH | SI_HIGH /* 1 */
141};
142
143/* Clock to read from/write to the EEPROM */
144static unsigned int clocktab[] = {
145 CLK_LOW,
146 CLK_HIGH,
147 CLK_LOW,
148 CLK_HIGH,
149 CLK_LOW,
150 CLK_HIGH,
151 CLK_LOW,
152 CLK_HIGH,
153 CLK_LOW,
154 CLK_HIGH,
155 CLK_LOW,
156 CLK_HIGH,
157 CLK_LOW,
158 CLK_HIGH,
159 CLK_LOW,
160 CLK_HIGH,
161 CLK_LOW
162};
163
164static struct atmdev_ops he_ops =
165{
166 .open = he_open,
167 .close = he_close,
168 .ioctl = he_ioctl,
169 .send = he_send,
170 .phy_put = he_phy_put,
171 .phy_get = he_phy_get,
172 .proc_read = he_proc_read,
173 .owner = THIS_MODULE
174};
175
176#define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177#define he_readl(dev, reg) readl((dev)->membase + (reg))
178
179/* section 2.12 connection memory access */
180
181static __inline__ void
182he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 unsigned flags)
184{
185 he_writel(he_dev, val, CON_DAT);
186 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
187 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189}
190
191#define he_writel_rcm(dev, val, reg) \
192 he_writel_internal(dev, val, reg, CON_CTL_RCM)
193
194#define he_writel_tcm(dev, val, reg) \
195 he_writel_internal(dev, val, reg, CON_CTL_TCM)
196
197#define he_writel_mbox(dev, val, reg) \
198 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199
200static unsigned
201he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202{
203 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 return he_readl(he_dev, CON_DAT);
206}
207
208#define he_readl_rcm(dev, reg) \
209 he_readl_internal(dev, reg, CON_CTL_RCM)
210
211#define he_readl_tcm(dev, reg) \
212 he_readl_internal(dev, reg, CON_CTL_TCM)
213
214#define he_readl_mbox(dev, reg) \
215 he_readl_internal(dev, reg, CON_CTL_MBOX)
216
217
218/* figure 2.2 connection id */
219
220#define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
221
222/* 2.5.1 per connection transmit state registers */
223
224#define he_writel_tsr0(dev, val, cid) \
225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226#define he_readl_tsr0(dev, cid) \
227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228
229#define he_writel_tsr1(dev, val, cid) \
230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231
232#define he_writel_tsr2(dev, val, cid) \
233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234
235#define he_writel_tsr3(dev, val, cid) \
236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237
238#define he_writel_tsr4(dev, val, cid) \
239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240
241 /* from page 2-20
242 *
243 * NOTE While the transmit connection is active, bits 23 through 0
244 * of this register must not be written by the host. Byte
245 * enables should be used during normal operation when writing
246 * the most significant byte.
247 */
248
249#define he_writel_tsr4_upper(dev, val, cid) \
250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 CON_CTL_TCM \
252 | CON_BYTE_DISABLE_2 \
253 | CON_BYTE_DISABLE_1 \
254 | CON_BYTE_DISABLE_0)
255
256#define he_readl_tsr4(dev, cid) \
257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258
259#define he_writel_tsr5(dev, val, cid) \
260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261
262#define he_writel_tsr6(dev, val, cid) \
263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264
265#define he_writel_tsr7(dev, val, cid) \
266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267
268
269#define he_writel_tsr8(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271
272#define he_writel_tsr9(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274
275#define he_writel_tsr10(dev, val, cid) \
276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277
278#define he_writel_tsr11(dev, val, cid) \
279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280
281
282#define he_writel_tsr12(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284
285#define he_writel_tsr13(dev, val, cid) \
286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287
288
289#define he_writel_tsr14(dev, val, cid) \
290 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291
292#define he_writel_tsr14_upper(dev, val, cid) \
293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 CON_CTL_TCM \
295 | CON_BYTE_DISABLE_2 \
296 | CON_BYTE_DISABLE_1 \
297 | CON_BYTE_DISABLE_0)
298
299/* 2.7.1 per connection receive state registers */
300
301#define he_writel_rsr0(dev, val, cid) \
302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303#define he_readl_rsr0(dev, cid) \
304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305
306#define he_writel_rsr1(dev, val, cid) \
307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308
309#define he_writel_rsr2(dev, val, cid) \
310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311
312#define he_writel_rsr3(dev, val, cid) \
313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314
315#define he_writel_rsr4(dev, val, cid) \
316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317
318#define he_writel_rsr5(dev, val, cid) \
319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320
321#define he_writel_rsr6(dev, val, cid) \
322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323
324#define he_writel_rsr7(dev, val, cid) \
325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326
327static __inline__ struct atm_vcc*
328__find_vcc(struct he_dev *he_dev, unsigned cid)
329{
330 struct hlist_head *head;
331 struct atm_vcc *vcc;
332 struct sock *s;
333 short vpi;
334 int vci;
335
336 vpi = cid >> he_dev->vcibits;
337 vci = cid & ((1 << he_dev->vcibits) - 1);
338 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339
340 sk_for_each(s, head) {
341 vcc = atm_sk(s);
342 if (vcc->dev == he_dev->atm_dev &&
343 vcc->vci == vci && vcc->vpi == vpi &&
344 vcc->qos.rxtp.traffic_class != ATM_NONE) {
345 return vcc;
346 }
347 }
348 return NULL;
349}
350
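/*
 * he_init_one -- PCI probe: enable the device, restrict DMA to 32 bits,
 * register the ATM device, allocate and link the per-device he_dev
 * structure, and hand off to he_start() for hardware initialization.
 */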
351static int he_init_one(struct pci_dev *pci_dev,
352 const struct pci_device_id *pci_ent)
353{
354 struct atm_dev *atm_dev = NULL;
355 struct he_dev *he_dev = NULL;
356 int err = 0;
357
358 printk(KERN_INFO "ATM he driver\n");
359
360 if (pci_enable_device(pci_dev))
361 return -EIO;
362 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
363 printk(KERN_WARNING "he: no suitable dma available\n");
364 err = -EIO;
365 goto init_one_failure;
366 }
367
368 atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369 if (!atm_dev) {
370 err = -ENODEV;
371 goto init_one_failure;
372 }
373 pci_set_drvdata(pci_dev, atm_dev);
374
	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
377 if (!he_dev) {
378 err = -ENOMEM;
379 goto init_one_failure;
380 }
381 he_dev->pci_dev = pci_dev;
382 he_dev->atm_dev = atm_dev;
383 he_dev->atm_dev->dev_data = he_dev;
384 atm_dev->dev_data = he_dev;
385 he_dev->number = atm_dev->number;
386 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387 spin_lock_init(&he_dev->global_lock);
388
389 if (he_start(atm_dev)) {
390 he_stop(he_dev);
391 err = -ENODEV;
392 goto init_one_failure;
393 }
394 he_dev->next = NULL;
395 if (he_devs)
396 he_dev->next = he_devs;
397 he_devs = he_dev;
398 return 0;
399
400init_one_failure:
401 if (atm_dev)
402 atm_dev_deregister(atm_dev);
403 kfree(he_dev);
404 pci_disable_device(pci_dev);
405 return err;
406}
407
408static void he_remove_one(struct pci_dev *pci_dev)
409{
410 struct atm_dev *atm_dev;
411 struct he_dev *he_dev;
412
413 atm_dev = pci_get_drvdata(pci_dev);
414 he_dev = HE_DEV(atm_dev);
415
416 /* need to remove from he_devs */
417
418 he_stop(he_dev);
419 atm_dev_deregister(atm_dev);
420 kfree(he_dev);
421
422 pci_disable_device(pci_dev);
423}
424
425
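/*
 * rate_to_atmf -- convert a rate in cells per second to the ATM Forum
 * floating point format used by the transmit state registers: a non-zero
 * flag in bit 14, a 5-bit exponent and a 9-bit mantissa, so that
 * rate ~= 2^exp * (1 + mantissa/512).
 */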
426static unsigned
427rate_to_atmf(unsigned rate) /* cps to atm forum format */
428{
429#define NONZERO (1 << 14)
430
431 unsigned exp = 0;
432
433 if (rate == 0)
434 return 0;
435
436 rate <<= 9;
437 while (rate > 0x3ff) {
438 ++exp;
439 rate >>= 1;
440 }
441
442 return (NONZERO | (exp << 9) | (rate & 0x1ff));
443}
444
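/*
 * he_init_rx_lbfp0 -- build the group 0 receive local buffer free pool:
 * write a local buffer descriptor (address and next index) into RCM for
 * each r0 buffer, then program the pool's head, tail and count registers.
 */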
445static void he_init_rx_lbfp0(struct he_dev *he_dev)
446{
447 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
448 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
449 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
450 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
451
452 lbufd_index = 0;
453 lbm_offset = he_readl(he_dev, RCMLBM_BA);
454
455 he_writel(he_dev, lbufd_index, RLBF0_H);
456
457 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
458 lbufd_index += 2;
459 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
460
461 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
462 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
463
464 if (++lbuf_count == lbufs_per_row) {
465 lbuf_count = 0;
466 row_offset += he_dev->bytes_per_row;
467 }
468 lbm_offset += 4;
469 }
470
471 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
472 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
473}
474
475static void he_init_rx_lbfp1(struct he_dev *he_dev)
476{
477 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
478 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
479 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
480 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
481
482 lbufd_index = 1;
483 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
484
485 he_writel(he_dev, lbufd_index, RLBF1_H);
486
487 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
488 lbufd_index += 2;
489 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
490
491 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
492 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
493
494 if (++lbuf_count == lbufs_per_row) {
495 lbuf_count = 0;
496 row_offset += he_dev->bytes_per_row;
497 }
498 lbm_offset += 4;
499 }
500
501 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
502 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
503}
504
505static void he_init_tx_lbfp(struct he_dev *he_dev)
506{
507 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
511
512 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
514
515 he_writel(he_dev, lbufd_index, TLBF_H);
516
517 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
518 lbufd_index += 1;
519 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
520
521 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
523
524 if (++lbuf_count == lbufs_per_row) {
525 lbuf_count = 0;
526 row_offset += he_dev->bytes_per_row;
527 }
528 lbm_offset += 2;
529 }
530
531 he_writel(he_dev, lbufd_index - 1, TLBF_T);
532}
533
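/*
 * he_init_tpdrq -- allocate the transmit packet descriptor ready queue in
 * coherent DMA memory and point the adapter's TPDRQ base, tail and size
 * registers at it.
 */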
534static int he_init_tpdrq(struct he_dev *he_dev)
535{
536 he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
537 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
538 &he_dev->tpdrq_phys, GFP_KERNEL);
539 if (he_dev->tpdrq_base == NULL) {
540 hprintk("failed to alloc tpdrq\n");
541 return -ENOMEM;
542 }
543
544 he_dev->tpdrq_tail = he_dev->tpdrq_base;
545 he_dev->tpdrq_head = he_dev->tpdrq_base;
546
547 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
548 he_writel(he_dev, 0, TPDRQ_T);
549 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
550
551 return 0;
552}
553
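/*
 * he_init_cs_block -- initialize the cell scheduler: clear the connection
 * state timers, load the first row of rate grid timer reload values, and
 * program the ER control and threshold constants from tables 5.1-5.9
 * (separate sets of values for 155 and 622 Mbit links).
 */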
554static void he_init_cs_block(struct he_dev *he_dev)
555{
556 unsigned clock, rate, delta;
557 int reg;
558
559 /* 5.1.7 cs block initialization */
560
561 for (reg = 0; reg < 0x20; ++reg)
562 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
563
564 /* rate grid timer reload values */
565
566 clock = he_is622(he_dev) ? 66667000 : 50000000;
567 rate = he_dev->atm_dev->link_rate;
568 delta = rate / 16 / 2;
569
570 for (reg = 0; reg < 0x10; ++reg) {
571 /* 2.4 internal transmit function
572 *
573 * we initialize the first row in the rate grid.
574 * values are period (in clock cycles) of timer
575 */
576 unsigned period = clock / rate;
577
578 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
579 rate -= delta;
580 }
581
582 if (he_is622(he_dev)) {
583 /* table 5.2 (4 cells per lbuf) */
584 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
585 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
586 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
587 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
588 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
589
590 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
591 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
592 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
593 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
594 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
595 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
596 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
597
598 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
599
600 /* table 5.8 */
601 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
602 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
603 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
604 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
605 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
606 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
607
608 /* table 5.9 */
609 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
610 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
611 } else {
612 /* table 5.1 (4 cells per lbuf) */
613 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
614 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
615 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
616 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
617 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
618
619 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
620 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
621 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
622 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
623 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
624 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
625 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
626
627 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
628
629 /* table 5.8 */
630 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
631 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
632 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
633 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
634 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
635 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
636
637 /* table 5.9 */
638 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
639 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
640 }
641
642 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
643
644 for (reg = 0; reg < 0x8; ++reg)
645 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
646
647}
648
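/*
 * he_init_cs_block_rcm -- build a host copy of the scheduler's 16x16 rate
 * grid and use it to fill the rate-to-group table in RCM: each ATM Forum
 * rate value maps to the slowest grid entry that still meets it, paired
 * with a small transmit buffer limit (two 16-bit entries per register).
 */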
649static int he_init_cs_block_rcm(struct he_dev *he_dev)
650{
651 unsigned (*rategrid)[16][16];
652 unsigned rate, delta;
653 int i, j, reg;
654
655 unsigned rate_atmf, exp, man;
656 unsigned long long rate_cps;
657 int mult, buf, buf_limit = 4;
658
659 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
660 if (!rategrid)
661 return -ENOMEM;
662
663 /* initialize rate grid group table */
664
665 for (reg = 0x0; reg < 0xff; ++reg)
666 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
667
668 /* initialize rate controller groups */
669
670 for (reg = 0x100; reg < 0x1ff; ++reg)
671 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
672
673 /* initialize tNrm lookup table */
674
675 /* the manual makes reference to a routine in a sample driver
676 for proper configuration; fortunately, we only need this
677 in order to support abr connection */
678
679 /* initialize rate to group table */
680
681 rate = he_dev->atm_dev->link_rate;
682 delta = rate / 32;
683
684 /*
685 * 2.4 transmit internal functions
686 *
687 * we construct a copy of the rate grid used by the scheduler
688 * in order to construct the rate to group table below
689 */
690
691 for (j = 0; j < 16; j++) {
692 (*rategrid)[0][j] = rate;
693 rate -= delta;
694 }
695
696 for (i = 1; i < 16; i++)
697 for (j = 0; j < 16; j++)
698 if (i > 14)
699 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
700 else
701 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
702
703 /*
704 * 2.4 transmit internal function
705 *
706 * this table maps the upper 5 bits of exponent and mantissa
707 * of the atm forum representation of the rate into an index
708 * on rate grid
709 */
710
711 rate_atmf = 0;
712 while (rate_atmf < 0x400) {
713 man = (rate_atmf & 0x1f) << 4;
714 exp = rate_atmf >> 5;
715
716 /*
717 instead of '/ 512', use '>> 9' to prevent a call
718 to divdu3 on x86 platforms
719 */
720 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
721
722 if (rate_cps < 10)
723 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
724
725 for (i = 255; i > 0; i--)
726 if ((*rategrid)[i/16][i%16] >= rate_cps)
727 break; /* pick nearest rate instead? */
728
729 /*
730 * each table entry is 16 bits: (rate grid index (8 bits)
731 * and a buffer limit (8 bits)
732 * there are two table entries in each 32-bit register
733 */
734
735#ifdef notdef
736 buf = rate_cps * he_dev->tx_numbuffs /
737 (he_dev->atm_dev->link_rate * 2);
738#else
739 /* this is pretty, but avoids _divdu3 and is mostly correct */
740 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
741 if (rate_cps > (272 * mult))
742 buf = 4;
743 else if (rate_cps > (204 * mult))
744 buf = 3;
745 else if (rate_cps > (136 * mult))
746 buf = 2;
747 else if (rate_cps > (68 * mult))
748 buf = 1;
749 else
750 buf = 0;
751#endif
752 if (buf > buf_limit)
753 buf = buf_limit;
754 reg = (reg << 16) | ((i << 8) | buf);
755
756#define RTGTBL_OFFSET 0x400
757
758 if (rate_atmf & 0x1)
759 he_writel_rcm(he_dev, reg,
760 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
761
762 ++rate_atmf;
763 }
764
765 kfree(rategrid);
766 return 0;
767}
768
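/*
 * he_init_group -- set up the receive and transmit queues for one group:
 * leave the small buffer pool empty, allocate the large buffer pool
 * (RBPL) with its bitmap and virt tables and prime it with buffers, then
 * allocate the receive and transmit buffer return queues (RBRQ, TBRQ)
 * and program the corresponding adapter registers.
 */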
769static int he_init_group(struct he_dev *he_dev, int group)
770{
771 struct he_buff *heb, *next;
772 dma_addr_t mapping;
773 int i;
774
775 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
776 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
777 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
778 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
779 G0_RBPS_BS + (group * 32));
780
781 /* bitmap table */
782 he_dev->rbpl_table = kmalloc_array(BITS_TO_LONGS(RBPL_TABLE_SIZE),
783 sizeof(*he_dev->rbpl_table),
784 GFP_KERNEL);
785 if (!he_dev->rbpl_table) {
786 hprintk("unable to allocate rbpl bitmap table\n");
787 return -ENOMEM;
788 }
789 bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
790
791 /* rbpl_virt 64-bit pointers */
792 he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
793 sizeof(*he_dev->rbpl_virt),
794 GFP_KERNEL);
795 if (!he_dev->rbpl_virt) {
796 hprintk("unable to allocate rbpl virt table\n");
797 goto out_free_rbpl_table;
798 }
799
800 /* large buffer pool */
801 he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
802 CONFIG_RBPL_BUFSIZE, 64, 0);
803 if (he_dev->rbpl_pool == NULL) {
804 hprintk("unable to create rbpl pool\n");
805 goto out_free_rbpl_virt;
806 }
807
808 he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
809 CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
810 &he_dev->rbpl_phys, GFP_KERNEL);
811 if (he_dev->rbpl_base == NULL) {
812 hprintk("failed to alloc rbpl_base\n");
813 goto out_destroy_rbpl_pool;
814 }
815
816 INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
817
818 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
819
820 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
821 if (!heb)
822 goto out_free_rbpl;
823 heb->mapping = mapping;
824 list_add(&heb->entry, &he_dev->rbpl_outstanding);
825
826 set_bit(i, he_dev->rbpl_table);
827 he_dev->rbpl_virt[i] = heb;
828 he_dev->rbpl_hint = i + 1;
829 he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
830 he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
831 }
832 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
833
834 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
835 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
836 G0_RBPL_T + (group * 32));
837 he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
838 G0_RBPL_BS + (group * 32));
839 he_writel(he_dev,
840 RBP_THRESH(CONFIG_RBPL_THRESH) |
841 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
842 RBP_INT_ENB,
843 G0_RBPL_QI + (group * 32));
844
845 /* rx buffer ready queue */
846
847 he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
848 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
849 &he_dev->rbrq_phys, GFP_KERNEL);
850 if (he_dev->rbrq_base == NULL) {
851 hprintk("failed to allocate rbrq\n");
852 goto out_free_rbpl;
853 }
854
855 he_dev->rbrq_head = he_dev->rbrq_base;
856 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
857 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
858 he_writel(he_dev,
859 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
860 G0_RBRQ_Q + (group * 16));
861 if (irq_coalesce) {
862 hprintk("coalescing interrupts\n");
863 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
864 G0_RBRQ_I + (group * 16));
865 } else
866 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
867 G0_RBRQ_I + (group * 16));
868
869 /* tx buffer ready queue */
870
871 he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
872 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
873 &he_dev->tbrq_phys, GFP_KERNEL);
874 if (he_dev->tbrq_base == NULL) {
875 hprintk("failed to allocate tbrq\n");
876 goto out_free_rbpq_base;
877 }
878
879 he_dev->tbrq_head = he_dev->tbrq_base;
880
881 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
882 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
883 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
884 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
885
886 return 0;
887
888out_free_rbpq_base:
889 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
890 sizeof(struct he_rbrq), he_dev->rbrq_base,
891 he_dev->rbrq_phys);
892out_free_rbpl:
893 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
894 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
895
896 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
897 sizeof(struct he_rbp), he_dev->rbpl_base,
898 he_dev->rbpl_phys);
899out_destroy_rbpl_pool:
900 dma_pool_destroy(he_dev->rbpl_pool);
901out_free_rbpl_virt:
902 kfree(he_dev->rbpl_virt);
903out_free_rbpl_table:
904 kfree(he_dev->rbpl_table);
905
906 return -ENOMEM;
907}
908
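/*
 * he_init_irq -- allocate interrupt queue 0 (plus its tail offset word),
 * mark every entry invalid, program the IRQ0 registers, zero the unused
 * queues 1-3 and the group mapping registers, and request the shared
 * PCI interrupt line.
 */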
909static int he_init_irq(struct he_dev *he_dev)
910{
911 int i;
912
913 /* 2.9.3.5 tail offset for each interrupt queue is located after the
914 end of the interrupt queue */
915
916 he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
917 (CONFIG_IRQ_SIZE + 1)
918 * sizeof(struct he_irq),
919 &he_dev->irq_phys,
920 GFP_KERNEL);
921 if (he_dev->irq_base == NULL) {
922 hprintk("failed to allocate irq\n");
923 return -ENOMEM;
924 }
925 he_dev->irq_tailoffset = (unsigned *)
926 &he_dev->irq_base[CONFIG_IRQ_SIZE];
927 *he_dev->irq_tailoffset = 0;
928 he_dev->irq_head = he_dev->irq_base;
929 he_dev->irq_tail = he_dev->irq_base;
930
931 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
932 he_dev->irq_base[i].isw = ITYPE_INVALID;
933
934 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
935 he_writel(he_dev,
936 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
937 IRQ0_HEAD);
938 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
939 he_writel(he_dev, 0x0, IRQ0_DATA);
940
941 he_writel(he_dev, 0x0, IRQ1_BASE);
942 he_writel(he_dev, 0x0, IRQ1_HEAD);
943 he_writel(he_dev, 0x0, IRQ1_CNTL);
944 he_writel(he_dev, 0x0, IRQ1_DATA);
945
946 he_writel(he_dev, 0x0, IRQ2_BASE);
947 he_writel(he_dev, 0x0, IRQ2_HEAD);
948 he_writel(he_dev, 0x0, IRQ2_CNTL);
949 he_writel(he_dev, 0x0, IRQ2_DATA);
950
951 he_writel(he_dev, 0x0, IRQ3_BASE);
952 he_writel(he_dev, 0x0, IRQ3_HEAD);
953 he_writel(he_dev, 0x0, IRQ3_CNTL);
954 he_writel(he_dev, 0x0, IRQ3_DATA);
955
956 /* 2.9.3.2 interrupt queue mapping registers */
957
958 he_writel(he_dev, 0x0, GRP_10_MAP);
959 he_writel(he_dev, 0x0, GRP_32_MAP);
960 he_writel(he_dev, 0x0, GRP_54_MAP);
961 he_writel(he_dev, 0x0, GRP_76_MAP);
962
963 if (request_irq(he_dev->pci_dev->irq,
964 he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
965 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
966 return -EINVAL;
967 }
968
969 he_dev->irq = he_dev->pci_dev->irq;
970
971 return 0;
972}
973
974static int he_start(struct atm_dev *dev)
975{
976 struct he_dev *he_dev;
977 struct pci_dev *pci_dev;
978 unsigned long membase;
979
980 u16 command;
981 u32 gen_cntl_0, host_cntl, lb_swap;
982 u8 cache_size, timer;
983
984 unsigned err;
985 unsigned int status, reg;
986 int i, group;
987
988 he_dev = HE_DEV(dev);
989 pci_dev = he_dev->pci_dev;
990
991 membase = pci_resource_start(pci_dev, 0);
992 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
993
994 /*
995 * pci bus controller initialization
996 */
997
998 /* 4.3 pci bus controller-specific initialization */
999 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1000 hprintk("can't read GEN_CNTL_0\n");
1001 return -EINVAL;
1002 }
1003 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1004 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1005 hprintk("can't write GEN_CNTL_0.\n");
1006 return -EINVAL;
1007 }
1008
1009 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1010 hprintk("can't read PCI_COMMAND.\n");
1011 return -EINVAL;
1012 }
1013
1014 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1015 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1016 hprintk("can't enable memory.\n");
1017 return -EINVAL;
1018 }
1019
1020 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1021 hprintk("can't read cache line size?\n");
1022 return -EINVAL;
1023 }
1024
1025 if (cache_size < 16) {
1026 cache_size = 16;
1027 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1028 hprintk("can't set cache line size to %d\n", cache_size);
1029 }
1030
1031 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1032 hprintk("can't read latency timer?\n");
1033 return -EINVAL;
1034 }
1035
1036 /* from table 3.9
1037 *
1038 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1039 *
1040 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1041 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1042 *
1043 */
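	/* worst case from the table above: 1 + 16 + 192 = 209 pci clocks */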
1044#define LAT_TIMER 209
1045 if (timer < LAT_TIMER) {
1046 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1047 timer = LAT_TIMER;
1048 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1049 hprintk("can't set latency timer to %d\n", timer);
1050 }
1051
1052 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1053 hprintk("can't set up page mapping\n");
1054 return -EINVAL;
1055 }
1056
1057 /* 4.4 card reset */
1058 he_writel(he_dev, 0x0, RESET_CNTL);
1059 he_writel(he_dev, 0xff, RESET_CNTL);
1060
1061 msleep(16); /* 16 ms */
1062 status = he_readl(he_dev, RESET_CNTL);
1063 if ((status & BOARD_RST_STATUS) == 0) {
1064 hprintk("reset failed\n");
1065 return -EINVAL;
1066 }
1067
1068 /* 4.5 set bus width */
1069 host_cntl = he_readl(he_dev, HOST_CNTL);
1070 if (host_cntl & PCI_BUS_SIZE64)
1071 gen_cntl_0 |= ENBL_64;
1072 else
1073 gen_cntl_0 &= ~ENBL_64;
1074
1075 if (disable64 == 1) {
1076 hprintk("disabling 64-bit pci bus transfers\n");
1077 gen_cntl_0 &= ~ENBL_64;
1078 }
1079
1080 if (gen_cntl_0 & ENBL_64)
1081 hprintk("64-bit transfers enabled\n");
1082
1083 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1084
1085 /* 4.7 read prom contents */
1086 for (i = 0; i < PROD_ID_LEN; ++i)
1087 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1088
1089 he_dev->media = read_prom_byte(he_dev, MEDIA);
1090
1091 for (i = 0; i < 6; ++i)
1092 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1093
1094 hprintk("%s%s, %pM\n", he_dev->prod_id,
1095 he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1096 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1097 ATM_OC12_PCR : ATM_OC3_PCR;
1098
1099	/* 4.6 set host endianness */
1100 lb_swap = he_readl(he_dev, LB_SWAP);
1101 if (he_is622(he_dev))
1102 lb_swap &= ~XFER_SIZE; /* 4 cells */
1103 else
1104 lb_swap |= XFER_SIZE; /* 8 cells */
1105#ifdef __BIG_ENDIAN
1106 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1107#else
1108 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1109 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1110#endif /* __BIG_ENDIAN */
1111 he_writel(he_dev, lb_swap, LB_SWAP);
1112
1113 /* 4.8 sdram controller initialization */
1114 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1115
1116 /* 4.9 initialize rnum value */
1117 lb_swap |= SWAP_RNUM_MAX(0xf);
1118 he_writel(he_dev, lb_swap, LB_SWAP);
1119
1120 /* 4.10 initialize the interrupt queues */
1121 if ((err = he_init_irq(he_dev)) != 0)
1122 return err;
1123
1124 /* 4.11 enable pci bus controller state machines */
1125 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1126 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1127 he_writel(he_dev, host_cntl, HOST_CNTL);
1128
1129 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1130 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1131
1132 /*
1133 * atm network controller initialization
1134 */
1135
1136 /* 5.1.1 generic configuration state */
1137
1138 /*
1139 * local (cell) buffer memory map
1140 *
1141 * HE155 HE622
1142 *
1143 * 0 ____________1023 bytes 0 _______________________2047 bytes
1144 * | | | | |
1145 * | utility | | rx0 | |
1146 * 5|____________| 255|___________________| u |
1147 * 6| | 256| | t |
1148 * | | | | i |
1149 * | rx0 | row | tx | l |
1150 * | | | | i |
1151 * | | 767|___________________| t |
1152 * 517|____________| 768| | y |
1153 * row 518| | | rx1 | |
1154 * | | 1023|___________________|___|
1155 * | |
1156 * | tx |
1157 * | |
1158 * | |
1159 * 1535|____________|
1160 * 1536| |
1161 * | rx1 |
1162 * 2047|____________|
1163 *
1164 */
1165
1166 /* total 4096 connections */
1167 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1168 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1169
1170 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1171 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1172 return -ENODEV;
1173 }
1174
1175 if (nvpibits != -1) {
1176 he_dev->vpibits = nvpibits;
1177 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1178 }
1179
1180 if (nvcibits != -1) {
1181 he_dev->vcibits = nvcibits;
1182 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1183 }
1184
1185
1186 if (he_is622(he_dev)) {
1187 he_dev->cells_per_row = 40;
1188 he_dev->bytes_per_row = 2048;
1189 he_dev->r0_numrows = 256;
1190 he_dev->tx_numrows = 512;
1191 he_dev->r1_numrows = 256;
1192 he_dev->r0_startrow = 0;
1193 he_dev->tx_startrow = 256;
1194 he_dev->r1_startrow = 768;
1195 } else {
1196 he_dev->cells_per_row = 20;
1197 he_dev->bytes_per_row = 1024;
1198 he_dev->r0_numrows = 512;
1199 he_dev->tx_numrows = 1018;
1200 he_dev->r1_numrows = 512;
1201 he_dev->r0_startrow = 6;
1202 he_dev->tx_startrow = 518;
1203 he_dev->r1_startrow = 1536;
1204 }
1205
1206 he_dev->cells_per_lbuf = 4;
1207 he_dev->buffer_limit = 4;
1208 he_dev->r0_numbuffs = he_dev->r0_numrows *
1209 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1210 if (he_dev->r0_numbuffs > 2560)
1211 he_dev->r0_numbuffs = 2560;
1212
1213 he_dev->r1_numbuffs = he_dev->r1_numrows *
1214 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1215 if (he_dev->r1_numbuffs > 2560)
1216 he_dev->r1_numbuffs = 2560;
1217
1218 he_dev->tx_numbuffs = he_dev->tx_numrows *
1219 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1220 if (he_dev->tx_numbuffs > 5120)
1221 he_dev->tx_numbuffs = 5120;
1222
1223 /* 5.1.2 configure hardware dependent registers */
1224
1225 he_writel(he_dev,
1226 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1227 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1228 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1229 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1230 LBARB);
1231
1232 he_writel(he_dev, BANK_ON |
1233 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1234 SDRAMCON);
1235
1236 he_writel(he_dev,
1237 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1238 RM_RW_WAIT(1), RCMCONFIG);
1239 he_writel(he_dev,
1240 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1241 TM_RW_WAIT(1), TCMCONFIG);
1242
1243 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1244
1245 he_writel(he_dev,
1246 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1247 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1248 RX_VALVP(he_dev->vpibits) |
1249 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1250
1251 he_writel(he_dev, DRF_THRESH(0x20) |
1252 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1253 TX_VCI_MASK(he_dev->vcibits) |
1254 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1255
1256 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1257
1258 he_writel(he_dev, PHY_INT_ENB |
1259 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1260 RH_CONFIG);
1261
1262 /* 5.1.3 initialize connection memory */
1263
1264 for (i = 0; i < TCM_MEM_SIZE; ++i)
1265 he_writel_tcm(he_dev, 0, i);
1266
1267 for (i = 0; i < RCM_MEM_SIZE; ++i)
1268 he_writel_rcm(he_dev, 0, i);
1269
1270 /*
1271 * transmit connection memory map
1272 *
1273 * tx memory
1274 * 0x0 ___________________
1275 * | |
1276 * | |
1277 * | TSRa |
1278 * | |
1279 * | |
1280 * 0x8000|___________________|
1281 * | |
1282 * | TSRb |
1283 * 0xc000|___________________|
1284 * | |
1285 * | TSRc |
1286 * 0xe000|___________________|
1287 * | TSRd |
1288 * 0xf000|___________________|
1289 * | tmABR |
1290 * 0x10000|___________________|
1291 * | |
1292 * | tmTPD |
1293 * |___________________|
1294 * | |
1295 * ....
1296 * 0x1ffff|___________________|
1297 *
1298 *
1299 */
1300
1301 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1302 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1303 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1304 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1305 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1306
1307
1308 /*
1309 * receive connection memory map
1310 *
1311 * 0x0 ___________________
1312 * | |
1313 * | |
1314 * | RSRa |
1315 * | |
1316 * | |
1317 * 0x8000|___________________|
1318 * | |
1319 * | rx0/1 |
1320 * | LBM | link lists of local
1321 * | tx | buffer memory
1322 * | |
1323 * 0xd000|___________________|
1324 * | |
1325 * | rmABR |
1326 * 0xe000|___________________|
1327 * | |
1328 * | RSRb |
1329 * |___________________|
1330 * | |
1331 * ....
1332 * 0xffff|___________________|
1333 */
1334
1335 he_writel(he_dev, 0x08000, RCMLBM_BA);
1336 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1337 he_writel(he_dev, 0x0d800, RCMABR_BA);
1338
1339 /* 5.1.4 initialize local buffer free pools linked lists */
1340
1341 he_init_rx_lbfp0(he_dev);
1342 he_init_rx_lbfp1(he_dev);
1343
1344 he_writel(he_dev, 0x0, RLBC_H);
1345 he_writel(he_dev, 0x0, RLBC_T);
1346 he_writel(he_dev, 0x0, RLBC_H2);
1347
1348 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1349 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1350
1351 he_init_tx_lbfp(he_dev);
1352
1353 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1354
1355 /* 5.1.5 initialize intermediate receive queues */
1356
1357 if (he_is622(he_dev)) {
1358 he_writel(he_dev, 0x000f, G0_INMQ_S);
1359 he_writel(he_dev, 0x200f, G0_INMQ_L);
1360
1361 he_writel(he_dev, 0x001f, G1_INMQ_S);
1362 he_writel(he_dev, 0x201f, G1_INMQ_L);
1363
1364 he_writel(he_dev, 0x002f, G2_INMQ_S);
1365 he_writel(he_dev, 0x202f, G2_INMQ_L);
1366
1367 he_writel(he_dev, 0x003f, G3_INMQ_S);
1368 he_writel(he_dev, 0x203f, G3_INMQ_L);
1369
1370 he_writel(he_dev, 0x004f, G4_INMQ_S);
1371 he_writel(he_dev, 0x204f, G4_INMQ_L);
1372
1373 he_writel(he_dev, 0x005f, G5_INMQ_S);
1374 he_writel(he_dev, 0x205f, G5_INMQ_L);
1375
1376 he_writel(he_dev, 0x006f, G6_INMQ_S);
1377 he_writel(he_dev, 0x206f, G6_INMQ_L);
1378
1379 he_writel(he_dev, 0x007f, G7_INMQ_S);
1380 he_writel(he_dev, 0x207f, G7_INMQ_L);
1381 } else {
1382 he_writel(he_dev, 0x0000, G0_INMQ_S);
1383 he_writel(he_dev, 0x0008, G0_INMQ_L);
1384
1385 he_writel(he_dev, 0x0001, G1_INMQ_S);
1386 he_writel(he_dev, 0x0009, G1_INMQ_L);
1387
1388 he_writel(he_dev, 0x0002, G2_INMQ_S);
1389 he_writel(he_dev, 0x000a, G2_INMQ_L);
1390
1391 he_writel(he_dev, 0x0003, G3_INMQ_S);
1392 he_writel(he_dev, 0x000b, G3_INMQ_L);
1393
1394 he_writel(he_dev, 0x0004, G4_INMQ_S);
1395 he_writel(he_dev, 0x000c, G4_INMQ_L);
1396
1397 he_writel(he_dev, 0x0005, G5_INMQ_S);
1398 he_writel(he_dev, 0x000d, G5_INMQ_L);
1399
1400 he_writel(he_dev, 0x0006, G6_INMQ_S);
1401 he_writel(he_dev, 0x000e, G6_INMQ_L);
1402
1403 he_writel(he_dev, 0x0007, G7_INMQ_S);
1404 he_writel(he_dev, 0x000f, G7_INMQ_L);
1405 }
1406
1407 /* 5.1.6 application tunable parameters */
1408
1409 he_writel(he_dev, 0x0, MCC);
1410 he_writel(he_dev, 0x0, OEC);
1411 he_writel(he_dev, 0x0, DCC);
1412 he_writel(he_dev, 0x0, CEC);
1413
1414 /* 5.1.7 cs block initialization */
1415
1416 he_init_cs_block(he_dev);
1417
1418 /* 5.1.8 cs block connection memory initialization */
1419
1420 if (he_init_cs_block_rcm(he_dev) < 0)
1421 return -ENOMEM;
1422
1423 /* 5.1.10 initialize host structures */
1424
1425 he_init_tpdrq(he_dev);
1426
1427 he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
1428 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1429 if (he_dev->tpd_pool == NULL) {
1430 hprintk("unable to create tpd dma_pool\n");
1431 return -ENOMEM;
1432 }
1433
1434 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1435
1436 if (he_init_group(he_dev, 0) != 0)
1437 return -ENOMEM;
1438
1439 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1440 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1441 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1442 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1443 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1444 G0_RBPS_BS + (group * 32));
1445
1446 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1447 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1448 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1449 G0_RBPL_QI + (group * 32));
1450 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1451
1452 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1453 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1454 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1455 G0_RBRQ_Q + (group * 16));
1456 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1457
1458 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1459 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1460 he_writel(he_dev, TBRQ_THRESH(0x1),
1461 G0_TBRQ_THRESH + (group * 16));
1462 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1463 }
1464
1465 /* host status page */
1466
1467 he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
1468 sizeof(struct he_hsp),
1469 &he_dev->hsp_phys, GFP_KERNEL);
1470 if (he_dev->hsp == NULL) {
1471 hprintk("failed to allocate host status page\n");
1472 return -ENOMEM;
1473 }
1474 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1475
1476 /* initialize framer */
1477
1478#ifdef CONFIG_ATM_HE_USE_SUNI
1479 if (he_isMM(he_dev))
1480 suni_init(he_dev->atm_dev);
1481 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1482 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1483#endif /* CONFIG_ATM_HE_USE_SUNI */
1484
1485 if (sdh) {
1486 /* this really should be in suni.c but for now... */
1487 int val;
1488
1489 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1490 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1491 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1492 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1493 }
1494
1495 /* 5.1.12 enable transmit and receive */
1496
1497 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1498 reg |= TX_ENABLE|ER_ENABLE;
1499 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1500
1501 reg = he_readl(he_dev, RC_CONFIG);
1502 reg |= RX_ENABLE;
1503 he_writel(he_dev, reg, RC_CONFIG);
1504
1505 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1506 he_dev->cs_stper[i].inuse = 0;
1507 he_dev->cs_stper[i].pcr = -1;
1508 }
1509 he_dev->total_bw = 0;
1510
1511
1512 /* atm linux initialization */
1513
1514 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1515 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1516
1517 he_dev->irq_peak = 0;
1518 he_dev->rbrq_peak = 0;
1519 he_dev->rbpl_peak = 0;
1520 he_dev->tbrq_peak = 0;
1521
1522 HPRINTK("hell bent for leather!\n");
1523
1524 return 0;
1525}
1526
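/* undo he_start: quiesce the adapter, release the irq and free every
   host-side queue and pool that was set up */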
1527static void
1528he_stop(struct he_dev *he_dev)
1529{
1530 struct he_buff *heb, *next;
1531 struct pci_dev *pci_dev;
1532 u32 gen_cntl_0, reg;
1533 u16 command;
1534
1535 pci_dev = he_dev->pci_dev;
1536
1537 /* disable interrupts */
1538
1539 if (he_dev->membase) {
1540 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1541 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1542 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1543
1544 tasklet_disable(&he_dev->tasklet);
1545
1546 /* disable recv and transmit */
1547
1548 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1549 reg &= ~(TX_ENABLE|ER_ENABLE);
1550 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1551
1552 reg = he_readl(he_dev, RC_CONFIG);
1553 reg &= ~(RX_ENABLE);
1554 he_writel(he_dev, reg, RC_CONFIG);
1555 }
1556
1557#ifdef CONFIG_ATM_HE_USE_SUNI
1558 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1559 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1560#endif /* CONFIG_ATM_HE_USE_SUNI */
1561
1562 if (he_dev->irq)
1563 free_irq(he_dev->irq, he_dev);
1564
1565 if (he_dev->irq_base)
1566 dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
1567 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1568
1569 if (he_dev->hsp)
1570 dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
1571 he_dev->hsp, he_dev->hsp_phys);
1572
1573 if (he_dev->rbpl_base) {
1574 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1575 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1576
1577 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
1578 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1579 }
1580
1581 kfree(he_dev->rbpl_virt);
1582 kfree(he_dev->rbpl_table);
1583 dma_pool_destroy(he_dev->rbpl_pool);
1584
1585 if (he_dev->rbrq_base)
1586 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1587 he_dev->rbrq_base, he_dev->rbrq_phys);
1588
1589 if (he_dev->tbrq_base)
1590 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1591 he_dev->tbrq_base, he_dev->tbrq_phys);
1592
1593 if (he_dev->tpdrq_base)
1594 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1595 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1596
1597 dma_pool_destroy(he_dev->tpd_pool);
1598
1599 if (he_dev->pci_dev) {
1600 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1601 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1602 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1603 }
1604
1605 if (he_dev->membase)
1606 iounmap(he_dev->membase);
1607}
1608
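/* grab a transmit packet descriptor from the tpd dma_pool; the descriptor's
   bus address is folded into its status word (TPD_ADDR) so the completion
   path can match TBRQ entries back to the descriptor */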
1609static struct he_tpd *
1610__alloc_tpd(struct he_dev *he_dev)
1611{
1612 struct he_tpd *tpd;
1613 dma_addr_t mapping;
1614
1615 tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1616 if (tpd == NULL)
1617 return NULL;
1618
1619 tpd->status = TPD_ADDR(mapping);
1620 tpd->reserved = 0;
1621 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1622 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1623 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1624
1625 return tpd;
1626}
1627
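/* the AAL5 CPCS trailer ends the PDU with UU, CPI, a 16-bit length field and
   a 32-bit CRC, so the length sits 6 and 5 bytes from the end of the buffer */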
1628#define AAL5_LEN(buf,len) \
1629 ((((unsigned char *)(buf))[(len)-6] << 8) | \
1630 (((unsigned char *)(buf))[(len)-5]))
1631
1632/* 2.10.1.2 receive
1633 *
1634 * aal5 packets can optionally return the tcp checksum in the lower
1635 * 16 bits of the crc (RSR0_TCP_CKSUM)
1636 */
1637
1638#define TCP_CKSUM(buf,len) \
1639 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1640 (((unsigned char *)(buf))[(len-1)]))
1641
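/* drain the receive buffer return queue for this group: collect the returned
   host buffers per connection and, on END_PDU, copy them into a freshly
   charged skb and push it up the vcc */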
1642static int
1643he_service_rbrq(struct he_dev *he_dev, int group)
1644{
1645 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1646 ((unsigned long)he_dev->rbrq_base |
1647 he_dev->hsp->group[group].rbrq_tail);
1648 unsigned cid, lastcid = -1;
1649 struct sk_buff *skb;
1650 struct atm_vcc *vcc = NULL;
1651 struct he_vcc *he_vcc;
1652 struct he_buff *heb, *next;
1653 int i;
1654 int pdus_assembled = 0;
1655 int updated = 0;
1656
1657 read_lock(&vcc_sklist_lock);
1658 while (he_dev->rbrq_head != rbrq_tail) {
1659 ++updated;
1660
1661 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1662 he_dev->rbrq_head, group,
1663 RBRQ_ADDR(he_dev->rbrq_head),
1664 RBRQ_BUFLEN(he_dev->rbrq_head),
1665 RBRQ_CID(he_dev->rbrq_head),
1666 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1667 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1668 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1669 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1670 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1671 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1672
1673 i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1674 heb = he_dev->rbpl_virt[i];
1675
1676 cid = RBRQ_CID(he_dev->rbrq_head);
1677 if (cid != lastcid)
1678 vcc = __find_vcc(he_dev, cid);
1679 lastcid = cid;
1680
1681 if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1682 hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
1683 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1684 clear_bit(i, he_dev->rbpl_table);
1685 list_del(&heb->entry);
1686 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1687 }
1688
1689 goto next_rbrq_entry;
1690 }
1691
1692 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1693 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1694 atomic_inc(&vcc->stats->rx_drop);
1695 goto return_host_buffers;
1696 }
1697
1698 heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1699 clear_bit(i, he_dev->rbpl_table);
1700 list_move_tail(&heb->entry, &he_vcc->buffers);
1701 he_vcc->pdu_len += heb->len;
1702
1703 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1704 lastcid = -1;
1705 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1706 wake_up(&he_vcc->rx_waitq);
1707 goto return_host_buffers;
1708 }
1709
1710 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1711 goto next_rbrq_entry;
1712
1713 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1714 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1715 HPRINTK("%s%s (%d.%d)\n",
1716 RBRQ_CRC_ERR(he_dev->rbrq_head)
1717 ? "CRC_ERR " : "",
1718 RBRQ_LEN_ERR(he_dev->rbrq_head)
1719 ? "LEN_ERR" : "",
1720 vcc->vpi, vcc->vci);
1721 atomic_inc(&vcc->stats->rx_err);
1722 goto return_host_buffers;
1723 }
1724
1725 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1726 GFP_ATOMIC);
1727 if (!skb) {
1728 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1729 goto return_host_buffers;
1730 }
1731
1732 if (rx_skb_reserve > 0)
1733 skb_reserve(skb, rx_skb_reserve);
1734
1735 __net_timestamp(skb);
1736
1737 list_for_each_entry(heb, &he_vcc->buffers, entry)
1738 memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1739
1740 switch (vcc->qos.aal) {
1741 case ATM_AAL0:
1742 /* 2.10.1.5 raw cell receive */
1743 skb->len = ATM_AAL0_SDU;
1744 skb_set_tail_pointer(skb, skb->len);
1745 break;
1746 case ATM_AAL5:
1747 /* 2.10.1.2 aal5 receive */
1748
1749 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1750 skb_set_tail_pointer(skb, skb->len);
1751#ifdef USE_CHECKSUM_HW
1752 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1753 skb->ip_summed = CHECKSUM_COMPLETE;
1754 skb->csum = TCP_CKSUM(skb->data,
1755 he_vcc->pdu_len);
1756 }
1757#endif
1758 break;
1759 }
1760
1761#ifdef should_never_happen
1762 if (skb->len > vcc->qos.rxtp.max_sdu)
1763 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1764#endif
1765
1766#ifdef notdef
1767 ATM_SKB(skb)->vcc = vcc;
1768#endif
1769 spin_unlock(&he_dev->global_lock);
1770 vcc->push(vcc, skb);
1771 spin_lock(&he_dev->global_lock);
1772
1773 atomic_inc(&vcc->stats->rx);
1774
1775return_host_buffers:
1776 ++pdus_assembled;
1777
1778 list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1779 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1780 INIT_LIST_HEAD(&he_vcc->buffers);
1781 he_vcc->pdu_len = 0;
1782
1783next_rbrq_entry:
1784 he_dev->rbrq_head = (struct he_rbrq *)
1785 ((unsigned long) he_dev->rbrq_base |
1786 RBRQ_MASK(he_dev->rbrq_head + 1));
1787
1788 }
1789 read_unlock(&vcc_sklist_lock);
1790
1791 if (updated) {
1792 if (updated > he_dev->rbrq_peak)
1793 he_dev->rbrq_peak = updated;
1794
1795 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1796 G0_RBRQ_H + (group * 16));
1797 }
1798
1799 return pdus_assembled;
1800}
1801
1802static void
1803he_service_tbrq(struct he_dev *he_dev, int group)
1804{
1805 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1806 ((unsigned long)he_dev->tbrq_base |
1807 he_dev->hsp->group[group].tbrq_tail);
1808 struct he_tpd *tpd;
1809 int slot, updated = 0;
1810 struct he_tpd *__tpd;
1811
1812 /* 2.1.6 transmit buffer return queue */
1813
1814 while (he_dev->tbrq_head != tbrq_tail) {
1815 ++updated;
1816
1817 HPRINTK("tbrq%d 0x%x%s%s\n",
1818 group,
1819 TBRQ_TPD(he_dev->tbrq_head),
1820 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1821 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1822 tpd = NULL;
1823 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1824 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1825 tpd = __tpd;
1826 list_del(&__tpd->entry);
1827 break;
1828 }
1829 }
1830
1831 if (tpd == NULL) {
1832 hprintk("unable to locate tpd for dma buffer %x\n",
1833 TBRQ_TPD(he_dev->tbrq_head));
1834 goto next_tbrq_entry;
1835 }
1836
1837 if (TBRQ_EOS(he_dev->tbrq_head)) {
1838			if (tpd->vcc) {
1839				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1840					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1841				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1842			}
1843 goto next_tbrq_entry;
1844 }
1845
1846 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1847 if (tpd->iovec[slot].addr)
1848 dma_unmap_single(&he_dev->pci_dev->dev,
1849 tpd->iovec[slot].addr,
1850 tpd->iovec[slot].len & TPD_LEN_MASK,
1851 DMA_TO_DEVICE);
1852 if (tpd->iovec[slot].len & TPD_LST)
1853 break;
1854
1855 }
1856
1857 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1858 if (tpd->vcc && tpd->vcc->pop)
1859 tpd->vcc->pop(tpd->vcc, tpd->skb);
1860 else
1861 dev_kfree_skb_any(tpd->skb);
1862 }
1863
1864next_tbrq_entry:
1865 if (tpd)
1866 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1867 he_dev->tbrq_head = (struct he_tbrq *)
1868 ((unsigned long) he_dev->tbrq_base |
1869 TBRQ_MASK(he_dev->tbrq_head + 1));
1870 }
1871
1872 if (updated) {
1873 if (updated > he_dev->tbrq_peak)
1874 he_dev->tbrq_peak = updated;
1875
1876 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1877 G0_TBRQ_H + (group * 16));
1878 }
1879}
1880
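/* replenish the large receive buffer pool: allocate he_buff entries from the
   rbpl dma_pool and advance the RBPL tail, stopping before it would catch up
   with the head (table 3.42) */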
1881static void
1882he_service_rbpl(struct he_dev *he_dev, int group)
1883{
1884 struct he_rbp *new_tail;
1885 struct he_rbp *rbpl_head;
1886 struct he_buff *heb;
1887 dma_addr_t mapping;
1888 int i;
1889 int moved = 0;
1890
1891 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1892 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1893
1894 for (;;) {
1895 new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1896 RBPL_MASK(he_dev->rbpl_tail+1));
1897
1898 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1899 if (new_tail == rbpl_head)
1900 break;
1901
1902 i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1903 if (i > (RBPL_TABLE_SIZE - 1)) {
1904 i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1905 if (i > (RBPL_TABLE_SIZE - 1))
1906 break;
1907 }
1908 he_dev->rbpl_hint = i + 1;
1909
1910 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
1911 if (!heb)
1912 break;
1913 heb->mapping = mapping;
1914 list_add(&heb->entry, &he_dev->rbpl_outstanding);
1915 he_dev->rbpl_virt[i] = heb;
1916 set_bit(i, he_dev->rbpl_table);
1917 new_tail->idx = i << RBP_IDX_OFFSET;
1918 new_tail->phys = mapping + offsetof(struct he_buff, data);
1919
1920 he_dev->rbpl_tail = new_tail;
1921 ++moved;
1922 }
1923
1924 if (moved)
1925 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1926}
1927
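/* bottom half: walk the interrupt queue, dispatch each event to the matching
   rbrq/tbrq/rbpl service routine, then return the new tail to the adapter */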
1928static void
1929he_tasklet(unsigned long data)
1930{
1931 unsigned long flags;
1932 struct he_dev *he_dev = (struct he_dev *) data;
1933 int group, type;
1934 int updated = 0;
1935
1936 HPRINTK("tasklet (0x%lx)\n", data);
1937 spin_lock_irqsave(&he_dev->global_lock, flags);
1938
1939 while (he_dev->irq_head != he_dev->irq_tail) {
1940 ++updated;
1941
1942 type = ITYPE_TYPE(he_dev->irq_head->isw);
1943 group = ITYPE_GROUP(he_dev->irq_head->isw);
1944
1945 switch (type) {
1946 case ITYPE_RBRQ_THRESH:
1947 HPRINTK("rbrq%d threshold\n", group);
1948 /* fall through */
1949 case ITYPE_RBRQ_TIMER:
1950 if (he_service_rbrq(he_dev, group))
1951 he_service_rbpl(he_dev, group);
1952 break;
1953 case ITYPE_TBRQ_THRESH:
1954 HPRINTK("tbrq%d threshold\n", group);
1955 /* fall through */
1956 case ITYPE_TPD_COMPLETE:
1957 he_service_tbrq(he_dev, group);
1958 break;
1959 case ITYPE_RBPL_THRESH:
1960 he_service_rbpl(he_dev, group);
1961 break;
1962 case ITYPE_RBPS_THRESH:
1963 /* shouldn't happen unless small buffers enabled */
1964 break;
1965 case ITYPE_PHY:
1966 HPRINTK("phy interrupt\n");
1967#ifdef CONFIG_ATM_HE_USE_SUNI
1968 spin_unlock_irqrestore(&he_dev->global_lock, flags);
1969 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1970 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1971 spin_lock_irqsave(&he_dev->global_lock, flags);
1972#endif
1973 break;
1974 case ITYPE_OTHER:
1975 switch (type|group) {
1976 case ITYPE_PARITY:
1977 hprintk("parity error\n");
1978 break;
1979 case ITYPE_ABORT:
1980 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1981 break;
1982 }
1983 break;
1984 case ITYPE_TYPE(ITYPE_INVALID):
1985 /* see 8.1.1 -- check all queues */
1986
1987 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1988
1989 he_service_rbrq(he_dev, 0);
1990 he_service_rbpl(he_dev, 0);
1991 he_service_tbrq(he_dev, 0);
1992 break;
1993 default:
1994 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1995 }
1996
1997 he_dev->irq_head->isw = ITYPE_INVALID;
1998
1999 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2000 }
2001
2002 if (updated) {
2003 if (updated > he_dev->irq_peak)
2004 he_dev->irq_peak = updated;
2005
2006 he_writel(he_dev,
2007 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2008 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2009 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2010 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2011 }
2012 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2013}
2014
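/* top half: pick up the tail offset the adapter wrote after the interrupt
   queue, ack the interrupt and leave the actual queue processing to the
   tasklet */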
2015static irqreturn_t
2016he_irq_handler(int irq, void *dev_id)
2017{
2018 unsigned long flags;
2019	struct he_dev *he_dev = (struct he_dev *)dev_id;
2020 int handled = 0;
2021
2022 if (he_dev == NULL)
2023 return IRQ_NONE;
2024
2025 spin_lock_irqsave(&he_dev->global_lock, flags);
2026
2027 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2028 (*he_dev->irq_tailoffset << 2));
2029
2030 if (he_dev->irq_tail == he_dev->irq_head) {
2031 HPRINTK("tailoffset not updated?\n");
2032 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2033 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2034 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2035 }
2036
2037#ifdef DEBUG
2038 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2039 hprintk("spurious (or shared) interrupt?\n");
2040#endif
2041
2042 if (he_dev->irq_head != he_dev->irq_tail) {
2043 handled = 1;
2044 tasklet_schedule(&he_dev->tasklet);
2045 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2046 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2047 }
2048 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2049 return IRQ_RETVAL(handled);
2050
2051}
2052
2053static __inline__ void
2054__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2055{
2056 struct he_tpdrq *new_tail;
2057
2058 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2059 tpd, cid, he_dev->tpdrq_tail);
2060
2061 /* new_tail = he_dev->tpdrq_tail; */
2062 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2063 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2064
2065 /*
2066 * check to see if we are about to set the tail == head
2067 * if true, update the head pointer from the adapter
2068 * to see if this is really the case (reading the queue
2069 * head for every enqueue would be unnecessarily slow)
2070 */
2071
2072 if (new_tail == he_dev->tpdrq_head) {
2073 he_dev->tpdrq_head = (struct he_tpdrq *)
2074 (((unsigned long)he_dev->tpdrq_base) |
2075 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2076
2077 if (new_tail == he_dev->tpdrq_head) {
2078 int slot;
2079
2080 hprintk("tpdrq full (cid 0x%x)\n", cid);
2081 /*
2082 * FIXME
2083 * push tpd onto a transmit backlog queue
2084 * after service_tbrq, service the backlog
2085 * for now, we just drop the pdu
2086 */
2087 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2088 if (tpd->iovec[slot].addr)
2089 dma_unmap_single(&he_dev->pci_dev->dev,
2090 tpd->iovec[slot].addr,
2091 tpd->iovec[slot].len & TPD_LEN_MASK,
2092 DMA_TO_DEVICE);
2093 }
2094 if (tpd->skb) {
2095 if (tpd->vcc->pop)
2096 tpd->vcc->pop(tpd->vcc, tpd->skb);
2097 else
2098 dev_kfree_skb_any(tpd->skb);
2099 atomic_inc(&tpd->vcc->stats->tx_err);
2100 }
2101 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2102 return;
2103 }
2104 }
2105
2106 /* 2.1.5 transmit packet descriptor ready queue */
2107 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2108 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2109 he_dev->tpdrq_tail->cid = cid;
2110 wmb();
2111
2112 he_dev->tpdrq_tail = new_tail;
2113
2114 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2115 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2116}
2117
2118static int
2119he_open(struct atm_vcc *vcc)
2120{
2121 unsigned long flags;
2122 struct he_dev *he_dev = HE_DEV(vcc->dev);
2123 struct he_vcc *he_vcc;
2124 int err = 0;
2125 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2126 short vpi = vcc->vpi;
2127 int vci = vcc->vci;
2128
2129 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2130 return 0;
2131
2132 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2133
2134 set_bit(ATM_VF_ADDR, &vcc->flags);
2135
2136 cid = he_mkcid(he_dev, vpi, vci);
2137
2138 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2139 if (he_vcc == NULL) {
2140 hprintk("unable to allocate he_vcc during open\n");
2141 return -ENOMEM;
2142 }
2143
2144 INIT_LIST_HEAD(&he_vcc->buffers);
2145 he_vcc->pdu_len = 0;
2146 he_vcc->rc_index = -1;
2147
2148 init_waitqueue_head(&he_vcc->rx_waitq);
2149 init_waitqueue_head(&he_vcc->tx_waitq);
2150
2151 vcc->dev_data = he_vcc;
2152
2153 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2154 int pcr_goal;
2155
2156 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2157 if (pcr_goal == 0)
2158 pcr_goal = he_dev->atm_dev->link_rate;
2159 if (pcr_goal < 0) /* means round down, technically */
2160 pcr_goal = -pcr_goal;
2161
2162 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2163
2164 switch (vcc->qos.aal) {
2165 case ATM_AAL5:
2166 tsr0_aal = TSR0_AAL5;
2167 tsr4 = TSR4_AAL5;
2168 break;
2169 case ATM_AAL0:
2170 tsr0_aal = TSR0_AAL0_SDU;
2171 tsr4 = TSR4_AAL0_SDU;
2172 break;
2173 default:
2174 err = -EINVAL;
2175 goto open_failed;
2176 }
2177
2178 spin_lock_irqsave(&he_dev->global_lock, flags);
2179 tsr0 = he_readl_tsr0(he_dev, cid);
2180 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2181
2182 if (TSR0_CONN_STATE(tsr0) != 0) {
2183 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2184 err = -EBUSY;
2185 goto open_failed;
2186 }
2187
2188 switch (vcc->qos.txtp.traffic_class) {
2189 case ATM_UBR:
2190 /* 2.3.3.1 open connection ubr */
2191
2192 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2193 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2194 break;
2195
2196 case ATM_CBR:
2197 /* 2.3.3.2 open connection cbr */
2198
2199 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2200 if ((he_dev->total_bw + pcr_goal)
2201 > (he_dev->atm_dev->link_rate * 9 / 10))
2202 {
2203 err = -EBUSY;
2204 goto open_failed;
2205 }
2206
2207 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2208
2209 /* find an unused cs_stper register */
2210 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2211 if (he_dev->cs_stper[reg].inuse == 0 ||
2212 he_dev->cs_stper[reg].pcr == pcr_goal)
2213 break;
2214
2215 if (reg == HE_NUM_CS_STPER) {
2216 err = -EBUSY;
2217 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2218 goto open_failed;
2219 }
2220
2221 he_dev->total_bw += pcr_goal;
2222
2223 he_vcc->rc_index = reg;
2224 ++he_dev->cs_stper[reg].inuse;
2225 he_dev->cs_stper[reg].pcr = pcr_goal;
2226
2227 clock = he_is622(he_dev) ? 66667000 : 50000000;
2228 period = clock / pcr_goal;
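			/* e.g. a full-rate OC-3 connection (pcr_goal around
			   353207 cells/s) gives a period of roughly 141 here */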
2229
2230 HPRINTK("rc_index = %d period = %d\n",
2231 reg, period);
2232
2233 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2234 CS_STPER0 + reg);
2235 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2236
2237 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2238 TSR0_RC_INDEX(reg);
2239
2240 break;
2241 default:
2242 err = -EINVAL;
2243 goto open_failed;
2244 }
2245
2246 spin_lock_irqsave(&he_dev->global_lock, flags);
2247
2248 he_writel_tsr0(he_dev, tsr0, cid);
2249 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2250 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2251 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2252 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2253 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2254
2255 he_writel_tsr3(he_dev, 0x0, cid);
2256 he_writel_tsr5(he_dev, 0x0, cid);
2257 he_writel_tsr6(he_dev, 0x0, cid);
2258 he_writel_tsr7(he_dev, 0x0, cid);
2259 he_writel_tsr8(he_dev, 0x0, cid);
2260 he_writel_tsr10(he_dev, 0x0, cid);
2261 he_writel_tsr11(he_dev, 0x0, cid);
2262 he_writel_tsr12(he_dev, 0x0, cid);
2263 he_writel_tsr13(he_dev, 0x0, cid);
2264 he_writel_tsr14(he_dev, 0x0, cid);
2265 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2266 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2267 }
2268
2269 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2270 unsigned aal;
2271
2272 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2273 &HE_VCC(vcc)->rx_waitq);
2274
2275 switch (vcc->qos.aal) {
2276 case ATM_AAL5:
2277 aal = RSR0_AAL5;
2278 break;
2279 case ATM_AAL0:
2280 aal = RSR0_RAWCELL;
2281 break;
2282 default:
2283 err = -EINVAL;
2284 goto open_failed;
2285 }
2286
2287 spin_lock_irqsave(&he_dev->global_lock, flags);
2288
2289 rsr0 = he_readl_rsr0(he_dev, cid);
2290 if (rsr0 & RSR0_OPEN_CONN) {
2291 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2292
2293 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2294 err = -EBUSY;
2295 goto open_failed;
2296 }
2297
2298 rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2299 rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2300 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2301 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2302
2303#ifdef USE_CHECKSUM_HW
2304 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2305 rsr0 |= RSR0_TCP_CKSUM;
2306#endif
2307
2308 he_writel_rsr4(he_dev, rsr4, cid);
2309 he_writel_rsr1(he_dev, rsr1, cid);
2310 /* 5.1.11 last parameter initialized should be
2311 the open/closed indication in rsr0 */
2312 he_writel_rsr0(he_dev,
2313 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2314 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2315
2316 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2317 }
2318
2319open_failed:
2320
2321 if (err) {
2322 kfree(he_vcc);
2323 clear_bit(ATM_VF_ADDR, &vcc->flags);
2324 }
2325 else
2326 set_bit(ATM_VF_READY, &vcc->flags);
2327
2328 return err;
2329}
2330
2331static void
2332he_close(struct atm_vcc *vcc)
2333{
2334 unsigned long flags;
2335 DECLARE_WAITQUEUE(wait, current);
2336 struct he_dev *he_dev = HE_DEV(vcc->dev);
2337 struct he_tpd *tpd;
2338 unsigned cid;
2339 struct he_vcc *he_vcc = HE_VCC(vcc);
2340#define MAX_RETRY 30
2341 int retry = 0, sleep = 1, tx_inuse;
2342
2343 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2344
2345 clear_bit(ATM_VF_READY, &vcc->flags);
2346 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2347
2348 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2349 int timeout;
2350
2351 HPRINTK("close rx cid 0x%x\n", cid);
2352
2353 /* 2.7.2.2 close receive operation */
2354
2355 /* wait for previous close (if any) to finish */
2356
2357 spin_lock_irqsave(&he_dev->global_lock, flags);
2358 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2359 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2360 udelay(250);
2361 }
2362
2363 set_current_state(TASK_UNINTERRUPTIBLE);
2364 add_wait_queue(&he_vcc->rx_waitq, &wait);
2365
2366 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2367 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2368 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2369 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2370
2371 timeout = schedule_timeout(30*HZ);
2372
2373 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2374 set_current_state(TASK_RUNNING);
2375
2376 if (timeout == 0)
2377 hprintk("close rx timeout cid 0x%x\n", cid);
2378
2379 HPRINTK("close rx cid 0x%x complete\n", cid);
2380
2381 }
2382
2383 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2384 volatile unsigned tsr4, tsr0;
2385 int timeout;
2386
2387 HPRINTK("close tx cid 0x%x\n", cid);
2388
2389 /* 2.1.2
2390 *
2391 * ... the host must first stop queueing packets to the TPDRQ
2392 * on the connection to be closed, then wait for all outstanding
2393 * packets to be transmitted and their buffers returned to the
2394 * TBRQ. When the last packet on the connection arrives in the
2395 * TBRQ, the host issues the close command to the adapter.
2396 */
2397
2398 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2399 (retry < MAX_RETRY)) {
2400 msleep(sleep);
2401 if (sleep < 250)
2402 sleep = sleep * 2;
2403
2404 ++retry;
2405 }
2406
2407 if (tx_inuse > 1)
2408 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2409
2410 /* 2.3.1.1 generic close operations with flush */
2411
2412 spin_lock_irqsave(&he_dev->global_lock, flags);
2413 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2414 /* also clears TSR4_SESSION_ENDED */
2415
2416 switch (vcc->qos.txtp.traffic_class) {
2417 case ATM_UBR:
2418 he_writel_tsr1(he_dev,
2419 TSR1_MCR(rate_to_atmf(200000))
2420 | TSR1_PCR(0), cid);
2421 break;
2422 case ATM_CBR:
2423 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2424 break;
2425 }
2426 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2427
2428 tpd = __alloc_tpd(he_dev);
2429 if (tpd == NULL) {
2430 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2431 goto close_tx_incomplete;
2432 }
2433 tpd->status |= TPD_EOS | TPD_INT;
2434 tpd->skb = NULL;
2435 tpd->vcc = vcc;
2436 wmb();
2437
2438 set_current_state(TASK_UNINTERRUPTIBLE);
2439 add_wait_queue(&he_vcc->tx_waitq, &wait);
2440 __enqueue_tpd(he_dev, tpd, cid);
2441 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2442
2443 timeout = schedule_timeout(30*HZ);
2444
2445 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2446 set_current_state(TASK_RUNNING);
2447
2448 spin_lock_irqsave(&he_dev->global_lock, flags);
2449
2450 if (timeout == 0) {
2451 hprintk("close tx timeout cid 0x%x\n", cid);
2452 goto close_tx_incomplete;
2453 }
2454
2455 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2456 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2457 udelay(250);
2458 }
2459
2460 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2461 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2462 udelay(250);
2463 }
2464
2465close_tx_incomplete:
2466
2467 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2468 int reg = he_vcc->rc_index;
2469
2470 HPRINTK("cs_stper reg = %d\n", reg);
2471
2472 if (he_dev->cs_stper[reg].inuse == 0)
2473 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2474 else
2475 --he_dev->cs_stper[reg].inuse;
2476
2477 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2478 }
2479 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2480
2481 HPRINTK("close tx cid 0x%x complete\n", cid);
2482 }
2483
2484 kfree(he_vcc);
2485
2486 clear_bit(ATM_VF_ADDR, &vcc->flags);
2487}
2488
2489static int
2490he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2491{
2492 unsigned long flags;
2493 struct he_dev *he_dev = HE_DEV(vcc->dev);
2494 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2495 struct he_tpd *tpd;
2496#ifdef USE_SCATTERGATHER
2497 int i, slot = 0;
2498#endif
2499
2500#define HE_TPD_BUFSIZE 0xffff
2501
2502 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2503
2504 if ((skb->len > HE_TPD_BUFSIZE) ||
2505 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2506		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2507 if (vcc->pop)
2508 vcc->pop(vcc, skb);
2509 else
2510 dev_kfree_skb_any(skb);
2511 atomic_inc(&vcc->stats->tx_err);
2512 return -EINVAL;
2513 }
2514
2515#ifndef USE_SCATTERGATHER
2516 if (skb_shinfo(skb)->nr_frags) {
2517 hprintk("no scatter/gather support\n");
2518 if (vcc->pop)
2519 vcc->pop(vcc, skb);
2520 else
2521 dev_kfree_skb_any(skb);
2522 atomic_inc(&vcc->stats->tx_err);
2523 return -EINVAL;
2524 }
2525#endif
2526 spin_lock_irqsave(&he_dev->global_lock, flags);
2527
2528 tpd = __alloc_tpd(he_dev);
2529 if (tpd == NULL) {
2530 if (vcc->pop)
2531 vcc->pop(vcc, skb);
2532 else
2533 dev_kfree_skb_any(skb);
2534 atomic_inc(&vcc->stats->tx_err);
2535 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2536 return -ENOMEM;
2537 }
2538
2539 if (vcc->qos.aal == ATM_AAL5)
2540 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2541 else {
2542 char *pti_clp = (void *) (skb->data + 3);
2543 int clp, pti;
2544
2545 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2546 clp = (*pti_clp & ATM_HDR_CLP);
2547 tpd->status |= TPD_CELLTYPE(pti);
2548 if (clp)
2549 tpd->status |= TPD_CLP;
2550
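		/* drop the 4-byte cell header (ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
		   pti/clp have already been copied into the tpd status above */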
2551 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2552 }
2553
2554#ifdef USE_SCATTERGATHER
2555 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
2556 skb_headlen(skb), DMA_TO_DEVICE);
2557 tpd->iovec[slot].len = skb_headlen(skb);
2558 ++slot;
2559
2560 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2561 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562
2563 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2564 tpd->vcc = vcc;
2565 tpd->skb = NULL; /* not the last fragment
2566						   so don't ->pop() it yet */
2567 wmb();
2568
2569 __enqueue_tpd(he_dev, tpd, cid);
2570 tpd = __alloc_tpd(he_dev);
2571 if (tpd == NULL) {
2572 if (vcc->pop)
2573 vcc->pop(vcc, skb);
2574 else
2575 dev_kfree_skb_any(skb);
2576 atomic_inc(&vcc->stats->tx_err);
2577 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2578 return -ENOMEM;
2579 }
2580 tpd->status |= TPD_USERCELL;
2581 slot = 0;
2582 }
2583
2584 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
2585 (void *) page_address(frag->page) + frag->page_offset,
2586 frag->size, DMA_TO_DEVICE);
2587 tpd->iovec[slot].len = frag->size;
2588 ++slot;
2589
2590 }
2591
2592 tpd->iovec[slot - 1].len |= TPD_LST;
2593#else
2594 tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2595 tpd->length0 = skb->len | TPD_LST;
2596#endif
2597 tpd->status |= TPD_INT;
2598
2599 tpd->vcc = vcc;
2600 tpd->skb = skb;
2601 wmb();
2602 ATM_SKB(skb)->vcc = vcc;
2603
2604 __enqueue_tpd(he_dev, tpd, cid);
2605 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2606
2607 atomic_inc(&vcc->stats->tx);
2608
2609 return 0;
2610}
2611
2612static int
2613he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2614{
2615 unsigned long flags;
2616 struct he_dev *he_dev = HE_DEV(atm_dev);
2617 struct he_ioctl_reg reg;
2618 int err = 0;
2619
2620 switch (cmd) {
2621 case HE_GET_REG:
2622 if (!capable(CAP_NET_ADMIN))
2623 return -EPERM;
2624
2625		if (copy_from_user(&reg, arg,
2626 sizeof(struct he_ioctl_reg)))
2627 return -EFAULT;
2628
2629 spin_lock_irqsave(&he_dev->global_lock, flags);
2630 switch (reg.type) {
2631 case HE_REGTYPE_PCI:
2632 if (reg.addr >= HE_REGMAP_SIZE) {
2633 err = -EINVAL;
2634 break;
2635 }
2636
2637 reg.val = he_readl(he_dev, reg.addr);
2638 break;
2639 case HE_REGTYPE_RCM:
2640 reg.val =
2641 he_readl_rcm(he_dev, reg.addr);
2642 break;
2643 case HE_REGTYPE_TCM:
2644 reg.val =
2645 he_readl_tcm(he_dev, reg.addr);
2646 break;
2647 case HE_REGTYPE_MBOX:
2648 reg.val =
2649 he_readl_mbox(he_dev, reg.addr);
2650 break;
2651 default:
2652 err = -EINVAL;
2653 break;
2654 }
2655 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2656 if (err == 0)
2657			if (copy_to_user(arg, &reg,
2658 sizeof(struct he_ioctl_reg)))
2659 return -EFAULT;
2660 break;
2661 default:
2662#ifdef CONFIG_ATM_HE_USE_SUNI
2663 if (atm_dev->phy && atm_dev->phy->ioctl)
2664 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2665#else /* CONFIG_ATM_HE_USE_SUNI */
2666 err = -EINVAL;
2667#endif /* CONFIG_ATM_HE_USE_SUNI */
2668 break;
2669 }
2670
2671 return err;
2672}
2673
2674static void
2675he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2676{
2677 unsigned long flags;
2678 struct he_dev *he_dev = HE_DEV(atm_dev);
2679
2680 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2681
2682 spin_lock_irqsave(&he_dev->global_lock, flags);
2683 he_writel(he_dev, val, FRAMER + (addr*4));
2684 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2685 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2686}
2687
2688
2689static unsigned char
2690he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2691{
2692 unsigned long flags;
2693 struct he_dev *he_dev = HE_DEV(atm_dev);
2694 unsigned reg;
2695
2696 spin_lock_irqsave(&he_dev->global_lock, flags);
2697 reg = he_readl(he_dev, FRAMER + (addr*4));
2698 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2699
2700 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2701 return reg;
2702}
2703
2704static int
2705he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2706{
2707 unsigned long flags;
2708 struct he_dev *he_dev = HE_DEV(dev);
2709 int left, i;
2710#ifdef notdef
2711 struct he_rbrq *rbrq_tail;
2712 struct he_tpdrq *tpdrq_head;
2713	int rbpl_head, rbpl_tail, inuse;
2714#endif
2715 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2716
2717
2718 left = *pos;
2719 if (!left--)
2720 return sprintf(page, "ATM he driver\n");
2721
2722 if (!left--)
2723 return sprintf(page, "%s%s\n\n",
2724 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2725
2726 if (!left--)
2727 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2728
2729 spin_lock_irqsave(&he_dev->global_lock, flags);
2730 mcc += he_readl(he_dev, MCC);
2731 oec += he_readl(he_dev, OEC);
2732 dcc += he_readl(he_dev, DCC);
2733 cec += he_readl(he_dev, CEC);
2734 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2735
2736 if (!left--)
2737 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2738 mcc, oec, dcc, cec);
2739
2740 if (!left--)
2741 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2742 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2743
2744 if (!left--)
2745 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2746 CONFIG_TPDRQ_SIZE);
2747
2748 if (!left--)
2749 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2750 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2751
2752 if (!left--)
2753 return sprintf(page, "tbrq_size = %d peak = %d\n",
2754 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2755
2756
2757#ifdef notdef
2758 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2759 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2760
2761 inuse = rbpl_head - rbpl_tail;
2762 if (inuse < 0)
2763 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2764 inuse /= sizeof(struct he_rbp);
2765
2766 if (!left--)
2767 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2768 CONFIG_RBPL_SIZE, inuse);
2769#endif
2770
2771 if (!left--)
2772 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2773
2774 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2775 if (!left--)
2776 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2777 he_dev->cs_stper[i].pcr,
2778 he_dev->cs_stper[i].inuse);
2779
2780 if (!left--)
2781 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2782			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2783
2784 return 0;
2785}
2786
2787/* eeprom routines -- see 4.7 */
2788
2789static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2790{
2791 u32 val = 0, tmp_read = 0;
2792 int i, j = 0;
2793 u8 byte_read = 0;
2794
2795 val = readl(he_dev->membase + HOST_CNTL);
2796 val &= 0xFFFFE0FF;
2797
2798 /* Turn on write enable */
2799 val |= 0x800;
2800 he_writel(he_dev, val, HOST_CNTL);
2801
2802 /* Send READ instruction */
2803 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2804 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2805 udelay(EEPROM_DELAY);
2806 }
2807
2808 /* Next, we need to send the byte address to read from */
2809 for (i = 7; i >= 0; i--) {
2810 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2811 udelay(EEPROM_DELAY);
2812 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2813 udelay(EEPROM_DELAY);
2814 }
2815
2816 j = 0;
2817
2818 val &= 0xFFFFF7FF; /* Turn off write enable */
2819 he_writel(he_dev, val, HOST_CNTL);
2820
2821 /* Now, we can read data from the EEPROM by clocking it in */
2822 for (i = 7; i >= 0; i--) {
2823 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2824 udelay(EEPROM_DELAY);
2825 tmp_read = he_readl(he_dev, HOST_CNTL);
2826 byte_read |= (unsigned char)
2827 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2828 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2829 udelay(EEPROM_DELAY);
2830 }
2831
2832 he_writel(he_dev, val | ID_CS, HOST_CNTL);
2833 udelay(EEPROM_DELAY);
2834
2835 return byte_read;
2836}
2837
2838MODULE_LICENSE("GPL");
2839MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2840MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2841module_param(disable64, bool, 0);
2842MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2843module_param(nvpibits, short, 0);
2844MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2845module_param(nvcibits, short, 0);
2846MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2847module_param(rx_skb_reserve, short, 0);
2848MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2849module_param(irq_coalesce, bool, 0);
2850MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2851module_param(sdh, bool, 0);
2852MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2853
2854static struct pci_device_id he_pci_tbl[] = {
2855 { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2856 { 0, }
2857};
2858
2859MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2860
2861static struct pci_driver he_driver = {
2862 .name = "he",
2863 .probe = he_init_one,
2864 .remove = he_remove_one,
2865 .id_table = he_pci_tbl,
2866};
2867
2868module_pci_driver(he_driver);