1/*
2
3 he.c
4
5 ForeRunnerHE ATM Adapter driver for ATM on Linux
6 Copyright (C) 1999-2001 Naval Research Laboratory
7
8 This library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Lesser General Public
10 License as published by the Free Software Foundation; either
11 version 2.1 of the License, or (at your option) any later version.
12
13 This library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
17
18 You should have received a copy of the GNU Lesser General Public
19 License along with this library; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21
22*/
23
24/*
25
26 he.c
27
28 ForeRunnerHE ATM Adapter driver for ATM on Linux
29 Copyright (C) 1999-2001 Naval Research Laboratory
30
31 Permission to use, copy, modify and distribute this software and its
32 documentation is hereby granted, provided that both the copyright
33 notice and this permission notice appear in all copies of the software,
34 derivative works or modified versions, and any portions thereof, and
35 that both notices appear in supporting documentation.
36
37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39 RESULTING FROM THE USE OF THIS SOFTWARE.
40
41 This driver was written using the "Programmer's Reference Manual for
42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43
44 AUTHORS:
45 chas williams <chas@cmf.nrl.navy.mil>
46 eric kinzie <ekinzie@cmf.nrl.navy.mil>
47
48 NOTES:
49 4096 supported 'connections'
50 group 0 is used for all traffic
51 interrupt queue 0 is used for all interrupts
52 aal0 support (based on work from ulrich.u.muller@nokia.com)
53
54 */
55
56#include <linux/module.h>
57#include <linux/kernel.h>
58#include <linux/skbuff.h>
59#include <linux/pci.h>
60#include <linux/errno.h>
61#include <linux/types.h>
62#include <linux/string.h>
63#include <linux/delay.h>
64#include <linux/init.h>
65#include <linux/mm.h>
66#include <linux/sched.h>
67#include <linux/timer.h>
68#include <linux/interrupt.h>
69#include <linux/dma-mapping.h>
70#include <linux/bitmap.h>
71#include <linux/slab.h>
72#include <asm/io.h>
73#include <asm/byteorder.h>
74#include <asm/uaccess.h>
75
76#include <linux/atmdev.h>
77#include <linux/atm.h>
78#include <linux/sonet.h>
79
80#undef USE_SCATTERGATHER
81#undef USE_CHECKSUM_HW /* still confused about this */
82/* #undef HE_DEBUG */
83
84#include "he.h"
85#include "suni.h"
86#include <linux/atm_he.h>
87
88#define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89
90#ifdef HE_DEBUG
91#define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92#else /* !HE_DEBUG */
93#define HPRINTK(fmt,args...) do { } while (0)
94#endif /* HE_DEBUG */
95
96/* declarations */
97
98static int he_open(struct atm_vcc *vcc);
99static void he_close(struct atm_vcc *vcc);
100static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102static irqreturn_t he_irq_handler(int irq, void *dev_id);
103static void he_tasklet(unsigned long data);
104static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105static int he_start(struct atm_dev *dev);
106static void he_stop(struct he_dev *dev);
107static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109
110static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111
112/* globals */
113
114static struct he_dev *he_devs;
115static int disable64;
116static short nvpibits = -1;
117static short nvcibits = -1;
118static short rx_skb_reserve = 16;
119static int irq_coalesce = 1;
120static int sdh = 0;
121
122/* Read from EEPROM = 0000 0011b */
123static unsigned int readtab[] = {
124 CS_HIGH | CLK_HIGH,
125 CS_LOW | CLK_LOW,
126 CLK_HIGH, /* 0 */
127 CLK_LOW,
128 CLK_HIGH, /* 0 */
129 CLK_LOW,
130 CLK_HIGH, /* 0 */
131 CLK_LOW,
132 CLK_HIGH, /* 0 */
133 CLK_LOW,
134 CLK_HIGH, /* 0 */
135 CLK_LOW,
136 CLK_HIGH, /* 0 */
137 CLK_LOW | SI_HIGH,
138 CLK_HIGH | SI_HIGH, /* 1 */
139 CLK_LOW | SI_HIGH,
140 CLK_HIGH | SI_HIGH /* 1 */
141};
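/* readtab toggles chip select and then, one CLK_LOW/CLK_HIGH pair per bit,
 * shifts the READ opcode out on SI: six zero bits followed by two one bits,
 * i.e. the 0000 0011b noted above */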
142
143/* Clock to read from/write to the EEPROM */
144static unsigned int clocktab[] = {
145 CLK_LOW,
146 CLK_HIGH,
147 CLK_LOW,
148 CLK_HIGH,
149 CLK_LOW,
150 CLK_HIGH,
151 CLK_LOW,
152 CLK_HIGH,
153 CLK_LOW,
154 CLK_HIGH,
155 CLK_LOW,
156 CLK_HIGH,
157 CLK_LOW,
158 CLK_HIGH,
159 CLK_LOW,
160 CLK_HIGH,
161 CLK_LOW
162};
163
164static struct atmdev_ops he_ops =
165{
166 .open = he_open,
167 .close = he_close,
168 .ioctl = he_ioctl,
169 .send = he_send,
170 .phy_put = he_phy_put,
171 .phy_get = he_phy_get,
172 .proc_read = he_proc_read,
173 .owner = THIS_MODULE
174};
175
176#define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177#define he_readl(dev, reg) readl((dev)->membase + (reg))
178
179/* section 2.12 connection memory access */
180
181static __inline__ void
182he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 unsigned flags)
184{
185 he_writel(he_dev, val, CON_DAT);
186 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
187 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189}
190
191#define he_writel_rcm(dev, val, reg) \
192 he_writel_internal(dev, val, reg, CON_CTL_RCM)
193
194#define he_writel_tcm(dev, val, reg) \
195 he_writel_internal(dev, val, reg, CON_CTL_TCM)
196
197#define he_writel_mbox(dev, val, reg) \
198 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199
200static unsigned
201he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202{
203 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 return he_readl(he_dev, CON_DAT);
206}
207
208#define he_readl_rcm(dev, reg) \
209 he_readl_internal(dev, reg, CON_CTL_RCM)
210
211#define he_readl_tcm(dev, reg) \
212 he_readl_internal(dev, reg, CON_CTL_TCM)
213
214#define he_readl_mbox(dev, reg) \
215 he_readl_internal(dev, reg, CON_CTL_MBOX)
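/* CON_CTL_RCM, CON_CTL_TCM and CON_CTL_MBOX select which internal memory
 * the indirect access above targets: receive connection memory, transmit
 * connection memory, or the mailbox registers (used below for the CS_*
 * cell scheduler registers) */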
216
217
218/* figure 2.2 connection id */
219
220#define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
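/* e.g. with vcibits == 10, vpi == 1 and vci == 32 yield
 * cid == (1 << 10) | 32 == 0x420 */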
221
222/* 2.5.1 per connection transmit state registers */
223
224#define he_writel_tsr0(dev, val, cid) \
225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226#define he_readl_tsr0(dev, cid) \
227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228
229#define he_writel_tsr1(dev, val, cid) \
230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231
232#define he_writel_tsr2(dev, val, cid) \
233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234
235#define he_writel_tsr3(dev, val, cid) \
236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237
238#define he_writel_tsr4(dev, val, cid) \
239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240
241 /* from page 2-20
242 *
243 * NOTE While the transmit connection is active, bits 23 through 0
244 * of this register must not be written by the host. Byte
245 * enables should be used during normal operation when writing
246 * the most significant byte.
247 */
248
249#define he_writel_tsr4_upper(dev, val, cid) \
250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 CON_CTL_TCM \
252 | CON_BYTE_DISABLE_2 \
253 | CON_BYTE_DISABLE_1 \
254 | CON_BYTE_DISABLE_0)
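/* the three byte disables mask off bits 23..0, so only the most
 * significant byte of tsr4 is written -- per the note above */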
255
256#define he_readl_tsr4(dev, cid) \
257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258
259#define he_writel_tsr5(dev, val, cid) \
260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261
262#define he_writel_tsr6(dev, val, cid) \
263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264
265#define he_writel_tsr7(dev, val, cid) \
266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267
268
269#define he_writel_tsr8(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271
272#define he_writel_tsr9(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274
275#define he_writel_tsr10(dev, val, cid) \
276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277
278#define he_writel_tsr11(dev, val, cid) \
279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280
281
282#define he_writel_tsr12(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284
285#define he_writel_tsr13(dev, val, cid) \
286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287
288
289#define he_writel_tsr14(dev, val, cid) \
290 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291
292#define he_writel_tsr14_upper(dev, val, cid) \
293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 CON_CTL_TCM \
295 | CON_BYTE_DISABLE_2 \
296 | CON_BYTE_DISABLE_1 \
297 | CON_BYTE_DISABLE_0)
298
299/* 2.7.1 per connection receive state registers */
300
301#define he_writel_rsr0(dev, val, cid) \
302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303#define he_readl_rsr0(dev, cid) \
304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305
306#define he_writel_rsr1(dev, val, cid) \
307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308
309#define he_writel_rsr2(dev, val, cid) \
310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311
312#define he_writel_rsr3(dev, val, cid) \
313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314
315#define he_writel_rsr4(dev, val, cid) \
316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317
318#define he_writel_rsr5(dev, val, cid) \
319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320
321#define he_writel_rsr6(dev, val, cid) \
322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323
324#define he_writel_rsr7(dev, val, cid) \
325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326
327static __inline__ struct atm_vcc*
328__find_vcc(struct he_dev *he_dev, unsigned cid)
329{
330 struct hlist_head *head;
331 struct atm_vcc *vcc;
332 struct hlist_node *node;
333 struct sock *s;
334 short vpi;
335 int vci;
336
337 vpi = cid >> he_dev->vcibits;
338 vci = cid & ((1 << he_dev->vcibits) - 1);
339 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
340
341 sk_for_each(s, node, head) {
342 vcc = atm_sk(s);
343 if (vcc->dev == he_dev->atm_dev &&
344 vcc->vci == vci && vcc->vpi == vpi &&
345 vcc->qos.rxtp.traffic_class != ATM_NONE) {
346 return vcc;
347 }
348 }
349 return NULL;
350}
351
352static int __devinit
353he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
354{
355 struct atm_dev *atm_dev = NULL;
356 struct he_dev *he_dev = NULL;
357 int err = 0;
358
359 printk(KERN_INFO "ATM he driver\n");
360
361 if (pci_enable_device(pci_dev))
362 return -EIO;
363 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
364 printk(KERN_WARNING "he: no suitable dma available\n");
365 err = -EIO;
366 goto init_one_failure;
367 }
368
369 atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
370 if (!atm_dev) {
371 err = -ENODEV;
372 goto init_one_failure;
373 }
374 pci_set_drvdata(pci_dev, atm_dev);
375
376 he_dev = kzalloc(sizeof(struct he_dev),
377 GFP_KERNEL);
378 if (!he_dev) {
379 err = -ENOMEM;
380 goto init_one_failure;
381 }
382 he_dev->pci_dev = pci_dev;
383 he_dev->atm_dev = atm_dev;
384 he_dev->atm_dev->dev_data = he_dev;
385 atm_dev->dev_data = he_dev;
386 he_dev->number = atm_dev->number;
387 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
388 spin_lock_init(&he_dev->global_lock);
389
390 if (he_start(atm_dev)) {
391 he_stop(he_dev);
392 err = -ENODEV;
393 goto init_one_failure;
394 }
395 he_dev->next = NULL;
396 if (he_devs)
397 he_dev->next = he_devs;
398 he_devs = he_dev;
399 return 0;
400
401init_one_failure:
402 if (atm_dev)
403 atm_dev_deregister(atm_dev);
404 kfree(he_dev);
405 pci_disable_device(pci_dev);
406 return err;
407}
408
409static void __devexit
410he_remove_one (struct pci_dev *pci_dev)
411{
412 struct atm_dev *atm_dev;
413 struct he_dev *he_dev;
414
415 atm_dev = pci_get_drvdata(pci_dev);
416 he_dev = HE_DEV(atm_dev);
417
418 /* need to remove from he_devs */
419
420 he_stop(he_dev);
421 atm_dev_deregister(atm_dev);
422 kfree(he_dev);
423
424 pci_set_drvdata(pci_dev, NULL);
425 pci_disable_device(pci_dev);
426}
427
428
429static unsigned
430rate_to_atmf(unsigned rate) /* cps to atm forum format */
431{
432#define NONZERO (1 << 14)
433
434 unsigned exp = 0;
435
436 if (rate == 0)
437 return 0;
438
439 rate <<= 9;
440 while (rate > 0x3ff) {
441 ++exp;
442 rate >>= 1;
443 }
444
445 return (NONZERO | (exp << 9) | (rate & 0x1ff));
446}
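/* e.g. 1000 cps: 1000 << 9 == 512000, halved nine times down to 1000
 * (exp == 9), mantissa == 1000 & 0x1ff == 488; the encoded value decodes
 * back as 2^9 * (1 + 488/512) == 1000 cps */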
447
448static void __devinit
449he_init_rx_lbfp0(struct he_dev *he_dev)
450{
451 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
452 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
453 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
454 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
455
456 lbufd_index = 0;
457 lbm_offset = he_readl(he_dev, RCMLBM_BA);
458
459 he_writel(he_dev, lbufd_index, RLBF0_H);
460
461 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
462 lbufd_index += 2;
463 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
464
465 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
466 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
467
468 if (++lbuf_count == lbufs_per_row) {
469 lbuf_count = 0;
470 row_offset += he_dev->bytes_per_row;
471 }
472 lbm_offset += 4;
473 }
474
475 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
476 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
477}
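/* each local buffer descriptor occupies two words of rcm at
 * RCMLBM_BA + 2 * index: the buffer address (in 32-byte units) and the
 * index of the next descriptor.  the two receive free pools use
 * interleaved even/odd indices, which is why lbm_offset advances by four
 * per buffer here and in he_init_rx_lbfp1(), but only by two for the tx
 * pool in he_init_tx_lbfp() */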
478
479static void __devinit
480he_init_rx_lbfp1(struct he_dev *he_dev)
481{
482 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
483 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
484 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
485 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
486
487 lbufd_index = 1;
488 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
489
490 he_writel(he_dev, lbufd_index, RLBF1_H);
491
492 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
493 lbufd_index += 2;
494 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
495
496 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
497 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
498
499 if (++lbuf_count == lbufs_per_row) {
500 lbuf_count = 0;
501 row_offset += he_dev->bytes_per_row;
502 }
503 lbm_offset += 4;
504 }
505
506 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
507 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
508}
509
510static void __devinit
511he_init_tx_lbfp(struct he_dev *he_dev)
512{
513 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
514 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
515 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
516 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
517
518 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
519 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
520
521 he_writel(he_dev, lbufd_index, TLBF_H);
522
523 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
524 lbufd_index += 1;
525 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
526
527 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
528 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
529
530 if (++lbuf_count == lbufs_per_row) {
531 lbuf_count = 0;
532 row_offset += he_dev->bytes_per_row;
533 }
534 lbm_offset += 2;
535 }
536
537 he_writel(he_dev, lbufd_index - 1, TLBF_T);
538}
539
540static int __devinit
541he_init_tpdrq(struct he_dev *he_dev)
542{
543 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
544 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
545 if (he_dev->tpdrq_base == NULL) {
546 hprintk("failed to alloc tpdrq\n");
547 return -ENOMEM;
548 }
549 memset(he_dev->tpdrq_base, 0,
550 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
551
552 he_dev->tpdrq_tail = he_dev->tpdrq_base;
553 he_dev->tpdrq_head = he_dev->tpdrq_base;
554
555 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
556 he_writel(he_dev, 0, TPDRQ_T);
557 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
558
559 return 0;
560}
561
562static void __devinit
563he_init_cs_block(struct he_dev *he_dev)
564{
565 unsigned clock, rate, delta;
566 int reg;
567
568 /* 5.1.7 cs block initialization */
569
570 for (reg = 0; reg < 0x20; ++reg)
571 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
572
573 /* rate grid timer reload values */
574
575 clock = he_is622(he_dev) ? 66667000 : 50000000;
576 rate = he_dev->atm_dev->link_rate;
577 delta = rate / 16 / 2;
578
579 for (reg = 0; reg < 0x10; ++reg) {
580 /* 2.4 internal transmit function
581 *
582 * we initialize the first row in the rate grid.
583 * values are period (in clock cycles) of timer
584 */
585 unsigned period = clock / rate;
586
587 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
588 rate -= delta;
589 }
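	/* e.g. on a 155 card (50 MHz clock, link_rate == ATM_OC3_PCR ~= 353207
	 * cps) the first reload value is 50000000 / 353207 == 141 clocks; each
	 * of the 16 entries drops the rate by a further link_rate / 32 */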
590
591 if (he_is622(he_dev)) {
592 /* table 5.2 (4 cells per lbuf) */
593 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
594 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
595 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
596 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
597 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
598
599 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
600 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
601 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
602 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
603 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
604 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
605 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
606
607 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
608
609 /* table 5.8 */
610 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
611 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
612 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
613 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
614 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
615 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
616
617 /* table 5.9 */
618 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
619 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
620 } else {
621 /* table 5.1 (4 cells per lbuf) */
622 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
623 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
624 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
625 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
626 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
627
628 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
629 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
630 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
631 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
632 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
633 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
634 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
635
636 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
637
638 /* table 5.8 */
639 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
640 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
641 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
642 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
643 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
644 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
645
646 /* table 5.9 */
647 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
648 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
649 }
650
651 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
652
653 for (reg = 0; reg < 0x8; ++reg)
654 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
655
656}
657
658static int __devinit
659he_init_cs_block_rcm(struct he_dev *he_dev)
660{
661 unsigned (*rategrid)[16][16];
662 unsigned rate, delta;
663 int i, j, reg;
664
665 unsigned rate_atmf, exp, man;
666 unsigned long long rate_cps;
667 int mult, buf, buf_limit = 4;
668
669 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
670 if (!rategrid)
671 return -ENOMEM;
672
673 /* initialize rate grid group table */
674
675 for (reg = 0x0; reg < 0xff; ++reg)
676 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
677
678 /* initialize rate controller groups */
679
680 for (reg = 0x100; reg < 0x1ff; ++reg)
681 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
682
683 /* initialize tNrm lookup table */
684
685 /* the manual makes reference to a routine in a sample driver
686 for proper configuration; fortunately, we only need this
687	   in order to support abr connections */
688
689 /* initialize rate to group table */
690
691 rate = he_dev->atm_dev->link_rate;
692 delta = rate / 32;
693
694 /*
695 * 2.4 transmit internal functions
696 *
697 * we construct a copy of the rate grid used by the scheduler
698 * in order to construct the rate to group table below
699 */
700
701 for (j = 0; j < 16; j++) {
702 (*rategrid)[0][j] = rate;
703 rate -= delta;
704 }
705
706 for (i = 1; i < 16; i++)
707 for (j = 0; j < 16; j++)
708 if (i > 14)
709 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
710 else
711 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
712
713 /*
714 * 2.4 transmit internal function
715 *
716 * this table maps the upper 5 bits of exponent and mantissa
717 * of the atm forum representation of the rate into an index
718 * on rate grid
719 */
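	/* decoding that representation: exp == rate_atmf >> 5, the five
	 * mantissa bits are shifted up by four, and
	 * rate_cps == 2^exp * (512 + man) / 512;
	 * e.g. rate_atmf == 0x200 gives exp 16, man 0, i.e. 65536 cps */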
720
721 rate_atmf = 0;
722 while (rate_atmf < 0x400) {
723 man = (rate_atmf & 0x1f) << 4;
724 exp = rate_atmf >> 5;
725
726 /*
727 instead of '/ 512', use '>> 9' to prevent a call
728 to divdu3 on x86 platforms
729 */
730 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
731
732 if (rate_cps < 10)
733 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
734
735 for (i = 255; i > 0; i--)
736 if ((*rategrid)[i/16][i%16] >= rate_cps)
737 break; /* pick nearest rate instead? */
738
739 /*
740 * each table entry is 16 bits: (rate grid index (8 bits)
741 * and a buffer limit (8 bits)
742 * there are two table entries in each 32-bit register
743 */
744
745#ifdef notdef
746 buf = rate_cps * he_dev->tx_numbuffs /
747 (he_dev->atm_dev->link_rate * 2);
748#else
749 /* this is pretty, but avoids _divdu3 and is mostly correct */
750 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
751 if (rate_cps > (272 * mult))
752 buf = 4;
753 else if (rate_cps > (204 * mult))
754 buf = 3;
755 else if (rate_cps > (136 * mult))
756 buf = 2;
757 else if (rate_cps > (68 * mult))
758 buf = 1;
759 else
760 buf = 0;
761#endif
762 if (buf > buf_limit)
763 buf = buf_limit;
764 reg = (reg << 16) | ((i << 8) | buf);
765
766#define RTGTBL_OFFSET 0x400
767
768 if (rate_atmf & 0x1)
769 he_writel_rcm(he_dev, reg,
770 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
771
772 ++rate_atmf;
773 }
774
775 kfree(rategrid);
776 return 0;
777}
778
779static int __devinit
780he_init_group(struct he_dev *he_dev, int group)
781{
782 struct he_buff *heb, *next;
783 dma_addr_t mapping;
784 int i;
785
786 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
787 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
788 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
789 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
790 G0_RBPS_BS + (group * 32));
791
792 /* bitmap table */
793 he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
794 * sizeof(unsigned long), GFP_KERNEL);
795 if (!he_dev->rbpl_table) {
796 hprintk("unable to allocate rbpl bitmap table\n");
797 return -ENOMEM;
798 }
799 bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
800
801 /* rbpl_virt 64-bit pointers */
802 he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
803 * sizeof(struct he_buff *), GFP_KERNEL);
804 if (!he_dev->rbpl_virt) {
805 hprintk("unable to allocate rbpl virt table\n");
806 goto out_free_rbpl_table;
807 }
808
809 /* large buffer pool */
810 he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
811 CONFIG_RBPL_BUFSIZE, 64, 0);
812 if (he_dev->rbpl_pool == NULL) {
813 hprintk("unable to create rbpl pool\n");
814 goto out_free_rbpl_virt;
815 }
816
817 he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
818 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
819 if (he_dev->rbpl_base == NULL) {
820 hprintk("failed to alloc rbpl_base\n");
821 goto out_destroy_rbpl_pool;
822 }
823 memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
824
825 INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
826
827 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
828
829 heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
830 if (!heb)
831 goto out_free_rbpl;
832 heb->mapping = mapping;
833 list_add(&heb->entry, &he_dev->rbpl_outstanding);
834
835 set_bit(i, he_dev->rbpl_table);
836 he_dev->rbpl_virt[i] = heb;
837 he_dev->rbpl_hint = i + 1;
838 he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
839 he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
840 }
841 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
842
843 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
844 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
845 G0_RBPL_T + (group * 32));
846 he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
847 G0_RBPL_BS + (group * 32));
848 he_writel(he_dev,
849 RBP_THRESH(CONFIG_RBPL_THRESH) |
850 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
851 RBP_INT_ENB,
852 G0_RBPL_QI + (group * 32));
853
854 /* rx buffer ready queue */
855
856 he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
857 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
858 if (he_dev->rbrq_base == NULL) {
859 hprintk("failed to allocate rbrq\n");
860 goto out_free_rbpl;
861 }
862 memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
863
864 he_dev->rbrq_head = he_dev->rbrq_base;
865 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
866 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
867 he_writel(he_dev,
868 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
869 G0_RBRQ_Q + (group * 16));
870 if (irq_coalesce) {
871 hprintk("coalescing interrupts\n");
872 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
873 G0_RBRQ_I + (group * 16));
874 } else
875 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
876 G0_RBRQ_I + (group * 16));
877
878 /* tx buffer ready queue */
879
880 he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
881 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
882 if (he_dev->tbrq_base == NULL) {
883 hprintk("failed to allocate tbrq\n");
884 goto out_free_rbpq_base;
885 }
886 memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
887
888 he_dev->tbrq_head = he_dev->tbrq_base;
889
890 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
891 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
892 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
893 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
894
895 return 0;
896
897out_free_rbpq_base:
898 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
899 sizeof(struct he_rbrq), he_dev->rbrq_base,
900 he_dev->rbrq_phys);
901out_free_rbpl:
902 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
903 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
904
905 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
906 sizeof(struct he_rbp), he_dev->rbpl_base,
907 he_dev->rbpl_phys);
908out_destroy_rbpl_pool:
909 pci_pool_destroy(he_dev->rbpl_pool);
910out_free_rbpl_virt:
911 kfree(he_dev->rbpl_virt);
912out_free_rbpl_table:
913 kfree(he_dev->rbpl_table);
914
915 return -ENOMEM;
916}
917
918static int __devinit
919he_init_irq(struct he_dev *he_dev)
920{
921 int i;
922
923 /* 2.9.3.5 tail offset for each interrupt queue is located after the
924 end of the interrupt queue */
925
926 he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
927 (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
928 if (he_dev->irq_base == NULL) {
929 hprintk("failed to allocate irq\n");
930 return -ENOMEM;
931 }
932 he_dev->irq_tailoffset = (unsigned *)
933 &he_dev->irq_base[CONFIG_IRQ_SIZE];
934 *he_dev->irq_tailoffset = 0;
935 he_dev->irq_head = he_dev->irq_base;
936 he_dev->irq_tail = he_dev->irq_base;
937
938 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
939 he_dev->irq_base[i].isw = ITYPE_INVALID;
940
941 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
942 he_writel(he_dev,
943 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
944 IRQ0_HEAD);
945 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
946 he_writel(he_dev, 0x0, IRQ0_DATA);
947
948 he_writel(he_dev, 0x0, IRQ1_BASE);
949 he_writel(he_dev, 0x0, IRQ1_HEAD);
950 he_writel(he_dev, 0x0, IRQ1_CNTL);
951 he_writel(he_dev, 0x0, IRQ1_DATA);
952
953 he_writel(he_dev, 0x0, IRQ2_BASE);
954 he_writel(he_dev, 0x0, IRQ2_HEAD);
955 he_writel(he_dev, 0x0, IRQ2_CNTL);
956 he_writel(he_dev, 0x0, IRQ2_DATA);
957
958 he_writel(he_dev, 0x0, IRQ3_BASE);
959 he_writel(he_dev, 0x0, IRQ3_HEAD);
960 he_writel(he_dev, 0x0, IRQ3_CNTL);
961 he_writel(he_dev, 0x0, IRQ3_DATA);
962
963 /* 2.9.3.2 interrupt queue mapping registers */
964
965 he_writel(he_dev, 0x0, GRP_10_MAP);
966 he_writel(he_dev, 0x0, GRP_32_MAP);
967 he_writel(he_dev, 0x0, GRP_54_MAP);
968 he_writel(he_dev, 0x0, GRP_76_MAP);
969
970 if (request_irq(he_dev->pci_dev->irq,
971 he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
972 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
973 return -EINVAL;
974 }
975
976 he_dev->irq = he_dev->pci_dev->irq;
977
978 return 0;
979}
980
981static int __devinit
982he_start(struct atm_dev *dev)
983{
984 struct he_dev *he_dev;
985 struct pci_dev *pci_dev;
986 unsigned long membase;
987
988 u16 command;
989 u32 gen_cntl_0, host_cntl, lb_swap;
990 u8 cache_size, timer;
991
992 unsigned err;
993 unsigned int status, reg;
994 int i, group;
995
996 he_dev = HE_DEV(dev);
997 pci_dev = he_dev->pci_dev;
998
999 membase = pci_resource_start(pci_dev, 0);
1000 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
1001
1002 /*
1003 * pci bus controller initialization
1004 */
1005
1006 /* 4.3 pci bus controller-specific initialization */
1007 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1008 hprintk("can't read GEN_CNTL_0\n");
1009 return -EINVAL;
1010 }
1011 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1012 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1013 hprintk("can't write GEN_CNTL_0.\n");
1014 return -EINVAL;
1015 }
1016
1017 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1018 hprintk("can't read PCI_COMMAND.\n");
1019 return -EINVAL;
1020 }
1021
1022 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1023 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1024 hprintk("can't enable memory.\n");
1025 return -EINVAL;
1026 }
1027
1028 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1029 hprintk("can't read cache line size?\n");
1030 return -EINVAL;
1031 }
1032
1033 if (cache_size < 16) {
1034 cache_size = 16;
1035 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1036 hprintk("can't set cache line size to %d\n", cache_size);
1037 }
1038
1039 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1040 hprintk("can't read latency timer?\n");
1041 return -EINVAL;
1042 }
1043
1044 /* from table 3.9
1045 *
1046 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1047 *
1048 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1049 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1050 *
1051 */
1052#define LAT_TIMER 209
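/* i.e. 1 + 16 + 192 == 209 clocks */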
1053 if (timer < LAT_TIMER) {
1054 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1055 timer = LAT_TIMER;
1056 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1057 hprintk("can't set latency timer to %d\n", timer);
1058 }
1059
1060 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1061 hprintk("can't set up page mapping\n");
1062 return -EINVAL;
1063 }
1064
1065 /* 4.4 card reset */
1066 he_writel(he_dev, 0x0, RESET_CNTL);
1067 he_writel(he_dev, 0xff, RESET_CNTL);
1068
1069 udelay(16*1000); /* 16 ms */
1070 status = he_readl(he_dev, RESET_CNTL);
1071 if ((status & BOARD_RST_STATUS) == 0) {
1072 hprintk("reset failed\n");
1073 return -EINVAL;
1074 }
1075
1076 /* 4.5 set bus width */
1077 host_cntl = he_readl(he_dev, HOST_CNTL);
1078 if (host_cntl & PCI_BUS_SIZE64)
1079 gen_cntl_0 |= ENBL_64;
1080 else
1081 gen_cntl_0 &= ~ENBL_64;
1082
1083 if (disable64 == 1) {
1084 hprintk("disabling 64-bit pci bus transfers\n");
1085 gen_cntl_0 &= ~ENBL_64;
1086 }
1087
1088 if (gen_cntl_0 & ENBL_64)
1089 hprintk("64-bit transfers enabled\n");
1090
1091 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1092
1093 /* 4.7 read prom contents */
1094 for (i = 0; i < PROD_ID_LEN; ++i)
1095 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1096
1097 he_dev->media = read_prom_byte(he_dev, MEDIA);
1098
1099 for (i = 0; i < 6; ++i)
1100 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1101
1102 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1103 he_dev->prod_id,
1104 he_dev->media & 0x40 ? "SM" : "MM",
1105 dev->esi[0],
1106 dev->esi[1],
1107 dev->esi[2],
1108 dev->esi[3],
1109 dev->esi[4],
1110 dev->esi[5]);
1111 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1112 ATM_OC12_PCR : ATM_OC3_PCR;
1113
1114	/* 4.6 set host endianness */
1115 lb_swap = he_readl(he_dev, LB_SWAP);
1116 if (he_is622(he_dev))
1117 lb_swap &= ~XFER_SIZE; /* 4 cells */
1118 else
1119 lb_swap |= XFER_SIZE; /* 8 cells */
1120#ifdef __BIG_ENDIAN
1121 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1122#else
1123 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1124 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1125#endif /* __BIG_ENDIAN */
1126 he_writel(he_dev, lb_swap, LB_SWAP);
1127
1128 /* 4.8 sdram controller initialization */
1129 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1130
1131 /* 4.9 initialize rnum value */
1132 lb_swap |= SWAP_RNUM_MAX(0xf);
1133 he_writel(he_dev, lb_swap, LB_SWAP);
1134
1135 /* 4.10 initialize the interrupt queues */
1136 if ((err = he_init_irq(he_dev)) != 0)
1137 return err;
1138
1139 /* 4.11 enable pci bus controller state machines */
1140 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1141 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1142 he_writel(he_dev, host_cntl, HOST_CNTL);
1143
1144 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1145 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1146
1147 /*
1148 * atm network controller initialization
1149 */
1150
1151 /* 5.1.1 generic configuration state */
1152
1153 /*
1154 * local (cell) buffer memory map
1155 *
1156 * HE155 HE622
1157 *
1158 * 0 ____________1023 bytes 0 _______________________2047 bytes
1159 * | | | | |
1160 * | utility | | rx0 | |
1161 * 5|____________| 255|___________________| u |
1162 * 6| | 256| | t |
1163 * | | | | i |
1164 * | rx0 | row | tx | l |
1165 * | | | | i |
1166 * | | 767|___________________| t |
1167 * 517|____________| 768| | y |
1168 * row 518| | | rx1 | |
1169 * | | 1023|___________________|___|
1170 * | |
1171 * | tx |
1172 * | |
1173 * | |
1174 * 1535|____________|
1175 * 1536| |
1176 * | rx1 |
1177 * 2047|____________|
1178 *
1179 */
1180
1181 /* total 4096 connections */
1182 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1183 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1184
1185 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1186 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1187 return -ENODEV;
1188 }
1189
1190 if (nvpibits != -1) {
1191 he_dev->vpibits = nvpibits;
1192 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1193 }
1194
1195 if (nvcibits != -1) {
1196 he_dev->vcibits = nvcibits;
1197 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1198 }
1199
1200
1201 if (he_is622(he_dev)) {
1202 he_dev->cells_per_row = 40;
1203 he_dev->bytes_per_row = 2048;
1204 he_dev->r0_numrows = 256;
1205 he_dev->tx_numrows = 512;
1206 he_dev->r1_numrows = 256;
1207 he_dev->r0_startrow = 0;
1208 he_dev->tx_startrow = 256;
1209 he_dev->r1_startrow = 768;
1210 } else {
1211 he_dev->cells_per_row = 20;
1212 he_dev->bytes_per_row = 1024;
1213 he_dev->r0_numrows = 512;
1214 he_dev->tx_numrows = 1018;
1215 he_dev->r1_numrows = 512;
1216 he_dev->r0_startrow = 6;
1217 he_dev->tx_startrow = 518;
1218 he_dev->r1_startrow = 1536;
1219 }
1220
1221 he_dev->cells_per_lbuf = 4;
1222 he_dev->buffer_limit = 4;
1223 he_dev->r0_numbuffs = he_dev->r0_numrows *
1224 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1225 if (he_dev->r0_numbuffs > 2560)
1226 he_dev->r0_numbuffs = 2560;
1227
1228 he_dev->r1_numbuffs = he_dev->r1_numrows *
1229 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1230 if (he_dev->r1_numbuffs > 2560)
1231 he_dev->r1_numbuffs = 2560;
1232
1233 he_dev->tx_numbuffs = he_dev->tx_numrows *
1234 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1235 if (he_dev->tx_numbuffs > 5120)
1236 he_dev->tx_numbuffs = 5120;
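	/* numbuffs == numrows * cells_per_row / cells_per_lbuf; e.g. a 155
	 * card ends up with 2560/2560/5090 buffers for r0/r1/tx, a 622 card
	 * with 2560/2560/5120 */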
1237
1238 /* 5.1.2 configure hardware dependent registers */
1239
1240 he_writel(he_dev,
1241 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1242 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1243 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1244 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1245 LBARB);
1246
1247 he_writel(he_dev, BANK_ON |
1248 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1249 SDRAMCON);
1250
1251 he_writel(he_dev,
1252 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1253 RM_RW_WAIT(1), RCMCONFIG);
1254 he_writel(he_dev,
1255 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1256 TM_RW_WAIT(1), TCMCONFIG);
1257
1258 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1259
1260 he_writel(he_dev,
1261 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1262 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1263 RX_VALVP(he_dev->vpibits) |
1264 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1265
1266 he_writel(he_dev, DRF_THRESH(0x20) |
1267 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1268 TX_VCI_MASK(he_dev->vcibits) |
1269 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1270
1271 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1272
1273 he_writel(he_dev, PHY_INT_ENB |
1274 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1275 RH_CONFIG);
1276
1277 /* 5.1.3 initialize connection memory */
1278
1279 for (i = 0; i < TCM_MEM_SIZE; ++i)
1280 he_writel_tcm(he_dev, 0, i);
1281
1282 for (i = 0; i < RCM_MEM_SIZE; ++i)
1283 he_writel_rcm(he_dev, 0, i);
1284
1285 /*
1286 * transmit connection memory map
1287 *
1288 * tx memory
1289 * 0x0 ___________________
1290 * | |
1291 * | |
1292 * | TSRa |
1293 * | |
1294 * | |
1295 * 0x8000|___________________|
1296 * | |
1297 * | TSRb |
1298 * 0xc000|___________________|
1299 * | |
1300 * | TSRc |
1301 * 0xe000|___________________|
1302 * | TSRd |
1303 * 0xf000|___________________|
1304 * | tmABR |
1305 * 0x10000|___________________|
1306 * | |
1307 * | tmTPD |
1308 * |___________________|
1309 * | |
1310 * ....
1311 * 0x1ffff|___________________|
1312 *
1313 *
1314 */
1315
1316 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1317 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1318 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1319 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1320 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1321
1322
1323 /*
1324 * receive connection memory map
1325 *
1326 * 0x0 ___________________
1327 * | |
1328 * | |
1329 * | RSRa |
1330 * | |
1331 * | |
1332 * 0x8000|___________________|
1333 * | |
1334 * | rx0/1 |
1335 * | LBM | link lists of local
1336 * | tx | buffer memory
1337 * | |
1338 * 0xd000|___________________|
1339 * | |
1340 * | rmABR |
1341 * 0xe000|___________________|
1342 * | |
1343 * | RSRb |
1344 * |___________________|
1345 * | |
1346 * ....
1347 * 0xffff|___________________|
1348 */
1349
1350 he_writel(he_dev, 0x08000, RCMLBM_BA);
1351 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1352 he_writel(he_dev, 0x0d800, RCMABR_BA);
1353
1354 /* 5.1.4 initialize local buffer free pools linked lists */
1355
1356 he_init_rx_lbfp0(he_dev);
1357 he_init_rx_lbfp1(he_dev);
1358
1359 he_writel(he_dev, 0x0, RLBC_H);
1360 he_writel(he_dev, 0x0, RLBC_T);
1361 he_writel(he_dev, 0x0, RLBC_H2);
1362
1363 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1364 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1365
1366 he_init_tx_lbfp(he_dev);
1367
1368 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1369
1370 /* 5.1.5 initialize intermediate receive queues */
1371
1372 if (he_is622(he_dev)) {
1373 he_writel(he_dev, 0x000f, G0_INMQ_S);
1374 he_writel(he_dev, 0x200f, G0_INMQ_L);
1375
1376 he_writel(he_dev, 0x001f, G1_INMQ_S);
1377 he_writel(he_dev, 0x201f, G1_INMQ_L);
1378
1379 he_writel(he_dev, 0x002f, G2_INMQ_S);
1380 he_writel(he_dev, 0x202f, G2_INMQ_L);
1381
1382 he_writel(he_dev, 0x003f, G3_INMQ_S);
1383 he_writel(he_dev, 0x203f, G3_INMQ_L);
1384
1385 he_writel(he_dev, 0x004f, G4_INMQ_S);
1386 he_writel(he_dev, 0x204f, G4_INMQ_L);
1387
1388 he_writel(he_dev, 0x005f, G5_INMQ_S);
1389 he_writel(he_dev, 0x205f, G5_INMQ_L);
1390
1391 he_writel(he_dev, 0x006f, G6_INMQ_S);
1392 he_writel(he_dev, 0x206f, G6_INMQ_L);
1393
1394 he_writel(he_dev, 0x007f, G7_INMQ_S);
1395 he_writel(he_dev, 0x207f, G7_INMQ_L);
1396 } else {
1397 he_writel(he_dev, 0x0000, G0_INMQ_S);
1398 he_writel(he_dev, 0x0008, G0_INMQ_L);
1399
1400 he_writel(he_dev, 0x0001, G1_INMQ_S);
1401 he_writel(he_dev, 0x0009, G1_INMQ_L);
1402
1403 he_writel(he_dev, 0x0002, G2_INMQ_S);
1404 he_writel(he_dev, 0x000a, G2_INMQ_L);
1405
1406 he_writel(he_dev, 0x0003, G3_INMQ_S);
1407 he_writel(he_dev, 0x000b, G3_INMQ_L);
1408
1409 he_writel(he_dev, 0x0004, G4_INMQ_S);
1410 he_writel(he_dev, 0x000c, G4_INMQ_L);
1411
1412 he_writel(he_dev, 0x0005, G5_INMQ_S);
1413 he_writel(he_dev, 0x000d, G5_INMQ_L);
1414
1415 he_writel(he_dev, 0x0006, G6_INMQ_S);
1416 he_writel(he_dev, 0x000e, G6_INMQ_L);
1417
1418 he_writel(he_dev, 0x0007, G7_INMQ_S);
1419 he_writel(he_dev, 0x000f, G7_INMQ_L);
1420 }
1421
1422 /* 5.1.6 application tunable parameters */
1423
1424 he_writel(he_dev, 0x0, MCC);
1425 he_writel(he_dev, 0x0, OEC);
1426 he_writel(he_dev, 0x0, DCC);
1427 he_writel(he_dev, 0x0, CEC);
1428
1429 /* 5.1.7 cs block initialization */
1430
1431 he_init_cs_block(he_dev);
1432
1433 /* 5.1.8 cs block connection memory initialization */
1434
1435 if (he_init_cs_block_rcm(he_dev) < 0)
1436 return -ENOMEM;
1437
1438 /* 5.1.10 initialize host structures */
1439
1440 he_init_tpdrq(he_dev);
1441
1442 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1443 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1444 if (he_dev->tpd_pool == NULL) {
1445 hprintk("unable to create tpd pci_pool\n");
1446 return -ENOMEM;
1447 }
1448
1449 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1450
1451 if (he_init_group(he_dev, 0) != 0)
1452 return -ENOMEM;
1453
1454 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1455 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1456 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1457 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1458 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1459 G0_RBPS_BS + (group * 32));
1460
1461 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1462 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1463 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1464 G0_RBPL_QI + (group * 32));
1465 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1466
1467 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1468 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1469 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1470 G0_RBRQ_Q + (group * 16));
1471 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1472
1473 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1474 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1475 he_writel(he_dev, TBRQ_THRESH(0x1),
1476 G0_TBRQ_THRESH + (group * 16));
1477 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1478 }
1479
1480 /* host status page */
1481
1482 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1483 sizeof(struct he_hsp), &he_dev->hsp_phys);
1484 if (he_dev->hsp == NULL) {
1485 hprintk("failed to allocate host status page\n");
1486 return -ENOMEM;
1487 }
1488 memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1489 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1490
1491 /* initialize framer */
1492
1493#ifdef CONFIG_ATM_HE_USE_SUNI
1494 if (he_isMM(he_dev))
1495 suni_init(he_dev->atm_dev);
1496 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1497 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1498#endif /* CONFIG_ATM_HE_USE_SUNI */
1499
1500 if (sdh) {
1501 /* this really should be in suni.c but for now... */
1502 int val;
1503
1504 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1505 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1506 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1507 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1508 }
1509
1510 /* 5.1.12 enable transmit and receive */
1511
1512 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1513 reg |= TX_ENABLE|ER_ENABLE;
1514 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1515
1516 reg = he_readl(he_dev, RC_CONFIG);
1517 reg |= RX_ENABLE;
1518 he_writel(he_dev, reg, RC_CONFIG);
1519
1520 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1521 he_dev->cs_stper[i].inuse = 0;
1522 he_dev->cs_stper[i].pcr = -1;
1523 }
1524 he_dev->total_bw = 0;
1525
1526
1527 /* atm linux initialization */
1528
1529 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1530 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1531
1532 he_dev->irq_peak = 0;
1533 he_dev->rbrq_peak = 0;
1534 he_dev->rbpl_peak = 0;
1535 he_dev->tbrq_peak = 0;
1536
1537 HPRINTK("hell bent for leather!\n");
1538
1539 return 0;
1540}
1541
1542static void
1543he_stop(struct he_dev *he_dev)
1544{
1545 struct he_buff *heb, *next;
1546 struct pci_dev *pci_dev;
1547 u32 gen_cntl_0, reg;
1548 u16 command;
1549
1550 pci_dev = he_dev->pci_dev;
1551
1552 /* disable interrupts */
1553
1554 if (he_dev->membase) {
1555 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1556 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1557 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1558
1559 tasklet_disable(&he_dev->tasklet);
1560
1561 /* disable recv and transmit */
1562
1563 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1564 reg &= ~(TX_ENABLE|ER_ENABLE);
1565 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1566
1567 reg = he_readl(he_dev, RC_CONFIG);
1568 reg &= ~(RX_ENABLE);
1569 he_writel(he_dev, reg, RC_CONFIG);
1570 }
1571
1572#ifdef CONFIG_ATM_HE_USE_SUNI
1573 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1574 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1575#endif /* CONFIG_ATM_HE_USE_SUNI */
1576
1577 if (he_dev->irq)
1578 free_irq(he_dev->irq, he_dev);
1579
1580 if (he_dev->irq_base)
1581 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1582 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1583
1584 if (he_dev->hsp)
1585 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1586 he_dev->hsp, he_dev->hsp_phys);
1587
1588 if (he_dev->rbpl_base) {
1589 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1590 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1591
1592 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1593 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1594 }
1595
1596 kfree(he_dev->rbpl_virt);
1597 kfree(he_dev->rbpl_table);
1598
1599 if (he_dev->rbpl_pool)
1600 pci_pool_destroy(he_dev->rbpl_pool);
1601
1602 if (he_dev->rbrq_base)
1603 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1604 he_dev->rbrq_base, he_dev->rbrq_phys);
1605
1606 if (he_dev->tbrq_base)
1607 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1608 he_dev->tbrq_base, he_dev->tbrq_phys);
1609
1610 if (he_dev->tpdrq_base)
1611 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1612 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1613
1614 if (he_dev->tpd_pool)
1615 pci_pool_destroy(he_dev->tpd_pool);
1616
1617 if (he_dev->pci_dev) {
1618 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1619 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1620 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1621 }
1622
1623 if (he_dev->membase)
1624 iounmap(he_dev->membase);
1625}
1626
1627static struct he_tpd *
1628__alloc_tpd(struct he_dev *he_dev)
1629{
1630 struct he_tpd *tpd;
1631 dma_addr_t mapping;
1632
1633 tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1634 if (tpd == NULL)
1635 return NULL;
1636
1637 tpd->status = TPD_ADDR(mapping);
1638 tpd->reserved = 0;
1639 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1640 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1641 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1642
1643 return tpd;
1644}
1645
1646#define AAL5_LEN(buf,len) \
1647 ((((unsigned char *)(buf))[(len)-6] << 8) | \
1648 (((unsigned char *)(buf))[(len)-5]))
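/* the aal5 cpcs trailer occupies the last eight bytes of the reassembled
 * pdu -- uu(1), cpi(1), length(2, big endian), crc-32(4) -- hence the
 * length field at offsets len-6 and len-5 above */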
1649
1650/* 2.10.1.2 receive
1651 *
1652 * aal5 packets can optionally return the tcp checksum in the lower
1653 * 16 bits of the crc (RSR0_TCP_CKSUM)
1654 */
1655
1656#define TCP_CKSUM(buf,len) \
1657 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1658 (((unsigned char *)(buf))[(len-1)]))
1659
1660static int
1661he_service_rbrq(struct he_dev *he_dev, int group)
1662{
1663 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1664 ((unsigned long)he_dev->rbrq_base |
1665 he_dev->hsp->group[group].rbrq_tail);
1666 unsigned cid, lastcid = -1;
1667 struct sk_buff *skb;
1668 struct atm_vcc *vcc = NULL;
1669 struct he_vcc *he_vcc;
1670 struct he_buff *heb, *next;
1671 int i;
1672 int pdus_assembled = 0;
1673 int updated = 0;
1674
1675 read_lock(&vcc_sklist_lock);
1676 while (he_dev->rbrq_head != rbrq_tail) {
1677 ++updated;
1678
1679 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1680 he_dev->rbrq_head, group,
1681 RBRQ_ADDR(he_dev->rbrq_head),
1682 RBRQ_BUFLEN(he_dev->rbrq_head),
1683 RBRQ_CID(he_dev->rbrq_head),
1684 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1685 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1686 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1687 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1688 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1689 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1690
1691 i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1692 heb = he_dev->rbpl_virt[i];
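		/* the adapter reports back the idx value stored in the rbpl
		 * entry, so shifting it down recovers the index into
		 * rbpl_virt[] and rbpl_table set up in he_init_group() */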
1693
1694 cid = RBRQ_CID(he_dev->rbrq_head);
1695 if (cid != lastcid)
1696 vcc = __find_vcc(he_dev, cid);
1697 lastcid = cid;
1698
1699 if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1700 hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
1701 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1702 clear_bit(i, he_dev->rbpl_table);
1703 list_del(&heb->entry);
1704 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1705 }
1706
1707 goto next_rbrq_entry;
1708 }
1709
1710 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1711 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1712 atomic_inc(&vcc->stats->rx_drop);
1713 goto return_host_buffers;
1714 }
1715
1716 heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1717 clear_bit(i, he_dev->rbpl_table);
1718 list_move_tail(&heb->entry, &he_vcc->buffers);
1719 he_vcc->pdu_len += heb->len;
1720
1721 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1722 lastcid = -1;
1723 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1724 wake_up(&he_vcc->rx_waitq);
1725 goto return_host_buffers;
1726 }
1727
1728 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1729 goto next_rbrq_entry;
1730
1731 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1732 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1733 HPRINTK("%s%s (%d.%d)\n",
1734 RBRQ_CRC_ERR(he_dev->rbrq_head)
1735 ? "CRC_ERR " : "",
1736 RBRQ_LEN_ERR(he_dev->rbrq_head)
1737 ? "LEN_ERR" : "",
1738 vcc->vpi, vcc->vci);
1739 atomic_inc(&vcc->stats->rx_err);
1740 goto return_host_buffers;
1741 }
1742
1743 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1744 GFP_ATOMIC);
1745 if (!skb) {
1746 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1747 goto return_host_buffers;
1748 }
1749
1750 if (rx_skb_reserve > 0)
1751 skb_reserve(skb, rx_skb_reserve);
1752
1753 __net_timestamp(skb);
1754
1755 list_for_each_entry(heb, &he_vcc->buffers, entry)
1756 memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1757
1758 switch (vcc->qos.aal) {
1759 case ATM_AAL0:
1760 /* 2.10.1.5 raw cell receive */
1761 skb->len = ATM_AAL0_SDU;
1762 skb_set_tail_pointer(skb, skb->len);
1763 break;
1764 case ATM_AAL5:
1765 /* 2.10.1.2 aal5 receive */
1766
1767 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1768 skb_set_tail_pointer(skb, skb->len);
1769#ifdef USE_CHECKSUM_HW
1770 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1771 skb->ip_summed = CHECKSUM_COMPLETE;
1772 skb->csum = TCP_CKSUM(skb->data,
1773 he_vcc->pdu_len);
1774 }
1775#endif
1776 break;
1777 }
1778
1779#ifdef should_never_happen
1780 if (skb->len > vcc->qos.rxtp.max_sdu)
1781 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1782#endif
1783
1784#ifdef notdef
1785 ATM_SKB(skb)->vcc = vcc;
1786#endif
1787 spin_unlock(&he_dev->global_lock);
1788 vcc->push(vcc, skb);
1789 spin_lock(&he_dev->global_lock);
1790
1791 atomic_inc(&vcc->stats->rx);
1792
1793return_host_buffers:
1794 ++pdus_assembled;
1795
1796 list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1797 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1798 INIT_LIST_HEAD(&he_vcc->buffers);
1799 he_vcc->pdu_len = 0;
1800
1801next_rbrq_entry:
1802 he_dev->rbrq_head = (struct he_rbrq *)
1803 ((unsigned long) he_dev->rbrq_base |
1804 RBRQ_MASK(he_dev->rbrq_head + 1));
1805
1806 }
1807 read_unlock(&vcc_sklist_lock);
1808
1809 if (updated) {
1810 if (updated > he_dev->rbrq_peak)
1811 he_dev->rbrq_peak = updated;
1812
1813 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1814 G0_RBRQ_H + (group * 16));
1815 }
1816
1817 return pdus_assembled;
1818}
1819
1820static void
1821he_service_tbrq(struct he_dev *he_dev, int group)
1822{
1823 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1824 ((unsigned long)he_dev->tbrq_base |
1825 he_dev->hsp->group[group].tbrq_tail);
1826 struct he_tpd *tpd;
1827 int slot, updated = 0;
1828 struct he_tpd *__tpd;
1829
1830 /* 2.1.6 transmit buffer return queue */
1831
1832 while (he_dev->tbrq_head != tbrq_tail) {
1833 ++updated;
1834
1835 HPRINTK("tbrq%d 0x%x%s%s\n",
1836 group,
1837 TBRQ_TPD(he_dev->tbrq_head),
1838 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1839 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1840 tpd = NULL;
1841 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1842 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1843 tpd = __tpd;
1844 list_del(&__tpd->entry);
1845 break;
1846 }
1847 }
1848
1849 if (tpd == NULL) {
1850 hprintk("unable to locate tpd for dma buffer %x\n",
1851 TBRQ_TPD(he_dev->tbrq_head));
1852 goto next_tbrq_entry;
1853 }
1854
1855 if (TBRQ_EOS(he_dev->tbrq_head)) {
1856 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1857 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1858 if (tpd->vcc)
1859 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1860
1861 goto next_tbrq_entry;
1862 }
1863
1864 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1865 if (tpd->iovec[slot].addr)
1866 pci_unmap_single(he_dev->pci_dev,
1867 tpd->iovec[slot].addr,
1868 tpd->iovec[slot].len & TPD_LEN_MASK,
1869 PCI_DMA_TODEVICE);
1870 if (tpd->iovec[slot].len & TPD_LST)
1871 break;
1872
1873 }
1874
1875 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1876 if (tpd->vcc && tpd->vcc->pop)
1877 tpd->vcc->pop(tpd->vcc, tpd->skb);
1878 else
1879 dev_kfree_skb_any(tpd->skb);
1880 }
1881
1882next_tbrq_entry:
1883 if (tpd)
1884 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1885 he_dev->tbrq_head = (struct he_tbrq *)
1886 ((unsigned long) he_dev->tbrq_base |
1887 TBRQ_MASK(he_dev->tbrq_head + 1));
1888 }
1889
1890 if (updated) {
1891 if (updated > he_dev->tbrq_peak)
1892 he_dev->tbrq_peak = updated;
1893
1894 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1895 G0_TBRQ_H + (group * 16));
1896 }
1897}
1898
1899static void
1900he_service_rbpl(struct he_dev *he_dev, int group)
1901{
1902 struct he_rbp *new_tail;
1903 struct he_rbp *rbpl_head;
1904 struct he_buff *heb;
1905 dma_addr_t mapping;
1906 int i;
1907 int moved = 0;
1908
1909 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1910 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1911
1912 for (;;) {
1913 new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1914 RBPL_MASK(he_dev->rbpl_tail+1));
1915
1916 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1917 if (new_tail == rbpl_head)
1918 break;
1919
1920 i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1921 if (i > (RBPL_TABLE_SIZE - 1)) {
1922 i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1923 if (i > (RBPL_TABLE_SIZE - 1))
1924 break;
1925 }
1926 he_dev->rbpl_hint = i + 1;
1927
1928 heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1929 if (!heb)
1930 break;
1931 heb->mapping = mapping;
1932 list_add(&heb->entry, &he_dev->rbpl_outstanding);
1933 he_dev->rbpl_virt[i] = heb;
1934 set_bit(i, he_dev->rbpl_table);
1935 new_tail->idx = i << RBP_IDX_OFFSET;
1936 new_tail->phys = mapping + offsetof(struct he_buff, data);
1937
1938 he_dev->rbpl_tail = new_tail;
1939 ++moved;
1940 }
1941
1942 if (moved)
1943 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1944}
1945
1946static void
1947he_tasklet(unsigned long data)
1948{
1949 unsigned long flags;
1950 struct he_dev *he_dev = (struct he_dev *) data;
1951 int group, type;
1952 int updated = 0;
1953
1954 HPRINTK("tasklet (0x%lx)\n", data);
1955 spin_lock_irqsave(&he_dev->global_lock, flags);
1956
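	/*
	 * drain the in-memory interrupt event queue appended to by the
	 * adapter, dispatching each entry by its type/group; the head
	 * register is pushed back to the adapter once, after the loop
	 */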
1957 while (he_dev->irq_head != he_dev->irq_tail) {
1958 ++updated;
1959
1960 type = ITYPE_TYPE(he_dev->irq_head->isw);
1961 group = ITYPE_GROUP(he_dev->irq_head->isw);
1962
1963 switch (type) {
1964 case ITYPE_RBRQ_THRESH:
1965 HPRINTK("rbrq%d threshold\n", group);
1966 /* fall through */
1967 case ITYPE_RBRQ_TIMER:
1968 if (he_service_rbrq(he_dev, group))
1969 he_service_rbpl(he_dev, group);
1970 break;
1971 case ITYPE_TBRQ_THRESH:
1972 HPRINTK("tbrq%d threshold\n", group);
1973 /* fall through */
1974 case ITYPE_TPD_COMPLETE:
1975 he_service_tbrq(he_dev, group);
1976 break;
1977 case ITYPE_RBPL_THRESH:
1978 he_service_rbpl(he_dev, group);
1979 break;
1980 case ITYPE_RBPS_THRESH:
1981 /* shouldn't happen unless small buffers enabled */
1982 break;
1983 case ITYPE_PHY:
1984 HPRINTK("phy interrupt\n");
1985#ifdef CONFIG_ATM_HE_USE_SUNI
1986 spin_unlock_irqrestore(&he_dev->global_lock, flags);
1987 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1988 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1989 spin_lock_irqsave(&he_dev->global_lock, flags);
1990#endif
1991 break;
1992 case ITYPE_OTHER:
1993 switch (type|group) {
1994 case ITYPE_PARITY:
1995 hprintk("parity error\n");
1996 break;
1997 case ITYPE_ABORT:
1998 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1999 break;
2000 }
2001 break;
2002 case ITYPE_TYPE(ITYPE_INVALID):
2003 /* see 8.1.1 -- check all queues */
2004
2005 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2006
2007 he_service_rbrq(he_dev, 0);
2008 he_service_rbpl(he_dev, 0);
2009 he_service_tbrq(he_dev, 0);
2010 break;
2011 default:
2012 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2013 }
2014
2015 he_dev->irq_head->isw = ITYPE_INVALID;
2016
2017 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2018 }
2019
2020 if (updated) {
2021 if (updated > he_dev->irq_peak)
2022 he_dev->irq_peak = updated;
2023
2024 he_writel(he_dev,
2025 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2026 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2027 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2028 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2029 }
2030 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2031}
2032
2033static irqreturn_t
2034he_irq_handler(int irq, void *dev_id)
2035{
2036 unsigned long flags;
2037 struct he_dev *he_dev = (struct he_dev * )dev_id;
2038 int handled = 0;
2039
2040 if (he_dev == NULL)
2041 return IRQ_NONE;
2042
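	/*
	 * in hard-irq context only latch the adapter's new queue tail and
	 * schedule the tasklet; all queue servicing happens in he_tasklet()
	 */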
2043 spin_lock_irqsave(&he_dev->global_lock, flags);
2044
2045 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2046 (*he_dev->irq_tailoffset << 2));
2047
2048 if (he_dev->irq_tail == he_dev->irq_head) {
2049 HPRINTK("tailoffset not updated?\n");
2050 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2051 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2052 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2053 }
2054
2055#ifdef DEBUG
2056 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2057 hprintk("spurious (or shared) interrupt?\n");
2058#endif
2059
2060 if (he_dev->irq_head != he_dev->irq_tail) {
2061 handled = 1;
2062 tasklet_schedule(&he_dev->tasklet);
2063 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2064 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2065 }
2066 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2067 return IRQ_RETVAL(handled);
2068
2069}
2070
2071static __inline__ void
2072__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2073{
2074 struct he_tpdrq *new_tail;
2075
2076 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2077 tpd, cid, he_dev->tpdrq_tail);
2078
2079 /* new_tail = he_dev->tpdrq_tail; */
2080 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2081 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2082
2083 /*
2084 * check to see if we are about to set the tail == head
2085 * if true, update the head pointer from the adapter
2086 * to see if this is really the case (reading the queue
2087 * head for every enqueue would be unnecessarily slow)
2088 */
2089
2090 if (new_tail == he_dev->tpdrq_head) {
2091 he_dev->tpdrq_head = (struct he_tpdrq *)
2092 (((unsigned long)he_dev->tpdrq_base) |
2093 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2094
2095 if (new_tail == he_dev->tpdrq_head) {
2096 int slot;
2097
2098 hprintk("tpdrq full (cid 0x%x)\n", cid);
2099 /*
2100 * FIXME
2101 * push tpd onto a transmit backlog queue
2102 * after service_tbrq, service the backlog
2103 * for now, we just drop the pdu
2104 */
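			/*
			 * untested sketch of the FIXME above: the tpd could be
			 * list_add_tail()'d to a per-device backlog list here
			 * (still under global_lock), and the backlog re-queued
			 * from he_service_tbrq() once the adapter has drained
			 * some of the TPDRQ, instead of dropping the pdu
			 */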
2105 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2106 if (tpd->iovec[slot].addr)
2107 pci_unmap_single(he_dev->pci_dev,
2108 tpd->iovec[slot].addr,
2109 tpd->iovec[slot].len & TPD_LEN_MASK,
2110 PCI_DMA_TODEVICE);
2111 }
2112 if (tpd->skb) {
2113 if (tpd->vcc->pop)
2114 tpd->vcc->pop(tpd->vcc, tpd->skb);
2115 else
2116 dev_kfree_skb_any(tpd->skb);
2117 atomic_inc(&tpd->vcc->stats->tx_err);
2118 }
2119 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2120 return;
2121 }
2122 }
2123
2124 /* 2.1.5 transmit packet descriptor ready queue */
2125 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2126 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2127 he_dev->tpdrq_tail->cid = cid;
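	/* make the tpd contents and this queue entry visible in host memory
	   before the tail register write below lets the adapter fetch them */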
2128 wmb();
2129
2130 he_dev->tpdrq_tail = new_tail;
2131
2132 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2133 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2134}
2135
2136static int
2137he_open(struct atm_vcc *vcc)
2138{
2139 unsigned long flags;
2140 struct he_dev *he_dev = HE_DEV(vcc->dev);
2141 struct he_vcc *he_vcc;
2142 int err = 0;
2143 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2144 short vpi = vcc->vpi;
2145 int vci = vcc->vci;
2146
2147 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2148 return 0;
2149
2150 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2151
2152 set_bit(ATM_VF_ADDR, &vcc->flags);
2153
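	/* the cid packs vpi above the configured vci bits (see he_mkcid) and
	   indexes the per-connection state in TCM/RCM connection memory */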
2154 cid = he_mkcid(he_dev, vpi, vci);
2155
2156 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2157 if (he_vcc == NULL) {
2158 hprintk("unable to allocate he_vcc during open\n");
2159 return -ENOMEM;
2160 }
2161
2162 INIT_LIST_HEAD(&he_vcc->buffers);
2163 he_vcc->pdu_len = 0;
2164 he_vcc->rc_index = -1;
2165
2166 init_waitqueue_head(&he_vcc->rx_waitq);
2167 init_waitqueue_head(&he_vcc->tx_waitq);
2168
2169 vcc->dev_data = he_vcc;
2170
2171 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2172 int pcr_goal;
2173
2174 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2175 if (pcr_goal == 0)
2176 pcr_goal = he_dev->atm_dev->link_rate;
2177 if (pcr_goal < 0) /* means round down, technically */
2178 pcr_goal = -pcr_goal;
2179
2180 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2181
2182 switch (vcc->qos.aal) {
2183 case ATM_AAL5:
2184 tsr0_aal = TSR0_AAL5;
2185 tsr4 = TSR4_AAL5;
2186 break;
2187 case ATM_AAL0:
2188 tsr0_aal = TSR0_AAL0_SDU;
2189 tsr4 = TSR4_AAL0_SDU;
2190 break;
2191 default:
2192 err = -EINVAL;
2193 goto open_failed;
2194 }
2195
2196 spin_lock_irqsave(&he_dev->global_lock, flags);
2197 tsr0 = he_readl_tsr0(he_dev, cid);
2198 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2199
2200 if (TSR0_CONN_STATE(tsr0) != 0) {
2201 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2202 err = -EBUSY;
2203 goto open_failed;
2204 }
2205
2206 switch (vcc->qos.txtp.traffic_class) {
2207 case ATM_UBR:
2208 /* 2.3.3.1 open connection ubr */
2209
2210 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2211 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2212 break;
2213
2214 case ATM_CBR:
2215 /* 2.3.3.2 open connection cbr */
2216
2217 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
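			/* e.g. for an OC-3 link rate of roughly 353207 cps this
			   refuses new CBR connections once aggregate CBR
			   bandwidth would exceed about 317886 cps */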
2218 if ((he_dev->total_bw + pcr_goal)
2219 > (he_dev->atm_dev->link_rate * 9 / 10))
2220 {
2221 err = -EBUSY;
2222 goto open_failed;
2223 }
2224
2225 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2226
2227 /* find an unused cs_stper register */
2228 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2229 if (he_dev->cs_stper[reg].inuse == 0 ||
2230 he_dev->cs_stper[reg].pcr == pcr_goal)
2231 break;
2232
2233 if (reg == HE_NUM_CS_STPER) {
2234 err = -EBUSY;
2235 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2236 goto open_failed;
2237 }
2238
2239 he_dev->total_bw += pcr_goal;
2240
2241 he_vcc->rc_index = reg;
2242 ++he_dev->cs_stper[reg].inuse;
2243 he_dev->cs_stper[reg].pcr = pcr_goal;
2244
2245 clock = he_is622(he_dev) ? 66667000 : 50000000;
2246 period = clock / pcr_goal;
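			/* period is the shaper reload value in adapter clock
			   ticks per cell: e.g. a 25000 cps connection on a
			   155 Mb/s card gives 50000000 / 25000 = 2000 ticks */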
2247
2248 HPRINTK("rc_index = %d period = %d\n",
2249 reg, period);
2250
2251 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2252 CS_STPER0 + reg);
2253 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2254
2255 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2256 TSR0_RC_INDEX(reg);
2257
2258 break;
2259 default:
2260 err = -EINVAL;
2261 goto open_failed;
2262 }
2263
2264 spin_lock_irqsave(&he_dev->global_lock, flags);
2265
2266 he_writel_tsr0(he_dev, tsr0, cid);
2267 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2268 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2269 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2270 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2271 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2272
2273 he_writel_tsr3(he_dev, 0x0, cid);
2274 he_writel_tsr5(he_dev, 0x0, cid);
2275 he_writel_tsr6(he_dev, 0x0, cid);
2276 he_writel_tsr7(he_dev, 0x0, cid);
2277 he_writel_tsr8(he_dev, 0x0, cid);
2278 he_writel_tsr10(he_dev, 0x0, cid);
2279 he_writel_tsr11(he_dev, 0x0, cid);
2280 he_writel_tsr12(he_dev, 0x0, cid);
2281 he_writel_tsr13(he_dev, 0x0, cid);
2282 he_writel_tsr14(he_dev, 0x0, cid);
2283 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2284 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2285 }
2286
2287 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2288 unsigned aal;
2289
2290 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2291 &HE_VCC(vcc)->rx_waitq);
2292
2293 switch (vcc->qos.aal) {
2294 case ATM_AAL5:
2295 aal = RSR0_AAL5;
2296 break;
2297 case ATM_AAL0:
2298 aal = RSR0_RAWCELL;
2299 break;
2300 default:
2301 err = -EINVAL;
2302 goto open_failed;
2303 }
2304
2305 spin_lock_irqsave(&he_dev->global_lock, flags);
2306
2307 rsr0 = he_readl_rsr0(he_dev, cid);
2308 if (rsr0 & RSR0_OPEN_CONN) {
2309 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2310
2311 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2312 err = -EBUSY;
2313 goto open_failed;
2314 }
2315
2316 rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2317 rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2318 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2319 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2320
2321#ifdef USE_CHECKSUM_HW
2322 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2323 rsr0 |= RSR0_TCP_CKSUM;
2324#endif
2325
2326 he_writel_rsr4(he_dev, rsr4, cid);
2327 he_writel_rsr1(he_dev, rsr1, cid);
2328 /* 5.1.11 last parameter initialized should be
2329 the open/closed indication in rsr0 */
2330 he_writel_rsr0(he_dev,
2331 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2332 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2333
2334 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2335 }
2336
2337open_failed:
2338
2339 if (err) {
2340 kfree(he_vcc);
2341 clear_bit(ATM_VF_ADDR, &vcc->flags);
2342 }
2343 else
2344 set_bit(ATM_VF_READY, &vcc->flags);
2345
2346 return err;
2347}
2348
2349static void
2350he_close(struct atm_vcc *vcc)
2351{
2352 unsigned long flags;
2353 DECLARE_WAITQUEUE(wait, current);
2354 struct he_dev *he_dev = HE_DEV(vcc->dev);
2355 struct he_tpd *tpd;
2356 unsigned cid;
2357 struct he_vcc *he_vcc = HE_VCC(vcc);
2358#define MAX_RETRY 30
2359 int retry = 0, sleep = 1, tx_inuse;
2360
2361 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2362
2363 clear_bit(ATM_VF_READY, &vcc->flags);
2364 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2365
2366 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2367 int timeout;
2368
2369 HPRINTK("close rx cid 0x%x\n", cid);
2370
2371 /* 2.7.2.2 close receive operation */
2372
2373 /* wait for previous close (if any) to finish */
2374
2375 spin_lock_irqsave(&he_dev->global_lock, flags);
2376 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2377 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2378 udelay(250);
2379 }
2380
2381 set_current_state(TASK_UNINTERRUPTIBLE);
2382 add_wait_queue(&he_vcc->rx_waitq, &wait);
2383
2384 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2385 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2386 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2387 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2388
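		/* sleep until the receive service path wakes rx_waitq when the
		   adapter reports the connection closed (30 second timeout) */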
2389 timeout = schedule_timeout(30*HZ);
2390
2391 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2392 set_current_state(TASK_RUNNING);
2393
2394 if (timeout == 0)
2395 hprintk("close rx timeout cid 0x%x\n", cid);
2396
2397 HPRINTK("close rx cid 0x%x complete\n", cid);
2398
2399 }
2400
2401 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2402 volatile unsigned tsr4, tsr0;
2403 int timeout;
2404
2405 HPRINTK("close tx cid 0x%x\n", cid);
2406
2407 /* 2.1.2
2408 *
2409 * ... the host must first stop queueing packets to the TPDRQ
2410 * on the connection to be closed, then wait for all outstanding
2411 * packets to be transmitted and their buffers returned to the
2412 * TBRQ. When the last packet on the connection arrives in the
2413 * TBRQ, the host issues the close command to the adapter.
2414 */
2415
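		/* wait for the socket's transmit accounting (sk_wmem_alloc) to
		   fall back to its baseline of one, i.e. for every queued skb
		   to be popped, backing off exponentially between retries */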
2416 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2417 (retry < MAX_RETRY)) {
2418 msleep(sleep);
2419 if (sleep < 250)
2420 sleep = sleep * 2;
2421
2422 ++retry;
2423 }
2424
2425 if (tx_inuse > 1)
2426 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2427
2428 /* 2.3.1.1 generic close operations with flush */
2429
2430 spin_lock_irqsave(&he_dev->global_lock, flags);
2431 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2432 /* also clears TSR4_SESSION_ENDED */
2433
2434 switch (vcc->qos.txtp.traffic_class) {
2435 case ATM_UBR:
2436 he_writel_tsr1(he_dev,
2437 TSR1_MCR(rate_to_atmf(200000))
2438 | TSR1_PCR(0), cid);
2439 break;
2440 case ATM_CBR:
2441 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2442 break;
2443 }
2444 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2445
2446 tpd = __alloc_tpd(he_dev);
2447 if (tpd == NULL) {
2448 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2449 goto close_tx_incomplete;
2450 }
2451 tpd->status |= TPD_EOS | TPD_INT;
2452 tpd->skb = NULL;
2453 tpd->vcc = vcc;
2454 wmb();
2455
2456 set_current_state(TASK_UNINTERRUPTIBLE);
2457 add_wait_queue(&he_vcc->tx_waitq, &wait);
2458 __enqueue_tpd(he_dev, tpd, cid);
2459 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2460
2461 timeout = schedule_timeout(30*HZ);
2462
2463 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2464 set_current_state(TASK_RUNNING);
2465
2466 spin_lock_irqsave(&he_dev->global_lock, flags);
2467
2468 if (timeout == 0) {
2469 hprintk("close tx timeout cid 0x%x\n", cid);
2470 goto close_tx_incomplete;
2471 }
2472
2473 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2474 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2475 udelay(250);
2476 }
2477
2478 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2479 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2480 udelay(250);
2481 }
2482
2483close_tx_incomplete:
2484
2485 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2486 int reg = he_vcc->rc_index;
2487
2488 HPRINTK("cs_stper reg = %d\n", reg);
2489
2490 if (he_dev->cs_stper[reg].inuse == 0)
2491 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2492 else
2493 --he_dev->cs_stper[reg].inuse;
2494
2495 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2496 }
2497 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2498
2499 HPRINTK("close tx cid 0x%x complete\n", cid);
2500 }
2501
2502 kfree(he_vcc);
2503
2504 clear_bit(ATM_VF_ADDR, &vcc->flags);
2505}
2506
2507static int
2508he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2509{
2510 unsigned long flags;
2511 struct he_dev *he_dev = HE_DEV(vcc->dev);
2512 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2513 struct he_tpd *tpd;
2514#ifdef USE_SCATTERGATHER
2515 int i, slot = 0;
2516#endif
2517
2518#define HE_TPD_BUFSIZE 0xffff
2519
2520 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2521
2522 if ((skb->len > HE_TPD_BUFSIZE) ||
2523 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2524		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2525 if (vcc->pop)
2526 vcc->pop(vcc, skb);
2527 else
2528 dev_kfree_skb_any(skb);
2529 atomic_inc(&vcc->stats->tx_err);
2530 return -EINVAL;
2531 }
2532
2533#ifndef USE_SCATTERGATHER
2534 if (skb_shinfo(skb)->nr_frags) {
2535 hprintk("no scatter/gather support\n");
2536 if (vcc->pop)
2537 vcc->pop(vcc, skb);
2538 else
2539 dev_kfree_skb_any(skb);
2540 atomic_inc(&vcc->stats->tx_err);
2541 return -EINVAL;
2542 }
2543#endif
2544 spin_lock_irqsave(&he_dev->global_lock, flags);
2545
2546 tpd = __alloc_tpd(he_dev);
2547 if (tpd == NULL) {
2548 if (vcc->pop)
2549 vcc->pop(vcc, skb);
2550 else
2551 dev_kfree_skb_any(skb);
2552 atomic_inc(&vcc->stats->tx_err);
2553 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2554 return -ENOMEM;
2555 }
2556
2557 if (vcc->qos.aal == ATM_AAL5)
2558 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2559 else {
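		/* an AAL0 SDU is the 52-byte cell: 4 header bytes (no HEC) plus
		   the 48-byte payload; pick the PTI and CLP bits out of the last
		   header octet, then strip the header before DMA -- cell type
		   and CLP travel in the TPD, the VPI/VCI come from the cid */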
2560 char *pti_clp = (void *) (skb->data + 3);
2561 int clp, pti;
2562
2563 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2564 clp = (*pti_clp & ATM_HDR_CLP);
2565 tpd->status |= TPD_CELLTYPE(pti);
2566 if (clp)
2567 tpd->status |= TPD_CLP;
2568
2569 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2570 }
2571
2572#ifdef USE_SCATTERGATHER
2573 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2574 skb_headlen(skb), PCI_DMA_TODEVICE);
2575 tpd->iovec[slot].len = skb_headlen(skb);
2576 ++slot;
2577
2578 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2579 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2580
2581 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2582 tpd->vcc = vcc;
2583			tpd->skb = NULL;	/* not the last fragment,
2584						   so don't ->pop() it yet */
2585 wmb();
2586
2587 __enqueue_tpd(he_dev, tpd, cid);
2588 tpd = __alloc_tpd(he_dev);
2589 if (tpd == NULL) {
2590 if (vcc->pop)
2591 vcc->pop(vcc, skb);
2592 else
2593 dev_kfree_skb_any(skb);
2594 atomic_inc(&vcc->stats->tx_err);
2595 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2596 return -ENOMEM;
2597 }
2598 tpd->status |= TPD_USERCELL;
2599 slot = 0;
2600 }
2601
2602 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2603 (void *) page_address(frag->page) + frag->page_offset,
2604 frag->size, PCI_DMA_TODEVICE);
2605 tpd->iovec[slot].len = frag->size;
2606 ++slot;
2607
2608 }
2609
2610 tpd->iovec[slot - 1].len |= TPD_LST;
2611#else
2612 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2613 tpd->length0 = skb->len | TPD_LST;
2614#endif
2615 tpd->status |= TPD_INT;
2616
2617 tpd->vcc = vcc;
2618 tpd->skb = skb;
2619 wmb();
2620 ATM_SKB(skb)->vcc = vcc;
2621
2622 __enqueue_tpd(he_dev, tpd, cid);
2623 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2624
2625 atomic_inc(&vcc->stats->tx);
2626
2627 return 0;
2628}
2629
2630static int
2631he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2632{
2633 unsigned long flags;
2634 struct he_dev *he_dev = HE_DEV(atm_dev);
2635 struct he_ioctl_reg reg;
2636 int err = 0;
2637
2638 switch (cmd) {
2639 case HE_GET_REG:
2640 if (!capable(CAP_NET_ADMIN))
2641 return -EPERM;
2642
2643		if (copy_from_user(&reg, arg,
2644 sizeof(struct he_ioctl_reg)))
2645 return -EFAULT;
2646
2647 spin_lock_irqsave(&he_dev->global_lock, flags);
2648 switch (reg.type) {
2649 case HE_REGTYPE_PCI:
2650 if (reg.addr >= HE_REGMAP_SIZE) {
2651 err = -EINVAL;
2652 break;
2653 }
2654
2655 reg.val = he_readl(he_dev, reg.addr);
2656 break;
2657 case HE_REGTYPE_RCM:
2658 reg.val =
2659 he_readl_rcm(he_dev, reg.addr);
2660 break;
2661 case HE_REGTYPE_TCM:
2662 reg.val =
2663 he_readl_tcm(he_dev, reg.addr);
2664 break;
2665 case HE_REGTYPE_MBOX:
2666 reg.val =
2667 he_readl_mbox(he_dev, reg.addr);
2668 break;
2669 default:
2670 err = -EINVAL;
2671 break;
2672 }
2673 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2674 if (err == 0)
2675			if (copy_to_user(arg, &reg,
2676 sizeof(struct he_ioctl_reg)))
2677 return -EFAULT;
2678 break;
2679 default:
2680#ifdef CONFIG_ATM_HE_USE_SUNI
2681 if (atm_dev->phy && atm_dev->phy->ioctl)
2682 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2683#else /* CONFIG_ATM_HE_USE_SUNI */
2684 err = -EINVAL;
2685#endif /* CONFIG_ATM_HE_USE_SUNI */
2686 break;
2687 }
2688
2689 return err;
2690}
2691
2692static void
2693he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2694{
2695 unsigned long flags;
2696 struct he_dev *he_dev = HE_DEV(atm_dev);
2697
2698 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2699
2700 spin_lock_irqsave(&he_dev->global_lock, flags);
2701 he_writel(he_dev, val, FRAMER + (addr*4));
2702 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2703 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2704}
2705
2706
2707static unsigned char
2708he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2709{
2710 unsigned long flags;
2711 struct he_dev *he_dev = HE_DEV(atm_dev);
2712 unsigned reg;
2713
2714 spin_lock_irqsave(&he_dev->global_lock, flags);
2715 reg = he_readl(he_dev, FRAMER + (addr*4));
2716 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2717
2718 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2719 return reg;
2720}
2721
2722static int
2723he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2724{
2725 unsigned long flags;
2726 struct he_dev *he_dev = HE_DEV(dev);
2727 int left, i;
2728#ifdef notdef
2729 struct he_rbrq *rbrq_tail;
2730 struct he_tpdrq *tpdrq_head;
2731	int rbpl_head, rbpl_tail, inuse;
2732#endif
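	/* note: these counters are function statics, so they accumulate
	   across reads and are shared by every he device in the system */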
2733 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2734
2735
2736 left = *pos;
2737 if (!left--)
2738 return sprintf(page, "ATM he driver\n");
2739
2740 if (!left--)
2741 return sprintf(page, "%s%s\n\n",
2742 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2743
2744 if (!left--)
2745 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2746
2747 spin_lock_irqsave(&he_dev->global_lock, flags);
2748 mcc += he_readl(he_dev, MCC);
2749 oec += he_readl(he_dev, OEC);
2750 dcc += he_readl(he_dev, DCC);
2751 cec += he_readl(he_dev, CEC);
2752 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2753
2754 if (!left--)
2755 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2756 mcc, oec, dcc, cec);
2757
2758 if (!left--)
2759 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2760 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2761
2762 if (!left--)
2763 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2764 CONFIG_TPDRQ_SIZE);
2765
2766 if (!left--)
2767 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2768 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2769
2770 if (!left--)
2771 return sprintf(page, "tbrq_size = %d peak = %d\n",
2772 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2773
2774
2775#ifdef notdef
2776 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2777 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2778
2779 inuse = rbpl_head - rbpl_tail;
2780 if (inuse < 0)
2781 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2782 inuse /= sizeof(struct he_rbp);
2783
2784 if (!left--)
2785 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2786 CONFIG_RBPL_SIZE, inuse);
2787#endif
2788
2789 if (!left--)
2790 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2791
2792 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2793 if (!left--)
2794 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2795 he_dev->cs_stper[i].pcr,
2796 he_dev->cs_stper[i].inuse);
2797
2798 if (!left--)
2799 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2800			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2801
2802 return 0;
2803}
2804
2805/* eeprom routines -- see 4.7 */
2806
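/*
 * bit-banged serial EEPROM access through HOST_CNTL: readtab clocks out the
 * READ opcode, the eight address bits are then shifted in msb-first on bit 9,
 * and the data byte is clocked back out msb-first on ID_DOUT
 */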
2807static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2808{
2809 u32 val = 0, tmp_read = 0;
2810 int i, j = 0;
2811 u8 byte_read = 0;
2812
2813 val = readl(he_dev->membase + HOST_CNTL);
2814 val &= 0xFFFFE0FF;
2815
2816 /* Turn on write enable */
2817 val |= 0x800;
2818 he_writel(he_dev, val, HOST_CNTL);
2819
2820 /* Send READ instruction */
2821 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2822 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2823 udelay(EEPROM_DELAY);
2824 }
2825
2826 /* Next, we need to send the byte address to read from */
2827 for (i = 7; i >= 0; i--) {
2828 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2829 udelay(EEPROM_DELAY);
2830 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2831 udelay(EEPROM_DELAY);
2832 }
2833
2834 j = 0;
2835
2836 val &= 0xFFFFF7FF; /* Turn off write enable */
2837 he_writel(he_dev, val, HOST_CNTL);
2838
2839 /* Now, we can read data from the EEPROM by clocking it in */
2840 for (i = 7; i >= 0; i--) {
2841 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2842 udelay(EEPROM_DELAY);
2843 tmp_read = he_readl(he_dev, HOST_CNTL);
2844 byte_read |= (unsigned char)
2845 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2846 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2847 udelay(EEPROM_DELAY);
2848 }
2849
2850 he_writel(he_dev, val | ID_CS, HOST_CNTL);
2851 udelay(EEPROM_DELAY);
2852
2853 return byte_read;
2854}
2855
2856MODULE_LICENSE("GPL");
2857MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2858MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2859module_param(disable64, bool, 0);
2860MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2861module_param(nvpibits, short, 0);
2862MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2863module_param(nvcibits, short, 0);
2864MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2865module_param(rx_skb_reserve, short, 0);
2866MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2867module_param(irq_coalesce, bool, 0);
2868MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2869module_param(sdh, bool, 0);
2870MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2871
2872static struct pci_device_id he_pci_tbl[] = {
2873 { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2874 { 0, }
2875};
2876
2877MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2878
2879static struct pci_driver he_driver = {
2880 .name = "he",
2881 .probe = he_init_one,
2882 .remove = __devexit_p(he_remove_one),
2883 .id_table = he_pci_tbl,
2884};
2885
2886static int __init he_init(void)
2887{
2888 return pci_register_driver(&he_driver);
2889}
2890
2891static void __exit he_cleanup(void)
2892{
2893 pci_unregister_driver(&he_driver);
2894}
2895
2896module_init(he_init);
2897module_exit(he_cleanup);
1/*
2
3 he.c
4
5 ForeRunnerHE ATM Adapter driver for ATM on Linux
6 Copyright (C) 1999-2001 Naval Research Laboratory
7
8 This library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Lesser General Public
10 License as published by the Free Software Foundation; either
11 version 2.1 of the License, or (at your option) any later version.
12
13 This library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
17
18 You should have received a copy of the GNU Lesser General Public
19 License along with this library; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21
22*/
23
24/*
25
26 he.c
27
28 ForeRunnerHE ATM Adapter driver for ATM on Linux
29 Copyright (C) 1999-2001 Naval Research Laboratory
30
31 Permission to use, copy, modify and distribute this software and its
32 documentation is hereby granted, provided that both the copyright
33 notice and this permission notice appear in all copies of the software,
34 derivative works or modified versions, and any portions thereof, and
35 that both notices appear in supporting documentation.
36
37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39 RESULTING FROM THE USE OF THIS SOFTWARE.
40
41 This driver was written using the "Programmer's Reference Manual for
42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43
44 AUTHORS:
45 chas williams <chas@cmf.nrl.navy.mil>
46 eric kinzie <ekinzie@cmf.nrl.navy.mil>
47
48 NOTES:
49 4096 supported 'connections'
50 group 0 is used for all traffic
51 interrupt queue 0 is used for all interrupts
52 aal0 support (based on work from ulrich.u.muller@nokia.com)
53
54 */
55
56#include <linux/module.h>
57#include <linux/kernel.h>
58#include <linux/skbuff.h>
59#include <linux/pci.h>
60#include <linux/errno.h>
61#include <linux/types.h>
62#include <linux/string.h>
63#include <linux/delay.h>
64#include <linux/init.h>
65#include <linux/mm.h>
66#include <linux/sched.h>
67#include <linux/timer.h>
68#include <linux/interrupt.h>
69#include <linux/dma-mapping.h>
70#include <linux/bitmap.h>
71#include <linux/slab.h>
72#include <asm/io.h>
73#include <asm/byteorder.h>
74#include <asm/uaccess.h>
75
76#include <linux/atmdev.h>
77#include <linux/atm.h>
78#include <linux/sonet.h>
79
80#undef USE_SCATTERGATHER
81#undef USE_CHECKSUM_HW /* still confused about this */
82/* #undef HE_DEBUG */
83
84#include "he.h"
85#include "suni.h"
86#include <linux/atm_he.h>
87
88#define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89
90#ifdef HE_DEBUG
91#define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92#else /* !HE_DEBUG */
93#define HPRINTK(fmt,args...) do { } while (0)
94#endif /* HE_DEBUG */
95
96/* declarations */
97
98static int he_open(struct atm_vcc *vcc);
99static void he_close(struct atm_vcc *vcc);
100static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102static irqreturn_t he_irq_handler(int irq, void *dev_id);
103static void he_tasklet(unsigned long data);
104static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105static int he_start(struct atm_dev *dev);
106static void he_stop(struct he_dev *dev);
107static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109
110static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111
112/* globals */
113
114static struct he_dev *he_devs;
115static bool disable64;
116static short nvpibits = -1;
117static short nvcibits = -1;
118static short rx_skb_reserve = 16;
119static bool irq_coalesce = true;
120static bool sdh;
121
122/* Read from EEPROM = 0000 0011b */
123static unsigned int readtab[] = {
124 CS_HIGH | CLK_HIGH,
125 CS_LOW | CLK_LOW,
126 CLK_HIGH, /* 0 */
127 CLK_LOW,
128 CLK_HIGH, /* 0 */
129 CLK_LOW,
130 CLK_HIGH, /* 0 */
131 CLK_LOW,
132 CLK_HIGH, /* 0 */
133 CLK_LOW,
134 CLK_HIGH, /* 0 */
135 CLK_LOW,
136 CLK_HIGH, /* 0 */
137 CLK_LOW | SI_HIGH,
138 CLK_HIGH | SI_HIGH, /* 1 */
139 CLK_LOW | SI_HIGH,
140 CLK_HIGH | SI_HIGH /* 1 */
141};
142
143/* Clock to read from/write to the EEPROM */
144static unsigned int clocktab[] = {
145 CLK_LOW,
146 CLK_HIGH,
147 CLK_LOW,
148 CLK_HIGH,
149 CLK_LOW,
150 CLK_HIGH,
151 CLK_LOW,
152 CLK_HIGH,
153 CLK_LOW,
154 CLK_HIGH,
155 CLK_LOW,
156 CLK_HIGH,
157 CLK_LOW,
158 CLK_HIGH,
159 CLK_LOW,
160 CLK_HIGH,
161 CLK_LOW
162};
163
164static struct atmdev_ops he_ops =
165{
166 .open = he_open,
167 .close = he_close,
168 .ioctl = he_ioctl,
169 .send = he_send,
170 .phy_put = he_phy_put,
171 .phy_get = he_phy_get,
172 .proc_read = he_proc_read,
173 .owner = THIS_MODULE
174};
175
176#define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177#define he_readl(dev, reg) readl((dev)->membase + (reg))
178
179/* section 2.12 connection memory access */
180
181static __inline__ void
182he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 unsigned flags)
184{
185 he_writel(he_dev, val, CON_DAT);
186 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
187 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189}
190
191#define he_writel_rcm(dev, val, reg) \
192 he_writel_internal(dev, val, reg, CON_CTL_RCM)
193
194#define he_writel_tcm(dev, val, reg) \
195 he_writel_internal(dev, val, reg, CON_CTL_TCM)
196
197#define he_writel_mbox(dev, val, reg) \
198 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199
200static unsigned
201he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202{
203 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 return he_readl(he_dev, CON_DAT);
206}
207
208#define he_readl_rcm(dev, reg) \
209 he_readl_internal(dev, reg, CON_CTL_RCM)
210
211#define he_readl_tcm(dev, reg) \
212 he_readl_internal(dev, reg, CON_CTL_TCM)
213
214#define he_readl_mbox(dev, reg) \
215 he_readl_internal(dev, reg, CON_CTL_MBOX)
216
217
218/* figure 2.2 connection id */
219
220#define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
221
222/* 2.5.1 per connection transmit state registers */
223
224#define he_writel_tsr0(dev, val, cid) \
225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226#define he_readl_tsr0(dev, cid) \
227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228
229#define he_writel_tsr1(dev, val, cid) \
230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231
232#define he_writel_tsr2(dev, val, cid) \
233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234
235#define he_writel_tsr3(dev, val, cid) \
236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237
238#define he_writel_tsr4(dev, val, cid) \
239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240
241 /* from page 2-20
242 *
243 * NOTE While the transmit connection is active, bits 23 through 0
244 * of this register must not be written by the host. Byte
245 * enables should be used during normal operation when writing
246 * the most significant byte.
247 */
248
249#define he_writel_tsr4_upper(dev, val, cid) \
250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 CON_CTL_TCM \
252 | CON_BYTE_DISABLE_2 \
253 | CON_BYTE_DISABLE_1 \
254 | CON_BYTE_DISABLE_0)
255
256#define he_readl_tsr4(dev, cid) \
257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258
259#define he_writel_tsr5(dev, val, cid) \
260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261
262#define he_writel_tsr6(dev, val, cid) \
263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264
265#define he_writel_tsr7(dev, val, cid) \
266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267
268
269#define he_writel_tsr8(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271
272#define he_writel_tsr9(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274
275#define he_writel_tsr10(dev, val, cid) \
276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277
278#define he_writel_tsr11(dev, val, cid) \
279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280
281
282#define he_writel_tsr12(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284
285#define he_writel_tsr13(dev, val, cid) \
286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287
288
289#define he_writel_tsr14(dev, val, cid) \
290 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291
292#define he_writel_tsr14_upper(dev, val, cid) \
293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 CON_CTL_TCM \
295 | CON_BYTE_DISABLE_2 \
296 | CON_BYTE_DISABLE_1 \
297 | CON_BYTE_DISABLE_0)
298
299/* 2.7.1 per connection receive state registers */
300
301#define he_writel_rsr0(dev, val, cid) \
302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303#define he_readl_rsr0(dev, cid) \
304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305
306#define he_writel_rsr1(dev, val, cid) \
307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308
309#define he_writel_rsr2(dev, val, cid) \
310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311
312#define he_writel_rsr3(dev, val, cid) \
313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314
315#define he_writel_rsr4(dev, val, cid) \
316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317
318#define he_writel_rsr5(dev, val, cid) \
319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320
321#define he_writel_rsr6(dev, val, cid) \
322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323
324#define he_writel_rsr7(dev, val, cid) \
325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326
327static __inline__ struct atm_vcc*
328__find_vcc(struct he_dev *he_dev, unsigned cid)
329{
330 struct hlist_head *head;
331 struct atm_vcc *vcc;
332 struct sock *s;
333 short vpi;
334 int vci;
335
336 vpi = cid >> he_dev->vcibits;
337 vci = cid & ((1 << he_dev->vcibits) - 1);
338 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339
340 sk_for_each(s, head) {
341 vcc = atm_sk(s);
342 if (vcc->dev == he_dev->atm_dev &&
343 vcc->vci == vci && vcc->vpi == vpi &&
344 vcc->qos.rxtp.traffic_class != ATM_NONE) {
345 return vcc;
346 }
347 }
348 return NULL;
349}
350
351static int he_init_one(struct pci_dev *pci_dev,
352 const struct pci_device_id *pci_ent)
353{
354 struct atm_dev *atm_dev = NULL;
355 struct he_dev *he_dev = NULL;
356 int err = 0;
357
358 printk(KERN_INFO "ATM he driver\n");
359
360 if (pci_enable_device(pci_dev))
361 return -EIO;
362 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
363 printk(KERN_WARNING "he: no suitable dma available\n");
364 err = -EIO;
365 goto init_one_failure;
366 }
367
368 atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369 if (!atm_dev) {
370 err = -ENODEV;
371 goto init_one_failure;
372 }
373 pci_set_drvdata(pci_dev, atm_dev);
374
375 he_dev = kzalloc(sizeof(struct he_dev),
376 GFP_KERNEL);
377 if (!he_dev) {
378 err = -ENOMEM;
379 goto init_one_failure;
380 }
381 he_dev->pci_dev = pci_dev;
382 he_dev->atm_dev = atm_dev;
383 he_dev->atm_dev->dev_data = he_dev;
384 atm_dev->dev_data = he_dev;
385 he_dev->number = atm_dev->number;
386 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387 spin_lock_init(&he_dev->global_lock);
388
389 if (he_start(atm_dev)) {
390 he_stop(he_dev);
391 err = -ENODEV;
392 goto init_one_failure;
393 }
394 he_dev->next = NULL;
395 if (he_devs)
396 he_dev->next = he_devs;
397 he_devs = he_dev;
398 return 0;
399
400init_one_failure:
401 if (atm_dev)
402 atm_dev_deregister(atm_dev);
403 kfree(he_dev);
404 pci_disable_device(pci_dev);
405 return err;
406}
407
408static void he_remove_one(struct pci_dev *pci_dev)
409{
410 struct atm_dev *atm_dev;
411 struct he_dev *he_dev;
412
413 atm_dev = pci_get_drvdata(pci_dev);
414 he_dev = HE_DEV(atm_dev);
415
416 /* need to remove from he_devs */
417
418 he_stop(he_dev);
419 atm_dev_deregister(atm_dev);
420 kfree(he_dev);
421
422 pci_disable_device(pci_dev);
423}
424
425
426static unsigned
427rate_to_atmf(unsigned rate) /* cps to atm forum format */
428{
429#define NONZERO (1 << 14)
430
431 unsigned exp = 0;
432
433 if (rate == 0)
434 return 0;
435
436 rate <<= 9;
437 while (rate > 0x3ff) {
438 ++exp;
439 rate >>= 1;
440 }
441
442 return (NONZERO | (exp << 9) | (rate & 0x1ff));
443}
444
445static void he_init_rx_lbfp0(struct he_dev *he_dev)
446{
447 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
448 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
449 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
450 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
451
452 lbufd_index = 0;
453 lbm_offset = he_readl(he_dev, RCMLBM_BA);
454
455 he_writel(he_dev, lbufd_index, RLBF0_H);
456
457 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
458 lbufd_index += 2;
459 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
460
461 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
462 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
463
464 if (++lbuf_count == lbufs_per_row) {
465 lbuf_count = 0;
466 row_offset += he_dev->bytes_per_row;
467 }
468 lbm_offset += 4;
469 }
470
471 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
472 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
473}
474
475static void he_init_rx_lbfp1(struct he_dev *he_dev)
476{
477 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
478 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
479 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
480 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
481
482 lbufd_index = 1;
483 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
484
485 he_writel(he_dev, lbufd_index, RLBF1_H);
486
487 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
488 lbufd_index += 2;
489 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
490
491 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
492 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
493
494 if (++lbuf_count == lbufs_per_row) {
495 lbuf_count = 0;
496 row_offset += he_dev->bytes_per_row;
497 }
498 lbm_offset += 4;
499 }
500
501 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
502 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
503}
504
505static void he_init_tx_lbfp(struct he_dev *he_dev)
506{
507 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
511
512 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
514
515 he_writel(he_dev, lbufd_index, TLBF_H);
516
517 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
518 lbufd_index += 1;
519 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
520
521 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
523
524 if (++lbuf_count == lbufs_per_row) {
525 lbuf_count = 0;
526 row_offset += he_dev->bytes_per_row;
527 }
528 lbm_offset += 2;
529 }
530
531 he_writel(he_dev, lbufd_index - 1, TLBF_T);
532}
533
534static int he_init_tpdrq(struct he_dev *he_dev)
535{
536 he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
537 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
538 &he_dev->tpdrq_phys, GFP_KERNEL);
539 if (he_dev->tpdrq_base == NULL) {
540 hprintk("failed to alloc tpdrq\n");
541 return -ENOMEM;
542 }
543
544 he_dev->tpdrq_tail = he_dev->tpdrq_base;
545 he_dev->tpdrq_head = he_dev->tpdrq_base;
546
547 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
548 he_writel(he_dev, 0, TPDRQ_T);
549 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
550
551 return 0;
552}
553
554static void he_init_cs_block(struct he_dev *he_dev)
555{
556 unsigned clock, rate, delta;
557 int reg;
558
559 /* 5.1.7 cs block initialization */
560
561 for (reg = 0; reg < 0x20; ++reg)
562 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
563
564 /* rate grid timer reload values */
565
566 clock = he_is622(he_dev) ? 66667000 : 50000000;
567 rate = he_dev->atm_dev->link_rate;
568 delta = rate / 16 / 2;
569
570 for (reg = 0; reg < 0x10; ++reg) {
571 /* 2.4 internal transmit function
572 *
573 * we initialize the first row in the rate grid.
574 * values are period (in clock cycles) of timer
575 */
576 unsigned period = clock / rate;
577
578 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
579 rate -= delta;
580 }
581
582 if (he_is622(he_dev)) {
583 /* table 5.2 (4 cells per lbuf) */
584 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
585 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
586 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
587 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
588 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
589
590 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
591 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
592 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
593 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
594 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
595 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
596 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
597
598 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
599
600 /* table 5.8 */
601 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
602 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
603 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
604 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
605 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
606 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
607
608 /* table 5.9 */
609 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
610 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
611 } else {
612 /* table 5.1 (4 cells per lbuf) */
613 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
614 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
615 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
616 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
617 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
618
619 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
620 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
621 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
622 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
623 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
624 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
625 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
626
627 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
628
629 /* table 5.8 */
630 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
631 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
632 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
633 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
634 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
635 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
636
637 /* table 5.9 */
638 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
639 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
640 }
641
642 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
643
644 for (reg = 0; reg < 0x8; ++reg)
645 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
646
647}
648
649static int he_init_cs_block_rcm(struct he_dev *he_dev)
650{
651 unsigned (*rategrid)[16][16];
652 unsigned rate, delta;
653 int i, j, reg;
654
655 unsigned rate_atmf, exp, man;
656 unsigned long long rate_cps;
657 int mult, buf, buf_limit = 4;
658
659 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
660 if (!rategrid)
661 return -ENOMEM;
662
663 /* initialize rate grid group table */
664
665 for (reg = 0x0; reg < 0xff; ++reg)
666 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
667
668 /* initialize rate controller groups */
669
670 for (reg = 0x100; reg < 0x1ff; ++reg)
671 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
672
673 /* initialize tNrm lookup table */
674
675 /* the manual makes reference to a routine in a sample driver
676 for proper configuration; fortunately, we only need this
677 in order to support abr connection */
678
679 /* initialize rate to group table */
680
681 rate = he_dev->atm_dev->link_rate;
682 delta = rate / 32;
683
684 /*
685 * 2.4 transmit internal functions
686 *
687 * we construct a copy of the rate grid used by the scheduler
688 * in order to construct the rate to group table below
689 */
690
691 for (j = 0; j < 16; j++) {
692 (*rategrid)[0][j] = rate;
693 rate -= delta;
694 }
695
696 for (i = 1; i < 16; i++)
697 for (j = 0; j < 16; j++)
698 if (i > 14)
699 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
700 else
701 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
702
703 /*
704 * 2.4 transmit internal function
705 *
706 * this table maps the upper 5 bits of exponent and mantissa
707 * of the atm forum representation of the rate into an index
708 * on rate grid
709 */
710
711 rate_atmf = 0;
712 while (rate_atmf < 0x400) {
713 man = (rate_atmf & 0x1f) << 4;
714 exp = rate_atmf >> 5;
715
716 /*
717 instead of '/ 512', use '>> 9' to prevent a call
718 to divdu3 on x86 platforms
719 */
720 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
721
722 if (rate_cps < 10)
723 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
724
725 for (i = 255; i > 0; i--)
726 if ((*rategrid)[i/16][i%16] >= rate_cps)
727 break; /* pick nearest rate instead? */
728
729 /*
730 * each table entry is 16 bits: (rate grid index (8 bits)
731 * and a buffer limit (8 bits)
732 * there are two table entries in each 32-bit register
733 */
734
735#ifdef notdef
736 buf = rate_cps * he_dev->tx_numbuffs /
737 (he_dev->atm_dev->link_rate * 2);
738#else
739 /* this is pretty, but avoids _divdu3 and is mostly correct */
740 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
741 if (rate_cps > (272 * mult))
742 buf = 4;
743 else if (rate_cps > (204 * mult))
744 buf = 3;
745 else if (rate_cps > (136 * mult))
746 buf = 2;
747 else if (rate_cps > (68 * mult))
748 buf = 1;
749 else
750 buf = 0;
751#endif
752 if (buf > buf_limit)
753 buf = buf_limit;
754 reg = (reg << 16) | ((i << 8) | buf);
755
756#define RTGTBL_OFFSET 0x400
757
758 if (rate_atmf & 0x1)
759 he_writel_rcm(he_dev, reg,
760 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
761
762 ++rate_atmf;
763 }
764
765 kfree(rategrid);
766 return 0;
767}
768
769static int he_init_group(struct he_dev *he_dev, int group)
770{
771 struct he_buff *heb, *next;
772 dma_addr_t mapping;
773 int i;
774
775 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
776 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
777 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
778 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
779 G0_RBPS_BS + (group * 32));
780
781 /* bitmap table */
782 he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
783 * sizeof(unsigned long), GFP_KERNEL);
784 if (!he_dev->rbpl_table) {
785 hprintk("unable to allocate rbpl bitmap table\n");
786 return -ENOMEM;
787 }
788 bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
789
790 /* rbpl_virt 64-bit pointers */
791 he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
792 * sizeof(struct he_buff *), GFP_KERNEL);
793 if (!he_dev->rbpl_virt) {
794 hprintk("unable to allocate rbpl virt table\n");
795 goto out_free_rbpl_table;
796 }
797
798 /* large buffer pool */
799 he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
800 CONFIG_RBPL_BUFSIZE, 64, 0);
801 if (he_dev->rbpl_pool == NULL) {
802 hprintk("unable to create rbpl pool\n");
803 goto out_free_rbpl_virt;
804 }
805
806 he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
807 CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
808 &he_dev->rbpl_phys, GFP_KERNEL);
809 if (he_dev->rbpl_base == NULL) {
810 hprintk("failed to alloc rbpl_base\n");
811 goto out_destroy_rbpl_pool;
812 }
813
814 INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
815
816 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
817
818 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
819 if (!heb)
820 goto out_free_rbpl;
821 heb->mapping = mapping;
822 list_add(&heb->entry, &he_dev->rbpl_outstanding);
823
824 set_bit(i, he_dev->rbpl_table);
825 he_dev->rbpl_virt[i] = heb;
826 he_dev->rbpl_hint = i + 1;
827 he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
828 he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
829 }
830 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
831
832 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
833 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
834 G0_RBPL_T + (group * 32));
835 he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
836 G0_RBPL_BS + (group * 32));
837 he_writel(he_dev,
838 RBP_THRESH(CONFIG_RBPL_THRESH) |
839 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
840 RBP_INT_ENB,
841 G0_RBPL_QI + (group * 32));
842
843 /* rx buffer ready queue */
844
845 he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
846 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
847 &he_dev->rbrq_phys, GFP_KERNEL);
848 if (he_dev->rbrq_base == NULL) {
849 hprintk("failed to allocate rbrq\n");
850 goto out_free_rbpl;
851 }
852
853 he_dev->rbrq_head = he_dev->rbrq_base;
854 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
855 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
856 he_writel(he_dev,
857 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
858 G0_RBRQ_Q + (group * 16));
859 if (irq_coalesce) {
860 hprintk("coalescing interrupts\n");
861 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
862 G0_RBRQ_I + (group * 16));
863 } else
864 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
865 G0_RBRQ_I + (group * 16));
866
867 /* tx buffer ready queue */
868
869 he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
870 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
871 &he_dev->tbrq_phys, GFP_KERNEL);
872 if (he_dev->tbrq_base == NULL) {
873 hprintk("failed to allocate tbrq\n");
874 goto out_free_rbpq_base;
875 }
876
877 he_dev->tbrq_head = he_dev->tbrq_base;
878
879 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
880 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
881 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
882 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
883
884 return 0;
885
886out_free_rbpq_base:
887 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
888 sizeof(struct he_rbrq), he_dev->rbrq_base,
889 he_dev->rbrq_phys);
890out_free_rbpl:
891 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
892 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
893
894 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
895 sizeof(struct he_rbp), he_dev->rbpl_base,
896 he_dev->rbpl_phys);
897out_destroy_rbpl_pool:
898 dma_pool_destroy(he_dev->rbpl_pool);
899out_free_rbpl_virt:
900 kfree(he_dev->rbpl_virt);
901out_free_rbpl_table:
902 kfree(he_dev->rbpl_table);
903
904 return -ENOMEM;
905}
906
907static int he_init_irq(struct he_dev *he_dev)
908{
909 int i;
910
911 /* 2.9.3.5 tail offset for each interrupt queue is located after the
912 end of the interrupt queue */
913
914 he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
915 (CONFIG_IRQ_SIZE + 1)
916 * sizeof(struct he_irq),
917 &he_dev->irq_phys,
918 GFP_KERNEL);
919 if (he_dev->irq_base == NULL) {
920 hprintk("failed to allocate irq\n");
921 return -ENOMEM;
922 }
923 he_dev->irq_tailoffset = (unsigned *)
924 &he_dev->irq_base[CONFIG_IRQ_SIZE];
925 *he_dev->irq_tailoffset = 0;
926 he_dev->irq_head = he_dev->irq_base;
927 he_dev->irq_tail = he_dev->irq_base;
928
929 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
930 he_dev->irq_base[i].isw = ITYPE_INVALID;
931
932 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
933 he_writel(he_dev,
934 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
935 IRQ0_HEAD);
936 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
937 he_writel(he_dev, 0x0, IRQ0_DATA);
938
939 he_writel(he_dev, 0x0, IRQ1_BASE);
940 he_writel(he_dev, 0x0, IRQ1_HEAD);
941 he_writel(he_dev, 0x0, IRQ1_CNTL);
942 he_writel(he_dev, 0x0, IRQ1_DATA);
943
944 he_writel(he_dev, 0x0, IRQ2_BASE);
945 he_writel(he_dev, 0x0, IRQ2_HEAD);
946 he_writel(he_dev, 0x0, IRQ2_CNTL);
947 he_writel(he_dev, 0x0, IRQ2_DATA);
948
949 he_writel(he_dev, 0x0, IRQ3_BASE);
950 he_writel(he_dev, 0x0, IRQ3_HEAD);
951 he_writel(he_dev, 0x0, IRQ3_CNTL);
952 he_writel(he_dev, 0x0, IRQ3_DATA);
953
954 /* 2.9.3.2 interrupt queue mapping registers */
955
956 he_writel(he_dev, 0x0, GRP_10_MAP);
957 he_writel(he_dev, 0x0, GRP_32_MAP);
958 he_writel(he_dev, 0x0, GRP_54_MAP);
959 he_writel(he_dev, 0x0, GRP_76_MAP);
960
961 if (request_irq(he_dev->pci_dev->irq,
962 he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
963 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
964 return -EINVAL;
965 }
966
967 he_dev->irq = he_dev->pci_dev->irq;
968
969 return 0;
970}
971
972static int he_start(struct atm_dev *dev)
973{
974 struct he_dev *he_dev;
975 struct pci_dev *pci_dev;
976 unsigned long membase;
977
978 u16 command;
979 u32 gen_cntl_0, host_cntl, lb_swap;
980 u8 cache_size, timer;
981
982 unsigned err;
983 unsigned int status, reg;
984 int i, group;
985
986 he_dev = HE_DEV(dev);
987 pci_dev = he_dev->pci_dev;
988
989 membase = pci_resource_start(pci_dev, 0);
990 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
991
992 /*
993 * pci bus controller initialization
994 */
995
996 /* 4.3 pci bus controller-specific initialization */
997 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
998 hprintk("can't read GEN_CNTL_0\n");
999 return -EINVAL;
1000 }
1001 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1002 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1003 hprintk("can't write GEN_CNTL_0.\n");
1004 return -EINVAL;
1005 }
1006
1007 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1008 hprintk("can't read PCI_COMMAND.\n");
1009 return -EINVAL;
1010 }
1011
1012 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1013 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1014 hprintk("can't enable memory.\n");
1015 return -EINVAL;
1016 }
1017
1018 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1019 hprintk("can't read cache line size?\n");
1020 return -EINVAL;
1021 }
1022
1023 if (cache_size < 16) {
1024 cache_size = 16;
1025 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1026 hprintk("can't set cache line size to %d\n", cache_size);
1027 }
1028
1029 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1030 hprintk("can't read latency timer?\n");
1031 return -EINVAL;
1032 }
1033
1034 /* from table 3.9
1035 *
1036 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1037 *
1038 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1039 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1040 *
1041 */
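/*
 * worked out with the figures above: 1 + 16 (AVG_LAT) + 192
 * (BURST_SIZE in bus clocks) = 209, hence LAT_TIMER below
 */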
1042#define LAT_TIMER 209
1043 if (timer < LAT_TIMER) {
1044 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1045 timer = LAT_TIMER;
1046 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1047 hprintk("can't set latency timer to %d\n", timer);
1048 }
1049
1050 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1051 hprintk("can't set up page mapping\n");
1052 return -EINVAL;
1053 }
1054
1055 /* 4.4 card reset */
1056 he_writel(he_dev, 0x0, RESET_CNTL);
1057 he_writel(he_dev, 0xff, RESET_CNTL);
1058
1059 msleep(16); /* 16 ms */
1060 status = he_readl(he_dev, RESET_CNTL);
1061 if ((status & BOARD_RST_STATUS) == 0) {
1062 hprintk("reset failed\n");
1063 return -EINVAL;
1064 }
1065
1066 /* 4.5 set bus width */
1067 host_cntl = he_readl(he_dev, HOST_CNTL);
1068 if (host_cntl & PCI_BUS_SIZE64)
1069 gen_cntl_0 |= ENBL_64;
1070 else
1071 gen_cntl_0 &= ~ENBL_64;
1072
1073 if (disable64 == 1) {
1074 hprintk("disabling 64-bit pci bus transfers\n");
1075 gen_cntl_0 &= ~ENBL_64;
1076 }
1077
1078 if (gen_cntl_0 & ENBL_64)
1079 hprintk("64-bit transfers enabled\n");
1080
1081 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1082
1083 /* 4.7 read prom contents */
1084 for (i = 0; i < PROD_ID_LEN; ++i)
1085 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1086
1087 he_dev->media = read_prom_byte(he_dev, MEDIA);
1088
1089 for (i = 0; i < 6; ++i)
1090 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1091
1092 hprintk("%s%s, %pM\n", he_dev->prod_id,
1093 he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1094 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1095 ATM_OC12_PCR : ATM_OC3_PCR;
1096
1097 /* 4.6 set host endianness */
1098 lb_swap = he_readl(he_dev, LB_SWAP);
1099 if (he_is622(he_dev))
1100 lb_swap &= ~XFER_SIZE; /* 4 cells */
1101 else
1102 lb_swap |= XFER_SIZE; /* 8 cells */
1103#ifdef __BIG_ENDIAN
1104 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1105#else
1106 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1107 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1108#endif /* __BIG_ENDIAN */
1109 he_writel(he_dev, lb_swap, LB_SWAP);
1110
1111 /* 4.8 sdram controller initialization */
1112 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1113
1114 /* 4.9 initialize rnum value */
1115 lb_swap |= SWAP_RNUM_MAX(0xf);
1116 he_writel(he_dev, lb_swap, LB_SWAP);
1117
1118 /* 4.10 initialize the interrupt queues */
1119 if ((err = he_init_irq(he_dev)) != 0)
1120 return err;
1121
1122 /* 4.11 enable pci bus controller state machines */
1123 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1124 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1125 he_writel(he_dev, host_cntl, HOST_CNTL);
1126
1127 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1128 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1129
1130 /*
1131 * atm network controller initialization
1132 */
1133
1134 /* 5.1.1 generic configuration state */
1135
1136 /*
1137 * local (cell) buffer memory map
1138 *
1139 * HE155 HE622
1140 *
1141 * 0 ____________1023 bytes 0 _______________________2047 bytes
1142 * | | | | |
1143 * | utility | | rx0 | |
1144 * 5|____________| 255|___________________| u |
1145 * 6| | 256| | t |
1146 * | | | | i |
1147 * | rx0 | row | tx | l |
1148 * | | | | i |
1149 * | | 767|___________________| t |
1150 * 517|____________| 768| | y |
1151 * row 518| | | rx1 | |
1152 * | | 1023|___________________|___|
1153 * | |
1154 * | tx |
1155 * | |
1156 * | |
1157 * 1535|____________|
1158 * 1536| |
1159 * | rx1 |
1160 * 2047|____________|
1161 *
1162 */
1163
1164 /* total 4096 connections */
1165 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1166 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1167
1168 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1169 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1170 return -ENODEV;
1171 }
1172
1173 if (nvpibits != -1) {
1174 he_dev->vpibits = nvpibits;
1175 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1176 }
1177
1178 if (nvcibits != -1) {
1179 he_dev->vcibits = nvcibits;
1180 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1181 }
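/*
 * however the module parameters are set, vpibits + vcibits should end
 * up equal to HE_MAXCIDBITS, i.e. 2^HE_MAXCIDBITS connection ids --
 * the 4096 mentioned above
 */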
1182
1183
1184 if (he_is622(he_dev)) {
1185 he_dev->cells_per_row = 40;
1186 he_dev->bytes_per_row = 2048;
1187 he_dev->r0_numrows = 256;
1188 he_dev->tx_numrows = 512;
1189 he_dev->r1_numrows = 256;
1190 he_dev->r0_startrow = 0;
1191 he_dev->tx_startrow = 256;
1192 he_dev->r1_startrow = 768;
1193 } else {
1194 he_dev->cells_per_row = 20;
1195 he_dev->bytes_per_row = 1024;
1196 he_dev->r0_numrows = 512;
1197 he_dev->tx_numrows = 1018;
1198 he_dev->r1_numrows = 512;
1199 he_dev->r0_startrow = 6;
1200 he_dev->tx_startrow = 518;
1201 he_dev->r1_startrow = 1536;
1202 }
1203
1204 he_dev->cells_per_lbuf = 4;
1205 he_dev->buffer_limit = 4;
1206 he_dev->r0_numbuffs = he_dev->r0_numrows *
1207 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1208 if (he_dev->r0_numbuffs > 2560)
1209 he_dev->r0_numbuffs = 2560;
1210
1211 he_dev->r1_numbuffs = he_dev->r1_numrows *
1212 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1213 if (he_dev->r1_numbuffs > 2560)
1214 he_dev->r1_numbuffs = 2560;
1215
1216 he_dev->tx_numbuffs = he_dev->tx_numrows *
1217 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1218 if (he_dev->tx_numbuffs > 5120)
1219 he_dev->tx_numbuffs = 5120;
1220
1221 /* 5.1.2 configure hardware dependent registers */
1222
1223 he_writel(he_dev,
1224 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1225 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1226 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1227 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1228 LBARB);
1229
1230 he_writel(he_dev, BANK_ON |
1231 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1232 SDRAMCON);
1233
1234 he_writel(he_dev,
1235 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1236 RM_RW_WAIT(1), RCMCONFIG);
1237 he_writel(he_dev,
1238 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1239 TM_RW_WAIT(1), TCMCONFIG);
1240
1241 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1242
1243 he_writel(he_dev,
1244 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1245 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1246 RX_VALVP(he_dev->vpibits) |
1247 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1248
1249 he_writel(he_dev, DRF_THRESH(0x20) |
1250 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1251 TX_VCI_MASK(he_dev->vcibits) |
1252 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1253
1254 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1255
1256 he_writel(he_dev, PHY_INT_ENB |
1257 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1258 RH_CONFIG);
1259
1260 /* 5.1.3 initialize connection memory */
1261
1262 for (i = 0; i < TCM_MEM_SIZE; ++i)
1263 he_writel_tcm(he_dev, 0, i);
1264
1265 for (i = 0; i < RCM_MEM_SIZE; ++i)
1266 he_writel_rcm(he_dev, 0, i);
1267
1268 /*
1269 * transmit connection memory map
1270 *
1271 * tx memory
1272 * 0x0 ___________________
1273 * | |
1274 * | |
1275 * | TSRa |
1276 * | |
1277 * | |
1278 * 0x8000|___________________|
1279 * | |
1280 * | TSRb |
1281 * 0xc000|___________________|
1282 * | |
1283 * | TSRc |
1284 * 0xe000|___________________|
1285 * | TSRd |
1286 * 0xf000|___________________|
1287 * | tmABR |
1288 * 0x10000|___________________|
1289 * | |
1290 * | tmTPD |
1291 * |___________________|
1292 * | |
1293 * ....
1294 * 0x1ffff|___________________|
1295 *
1296 *
1297 */
1298
1299 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1300 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1301 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1302 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1303 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1304
1305
1306 /*
1307 * receive connection memory map
1308 *
1309 * 0x0 ___________________
1310 * | |
1311 * | |
1312 * | RSRa |
1313 * | |
1314 * | |
1315 * 0x8000|___________________|
1316 * | |
1317 * | rx0/1 |
1318 * | LBM | link lists of local
1319 * | tx | buffer memory
1320 * | |
1321 * 0xd000|___________________|
1322 * | |
1323 * | rmABR |
1324 * 0xe000|___________________|
1325 * | |
1326 * | RSRb |
1327 * |___________________|
1328 * | |
1329 * ....
1330 * 0xffff|___________________|
1331 */
1332
1333 he_writel(he_dev, 0x08000, RCMLBM_BA);
1334 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1335 he_writel(he_dev, 0x0d800, RCMABR_BA);
1336
1337 /* 5.1.4 initialize local buffer free pools linked lists */
1338
1339 he_init_rx_lbfp0(he_dev);
1340 he_init_rx_lbfp1(he_dev);
1341
1342 he_writel(he_dev, 0x0, RLBC_H);
1343 he_writel(he_dev, 0x0, RLBC_T);
1344 he_writel(he_dev, 0x0, RLBC_H2);
1345
1346 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1347 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1348
1349 he_init_tx_lbfp(he_dev);
1350
1351 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1352
1353 /* 5.1.5 initialize intermediate receive queues */
1354
1355 if (he_is622(he_dev)) {
1356 he_writel(he_dev, 0x000f, G0_INMQ_S);
1357 he_writel(he_dev, 0x200f, G0_INMQ_L);
1358
1359 he_writel(he_dev, 0x001f, G1_INMQ_S);
1360 he_writel(he_dev, 0x201f, G1_INMQ_L);
1361
1362 he_writel(he_dev, 0x002f, G2_INMQ_S);
1363 he_writel(he_dev, 0x202f, G2_INMQ_L);
1364
1365 he_writel(he_dev, 0x003f, G3_INMQ_S);
1366 he_writel(he_dev, 0x203f, G3_INMQ_L);
1367
1368 he_writel(he_dev, 0x004f, G4_INMQ_S);
1369 he_writel(he_dev, 0x204f, G4_INMQ_L);
1370
1371 he_writel(he_dev, 0x005f, G5_INMQ_S);
1372 he_writel(he_dev, 0x205f, G5_INMQ_L);
1373
1374 he_writel(he_dev, 0x006f, G6_INMQ_S);
1375 he_writel(he_dev, 0x206f, G6_INMQ_L);
1376
1377 he_writel(he_dev, 0x007f, G7_INMQ_S);
1378 he_writel(he_dev, 0x207f, G7_INMQ_L);
1379 } else {
1380 he_writel(he_dev, 0x0000, G0_INMQ_S);
1381 he_writel(he_dev, 0x0008, G0_INMQ_L);
1382
1383 he_writel(he_dev, 0x0001, G1_INMQ_S);
1384 he_writel(he_dev, 0x0009, G1_INMQ_L);
1385
1386 he_writel(he_dev, 0x0002, G2_INMQ_S);
1387 he_writel(he_dev, 0x000a, G2_INMQ_L);
1388
1389 he_writel(he_dev, 0x0003, G3_INMQ_S);
1390 he_writel(he_dev, 0x000b, G3_INMQ_L);
1391
1392 he_writel(he_dev, 0x0004, G4_INMQ_S);
1393 he_writel(he_dev, 0x000c, G4_INMQ_L);
1394
1395 he_writel(he_dev, 0x0005, G5_INMQ_S);
1396 he_writel(he_dev, 0x000d, G5_INMQ_L);
1397
1398 he_writel(he_dev, 0x0006, G6_INMQ_S);
1399 he_writel(he_dev, 0x000e, G6_INMQ_L);
1400
1401 he_writel(he_dev, 0x0007, G7_INMQ_S);
1402 he_writel(he_dev, 0x000f, G7_INMQ_L);
1403 }
1404
1405 /* 5.1.6 application tunable parameters */
1406
1407 he_writel(he_dev, 0x0, MCC);
1408 he_writel(he_dev, 0x0, OEC);
1409 he_writel(he_dev, 0x0, DCC);
1410 he_writel(he_dev, 0x0, CEC);
1411
1412 /* 5.1.7 cs block initialization */
1413
1414 he_init_cs_block(he_dev);
1415
1416 /* 5.1.8 cs block connection memory initialization */
1417
1418 if (he_init_cs_block_rcm(he_dev) < 0)
1419 return -ENOMEM;
1420
1421 /* 5.1.10 initialize host structures */
1422
1423 he_init_tpdrq(he_dev);
1424
1425 he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
1426 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1427 if (he_dev->tpd_pool == NULL) {
1428 hprintk("unable to create tpd dma_pool\n");
1429 return -ENOMEM;
1430 }
1431
1432 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1433
1434 if (he_init_group(he_dev, 0) != 0)
1435 return -ENOMEM;
1436
1437 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1438 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1439 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1440 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1441 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1442 G0_RBPS_BS + (group * 32));
1443
1444 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1445 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1446 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1447 G0_RBPL_QI + (group * 32));
1448 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1449
1450 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1451 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1452 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1453 G0_RBRQ_Q + (group * 16));
1454 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1455
1456 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1457 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1458 he_writel(he_dev, TBRQ_THRESH(0x1),
1459 G0_TBRQ_THRESH + (group * 16));
1460 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1461 }
1462
1463 /* host status page */
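/*
 * the adapter dma-writes the rbrq/tbrq tail offsets into this page
 * (see he_service_rbrq/he_service_tbrq below), hence the coherent
 * allocation
 */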
1464
1465 he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
1466 sizeof(struct he_hsp),
1467 &he_dev->hsp_phys, GFP_KERNEL);
1468 if (he_dev->hsp == NULL) {
1469 hprintk("failed to allocate host status page\n");
1470 return -ENOMEM;
1471 }
1472 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1473
1474 /* initialize framer */
1475
1476#ifdef CONFIG_ATM_HE_USE_SUNI
1477 if (he_isMM(he_dev))
1478 suni_init(he_dev->atm_dev);
1479 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1480 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1481#endif /* CONFIG_ATM_HE_USE_SUNI */
1482
1483 if (sdh) {
1484 /* this really should be in suni.c but for now... */
1485 int val;
1486
1487 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1488 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1489 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1490 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1491 }
1492
1493 /* 5.1.12 enable transmit and receive */
1494
1495 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1496 reg |= TX_ENABLE|ER_ENABLE;
1497 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1498
1499 reg = he_readl(he_dev, RC_CONFIG);
1500 reg |= RX_ENABLE;
1501 he_writel(he_dev, reg, RC_CONFIG);
1502
1503 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1504 he_dev->cs_stper[i].inuse = 0;
1505 he_dev->cs_stper[i].pcr = -1;
1506 }
1507 he_dev->total_bw = 0;
1508
1509
1510 /* atm linux initialization */
1511
1512 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1513 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1514
1515 he_dev->irq_peak = 0;
1516 he_dev->rbrq_peak = 0;
1517 he_dev->rbpl_peak = 0;
1518 he_dev->tbrq_peak = 0;
1519
1520 HPRINTK("hell bent for leather!\n");
1521
1522 return 0;
1523}
1524
1525static void
1526he_stop(struct he_dev *he_dev)
1527{
1528 struct he_buff *heb, *next;
1529 struct pci_dev *pci_dev;
1530 u32 gen_cntl_0, reg;
1531 u16 command;
1532
1533 pci_dev = he_dev->pci_dev;
1534
1535 /* disable interrupts */
1536
1537 if (he_dev->membase) {
1538 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1539 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1540 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1541
1542 tasklet_disable(&he_dev->tasklet);
1543
1544 /* disable recv and transmit */
1545
1546 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1547 reg &= ~(TX_ENABLE|ER_ENABLE);
1548 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1549
1550 reg = he_readl(he_dev, RC_CONFIG);
1551 reg &= ~(RX_ENABLE);
1552 he_writel(he_dev, reg, RC_CONFIG);
1553 }
1554
1555#ifdef CONFIG_ATM_HE_USE_SUNI
1556 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1557 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1558#endif /* CONFIG_ATM_HE_USE_SUNI */
1559
1560 if (he_dev->irq)
1561 free_irq(he_dev->irq, he_dev);
1562
1563 if (he_dev->irq_base)
1564 dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
1565 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1566
1567 if (he_dev->hsp)
1568 dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
1569 he_dev->hsp, he_dev->hsp_phys);
1570
1571 if (he_dev->rbpl_base) {
1572 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1573 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1574
1575 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
1576 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1577 }
1578
1579 kfree(he_dev->rbpl_virt);
1580 kfree(he_dev->rbpl_table);
1581 dma_pool_destroy(he_dev->rbpl_pool);
1582
1583 if (he_dev->rbrq_base)
1584 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1585 he_dev->rbrq_base, he_dev->rbrq_phys);
1586
1587 if (he_dev->tbrq_base)
1588 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1589 he_dev->tbrq_base, he_dev->tbrq_phys);
1590
1591 if (he_dev->tpdrq_base)
1592 dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1593 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1594
1595 dma_pool_destroy(he_dev->tpd_pool);
1596
1597 if (he_dev->pci_dev) {
1598 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1599 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1600 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1601 }
1602
1603 if (he_dev->membase)
1604 iounmap(he_dev->membase);
1605}
1606
1607static struct he_tpd *
1608__alloc_tpd(struct he_dev *he_dev)
1609{
1610 struct he_tpd *tpd;
1611 dma_addr_t mapping;
1612
1613 tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1614 if (tpd == NULL)
1615 return NULL;
1616
1617 tpd->status = TPD_ADDR(mapping);
1618 tpd->reserved = 0;
1619 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1620 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1621 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1622
1623 return tpd;
1624}
1625
1626#define AAL5_LEN(buf,len) \
1627 ((((unsigned char *)(buf))[(len)-6] << 8) | \
1628 (((unsigned char *)(buf))[(len)-5]))
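/*
 * the aal5 trailer is eight bytes: UU, CPI, a two byte length and a
 * four byte crc, so the length field sits at offsets len-6 and len-5
 * (big endian), which is what AAL5_LEN() above extracts
 */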
1629
1630/* 2.10.1.2 receive
1631 *
1632 * aal5 packets can optionally return the tcp checksum in the lower
1633 * 16 bits of the crc (RSR0_TCP_CKSUM)
1634 */
1635
1636#define TCP_CKSUM(buf,len) \
1637 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1638 (((unsigned char *)(buf))[(len-1)]))
1639
1640static int
1641he_service_rbrq(struct he_dev *he_dev, int group)
1642{
1643 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1644 ((unsigned long)he_dev->rbrq_base |
1645 he_dev->hsp->group[group].rbrq_tail);
1646 unsigned cid, lastcid = -1;
1647 struct sk_buff *skb;
1648 struct atm_vcc *vcc = NULL;
1649 struct he_vcc *he_vcc;
1650 struct he_buff *heb, *next;
1651 int i;
1652 int pdus_assembled = 0;
1653 int updated = 0;
1654
1655 read_lock(&vcc_sklist_lock);
1656 while (he_dev->rbrq_head != rbrq_tail) {
1657 ++updated;
1658
1659 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1660 he_dev->rbrq_head, group,
1661 RBRQ_ADDR(he_dev->rbrq_head),
1662 RBRQ_BUFLEN(he_dev->rbrq_head),
1663 RBRQ_CID(he_dev->rbrq_head),
1664 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1665 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1666 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1667 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1668 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1669 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1670
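/*
 * RBRQ_ADDR() carries the index we stored in the rbp entry when the
 * buffer was posted (see he_service_rbpl), so rbpl_virt[] maps it
 * back to the he_buff
 */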
1671 i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1672 heb = he_dev->rbpl_virt[i];
1673
1674 cid = RBRQ_CID(he_dev->rbrq_head);
1675 if (cid != lastcid)
1676 vcc = __find_vcc(he_dev, cid);
1677 lastcid = cid;
1678
1679 if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1680 hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
1681 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1682 clear_bit(i, he_dev->rbpl_table);
1683 list_del(&heb->entry);
1684 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1685 }
1686
1687 goto next_rbrq_entry;
1688 }
1689
1690 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1691 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1692 atomic_inc(&vcc->stats->rx_drop);
1693 goto return_host_buffers;
1694 }
1695
1696 heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1697 clear_bit(i, he_dev->rbpl_table);
1698 list_move_tail(&heb->entry, &he_vcc->buffers);
1699 he_vcc->pdu_len += heb->len;
1700
1701 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1702 lastcid = -1;
1703 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1704 wake_up(&he_vcc->rx_waitq);
1705 goto return_host_buffers;
1706 }
1707
1708 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1709 goto next_rbrq_entry;
1710
1711 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1712 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1713 HPRINTK("%s%s (%d.%d)\n",
1714 RBRQ_CRC_ERR(he_dev->rbrq_head)
1715 ? "CRC_ERR " : "",
1716 RBRQ_LEN_ERR(he_dev->rbrq_head)
1717 ? "LEN_ERR" : "",
1718 vcc->vpi, vcc->vci);
1719 atomic_inc(&vcc->stats->rx_err);
1720 goto return_host_buffers;
1721 }
1722
1723 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1724 GFP_ATOMIC);
1725 if (!skb) {
1726 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1727 goto return_host_buffers;
1728 }
1729
1730 if (rx_skb_reserve > 0)
1731 skb_reserve(skb, rx_skb_reserve);
1732
1733 __net_timestamp(skb);
1734
1735 list_for_each_entry(heb, &he_vcc->buffers, entry)
1736 memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1737
1738 switch (vcc->qos.aal) {
1739 case ATM_AAL0:
1740 /* 2.10.1.5 raw cell receive */
1741 skb->len = ATM_AAL0_SDU;
1742 skb_set_tail_pointer(skb, skb->len);
1743 break;
1744 case ATM_AAL5:
1745 /* 2.10.1.2 aal5 receive */
1746
1747 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1748 skb_set_tail_pointer(skb, skb->len);
1749#ifdef USE_CHECKSUM_HW
1750 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1751 skb->ip_summed = CHECKSUM_COMPLETE;
1752 skb->csum = TCP_CKSUM(skb->data,
1753 he_vcc->pdu_len);
1754 }
1755#endif
1756 break;
1757 }
1758
1759#ifdef should_never_happen
1760 if (skb->len > vcc->qos.rxtp.max_sdu)
1761 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1762#endif
1763
1764#ifdef notdef
1765 ATM_SKB(skb)->vcc = vcc;
1766#endif
1767 spin_unlock(&he_dev->global_lock);
1768 vcc->push(vcc, skb);
1769 spin_lock(&he_dev->global_lock);
1770
1771 atomic_inc(&vcc->stats->rx);
1772
1773return_host_buffers:
1774 ++pdus_assembled;
1775
1776 list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1777 dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1778 INIT_LIST_HEAD(&he_vcc->buffers);
1779 he_vcc->pdu_len = 0;
1780
1781next_rbrq_entry:
1782 he_dev->rbrq_head = (struct he_rbrq *)
1783 ((unsigned long) he_dev->rbrq_base |
1784 RBRQ_MASK(he_dev->rbrq_head + 1));
1785
1786 }
1787 read_unlock(&vcc_sklist_lock);
1788
1789 if (updated) {
1790 if (updated > he_dev->rbrq_peak)
1791 he_dev->rbrq_peak = updated;
1792
1793 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1794 G0_RBRQ_H + (group * 16));
1795 }
1796
1797 return pdus_assembled;
1798}
1799
1800static void
1801he_service_tbrq(struct he_dev *he_dev, int group)
1802{
1803 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1804 ((unsigned long)he_dev->tbrq_base |
1805 he_dev->hsp->group[group].tbrq_tail);
1806 struct he_tpd *tpd;
1807 int slot, updated = 0;
1808 struct he_tpd *__tpd;
1809
1810 /* 2.1.6 transmit buffer return queue */
1811
1812 while (he_dev->tbrq_head != tbrq_tail) {
1813 ++updated;
1814
1815 HPRINTK("tbrq%d 0x%x%s%s\n",
1816 group,
1817 TBRQ_TPD(he_dev->tbrq_head),
1818 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1819 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1820 tpd = NULL;
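/*
 * the tbrq entry only carries the tpd's dma address, so walk the
 * outstanding_tpds list (populated in __enqueue_tpd) to get back to
 * the virtual tpd
 */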
1821 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1822 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1823 tpd = __tpd;
1824 list_del(&__tpd->entry);
1825 break;
1826 }
1827 }
1828
1829 if (tpd == NULL) {
1830 hprintk("unable to locate tpd for dma buffer %x\n",
1831 TBRQ_TPD(he_dev->tbrq_head));
1832 goto next_tbrq_entry;
1833 }
1834
1835 if (TBRQ_EOS(he_dev->tbrq_head)) {
1836 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1837 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1838 if (tpd->vcc)
1839 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1840
1841 goto next_tbrq_entry;
1842 }
1843
1844 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1845 if (tpd->iovec[slot].addr)
1846 dma_unmap_single(&he_dev->pci_dev->dev,
1847 tpd->iovec[slot].addr,
1848 tpd->iovec[slot].len & TPD_LEN_MASK,
1849 DMA_TO_DEVICE);
1850 if (tpd->iovec[slot].len & TPD_LST)
1851 break;
1852
1853 }
1854
1855 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1856 if (tpd->vcc && tpd->vcc->pop)
1857 tpd->vcc->pop(tpd->vcc, tpd->skb);
1858 else
1859 dev_kfree_skb_any(tpd->skb);
1860 }
1861
1862next_tbrq_entry:
1863 if (tpd)
1864 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1865 he_dev->tbrq_head = (struct he_tbrq *)
1866 ((unsigned long) he_dev->tbrq_base |
1867 TBRQ_MASK(he_dev->tbrq_head + 1));
1868 }
1869
1870 if (updated) {
1871 if (updated > he_dev->tbrq_peak)
1872 he_dev->tbrq_peak = updated;
1873
1874 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1875 G0_TBRQ_H + (group * 16));
1876 }
1877}
1878
1879static void
1880he_service_rbpl(struct he_dev *he_dev, int group)
1881{
1882 struct he_rbp *new_tail;
1883 struct he_rbp *rbpl_head;
1884 struct he_buff *heb;
1885 dma_addr_t mapping;
1886 int i;
1887 int moved = 0;
1888
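/*
 * refill the group's large buffer pool: take a he_buff from the dma
 * pool, record it in rbpl_virt[] under a free index from the
 * rbpl_table bitmap (the adapter hands that index back in the rbrq
 * entry), and advance the rbpl tail toward the head
 */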
1889 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1890 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1891
1892 for (;;) {
1893 new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1894 RBPL_MASK(he_dev->rbpl_tail+1));
1895
1896 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1897 if (new_tail == rbpl_head)
1898 break;
1899
1900 i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1901 if (i > (RBPL_TABLE_SIZE - 1)) {
1902 i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1903 if (i > (RBPL_TABLE_SIZE - 1))
1904 break;
1905 }
1906 he_dev->rbpl_hint = i + 1;
1907
1908 heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
1909 if (!heb)
1910 break;
1911 heb->mapping = mapping;
1912 list_add(&heb->entry, &he_dev->rbpl_outstanding);
1913 he_dev->rbpl_virt[i] = heb;
1914 set_bit(i, he_dev->rbpl_table);
1915 new_tail->idx = i << RBP_IDX_OFFSET;
1916 new_tail->phys = mapping + offsetof(struct he_buff, data);
1917
1918 he_dev->rbpl_tail = new_tail;
1919 ++moved;
1920 }
1921
1922 if (moved)
1923 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1924}
1925
1926static void
1927he_tasklet(unsigned long data)
1928{
1929 unsigned long flags;
1930 struct he_dev *he_dev = (struct he_dev *) data;
1931 int group, type;
1932 int updated = 0;
1933
1934 HPRINTK("tasklet (0x%lx)\n", data);
1935 spin_lock_irqsave(&he_dev->global_lock, flags);
1936
1937 while (he_dev->irq_head != he_dev->irq_tail) {
1938 ++updated;
1939
1940 type = ITYPE_TYPE(he_dev->irq_head->isw);
1941 group = ITYPE_GROUP(he_dev->irq_head->isw);
1942
1943 switch (type) {
1944 case ITYPE_RBRQ_THRESH:
1945 HPRINTK("rbrq%d threshold\n", group);
1946 /* fall through */
1947 case ITYPE_RBRQ_TIMER:
1948 if (he_service_rbrq(he_dev, group))
1949 he_service_rbpl(he_dev, group);
1950 break;
1951 case ITYPE_TBRQ_THRESH:
1952 HPRINTK("tbrq%d threshold\n", group);
1953 /* fall through */
1954 case ITYPE_TPD_COMPLETE:
1955 he_service_tbrq(he_dev, group);
1956 break;
1957 case ITYPE_RBPL_THRESH:
1958 he_service_rbpl(he_dev, group);
1959 break;
1960 case ITYPE_RBPS_THRESH:
1961 /* shouldn't happen unless small buffers enabled */
1962 break;
1963 case ITYPE_PHY:
1964 HPRINTK("phy interrupt\n");
1965#ifdef CONFIG_ATM_HE_USE_SUNI
1966 spin_unlock_irqrestore(&he_dev->global_lock, flags);
1967 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1968 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1969 spin_lock_irqsave(&he_dev->global_lock, flags);
1970#endif
1971 break;
1972 case ITYPE_OTHER:
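/* for ITYPE_OTHER the group bits select the actual event (parity error, abort) */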
1973 switch (type|group) {
1974 case ITYPE_PARITY:
1975 hprintk("parity error\n");
1976 break;
1977 case ITYPE_ABORT:
1978 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1979 break;
1980 }
1981 break;
1982 case ITYPE_TYPE(ITYPE_INVALID):
1983 /* see 8.1.1 -- check all queues */
1984
1985 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1986
1987 he_service_rbrq(he_dev, 0);
1988 he_service_rbpl(he_dev, 0);
1989 he_service_tbrq(he_dev, 0);
1990 break;
1991 default:
1992 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1993 }
1994
1995 he_dev->irq_head->isw = ITYPE_INVALID;
1996
1997 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
1998 }
1999
2000 if (updated) {
2001 if (updated > he_dev->irq_peak)
2002 he_dev->irq_peak = updated;
2003
2004 he_writel(he_dev,
2005 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2006 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2007 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2008 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2009 }
2010 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2011}
2012
2013static irqreturn_t
2014he_irq_handler(int irq, void *dev_id)
2015{
2016 unsigned long flags;
2017 struct he_dev *he_dev = (struct he_dev * )dev_id;
2018 int handled = 0;
2019
2020 if (he_dev == NULL)
2021 return IRQ_NONE;
2022
2023 spin_lock_irqsave(&he_dev->global_lock, flags);
2024
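/*
 * the adapter writes the current tail index into the word following
 * the queue (irq_tailoffset, set up in he_init_irq); the << 2 turns
 * that into a byte offset, each he_irq entry apparently being a
 * single 32-bit word
 */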
2025 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2026 (*he_dev->irq_tailoffset << 2));
2027
2028 if (he_dev->irq_tail == he_dev->irq_head) {
2029 HPRINTK("tailoffset not updated?\n");
2030 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2031 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2032 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2033 }
2034
2035#ifdef DEBUG
2036 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2037 hprintk("spurious (or shared) interrupt?\n");
2038#endif
2039
2040 if (he_dev->irq_head != he_dev->irq_tail) {
2041 handled = 1;
2042 tasklet_schedule(&he_dev->tasklet);
2043 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2044 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2045 }
2046 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2047 return IRQ_RETVAL(handled);
2048
2049}
2050
2051static __inline__ void
2052__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2053{
2054 struct he_tpdrq *new_tail;
2055
2056 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2057 tpd, cid, he_dev->tpdrq_tail);
2058
2059 /* new_tail = he_dev->tpdrq_tail; */
2060 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2061 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2062
2063 /*
2064 * check whether we are about to set tail == head; if so,
2065 * re-read the head pointer from the adapter to confirm,
2066 * since reading the queue head on every enqueue would be
2067 * needlessly slow
2068 */
2069
2070 if (new_tail == he_dev->tpdrq_head) {
2071 he_dev->tpdrq_head = (struct he_tpdrq *)
2072 (((unsigned long)he_dev->tpdrq_base) |
2073 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2074
2075 if (new_tail == he_dev->tpdrq_head) {
2076 int slot;
2077
2078 hprintk("tpdrq full (cid 0x%x)\n", cid);
2079 /*
2080 * FIXME
2081 * push tpd onto a transmit backlog queue
2082 * after service_tbrq, service the backlog
2083 * for now, we just drop the pdu
2084 */
2085 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2086 if (tpd->iovec[slot].addr)
2087 dma_unmap_single(&he_dev->pci_dev->dev,
2088 tpd->iovec[slot].addr,
2089 tpd->iovec[slot].len & TPD_LEN_MASK,
2090 DMA_TO_DEVICE);
2091 }
2092 if (tpd->skb) {
2093 if (tpd->vcc->pop)
2094 tpd->vcc->pop(tpd->vcc, tpd->skb);
2095 else
2096 dev_kfree_skb_any(tpd->skb);
2097 atomic_inc(&tpd->vcc->stats->tx_err);
2098 }
2099 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2100 return;
2101 }
2102 }
2103
2104 /* 2.1.5 transmit packet descriptor ready queue */
2105 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2106 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2107 he_dev->tpdrq_tail->cid = cid;
2108 wmb();
2109
2110 he_dev->tpdrq_tail = new_tail;
2111
2112 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2113 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2114}
2115
2116static int
2117he_open(struct atm_vcc *vcc)
2118{
2119 unsigned long flags;
2120 struct he_dev *he_dev = HE_DEV(vcc->dev);
2121 struct he_vcc *he_vcc;
2122 int err = 0;
2123 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2124 short vpi = vcc->vpi;
2125 int vci = vcc->vci;
2126
2127 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2128 return 0;
2129
2130 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2131
2132 set_bit(ATM_VF_ADDR, &vcc->flags);
2133
2134 cid = he_mkcid(he_dev, vpi, vci);
2135
2136 he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2137 if (he_vcc == NULL) {
2138 hprintk("unable to allocate he_vcc during open\n");
2139 return -ENOMEM;
2140 }
2141
2142 INIT_LIST_HEAD(&he_vcc->buffers);
2143 he_vcc->pdu_len = 0;
2144 he_vcc->rc_index = -1;
2145
2146 init_waitqueue_head(&he_vcc->rx_waitq);
2147 init_waitqueue_head(&he_vcc->tx_waitq);
2148
2149 vcc->dev_data = he_vcc;
2150
2151 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2152 int pcr_goal;
2153
2154 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2155 if (pcr_goal == 0)
2156 pcr_goal = he_dev->atm_dev->link_rate;
2157 if (pcr_goal < 0) /* means round down, technically */
2158 pcr_goal = -pcr_goal;
2159
2160 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2161
2162 switch (vcc->qos.aal) {
2163 case ATM_AAL5:
2164 tsr0_aal = TSR0_AAL5;
2165 tsr4 = TSR4_AAL5;
2166 break;
2167 case ATM_AAL0:
2168 tsr0_aal = TSR0_AAL0_SDU;
2169 tsr4 = TSR4_AAL0_SDU;
2170 break;
2171 default:
2172 err = -EINVAL;
2173 goto open_failed;
2174 }
2175
2176 spin_lock_irqsave(&he_dev->global_lock, flags);
2177 tsr0 = he_readl_tsr0(he_dev, cid);
2178 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2179
2180 if (TSR0_CONN_STATE(tsr0) != 0) {
2181 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2182 err = -EBUSY;
2183 goto open_failed;
2184 }
2185
2186 switch (vcc->qos.txtp.traffic_class) {
2187 case ATM_UBR:
2188 /* 2.3.3.1 open connection ubr */
2189
2190 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2191 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2192 break;
2193
2194 case ATM_CBR:
2195 /* 2.3.3.2 open connection cbr */
2196
2197 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2198 if ((he_dev->total_bw + pcr_goal)
2199 > (he_dev->atm_dev->link_rate * 9 / 10))
2200 {
2201 err = -EBUSY;
2202 goto open_failed;
2203 }
2204
2205 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2206
2207 /* find an unused cs_stper register */
2208 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2209 if (he_dev->cs_stper[reg].inuse == 0 ||
2210 he_dev->cs_stper[reg].pcr == pcr_goal)
2211 break;
2212
2213 if (reg == HE_NUM_CS_STPER) {
2214 err = -EBUSY;
2215 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2216 goto open_failed;
2217 }
2218
2219 he_dev->total_bw += pcr_goal;
2220
2221 he_vcc->rc_index = reg;
2222 ++he_dev->cs_stper[reg].inuse;
2223 he_dev->cs_stper[reg].pcr = pcr_goal;
2224
2225 clock = he_is622(he_dev) ? 66667000 : 50000000;
2226 period = clock / pcr_goal;
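/* e.g. a 100000 cell/s cbr vc on a 155 card: period = 50000000 / 100000 = 500 */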
2227
2228 HPRINTK("rc_index = %d period = %d\n",
2229 reg, period);
2230
2231 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2232 CS_STPER0 + reg);
2233 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2234
2235 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2236 TSR0_RC_INDEX(reg);
2237
2238 break;
2239 default:
2240 err = -EINVAL;
2241 goto open_failed;
2242 }
2243
2244 spin_lock_irqsave(&he_dev->global_lock, flags);
2245
2246 he_writel_tsr0(he_dev, tsr0, cid);
2247 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2248 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2249 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2250 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2251 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2252
2253 he_writel_tsr3(he_dev, 0x0, cid);
2254 he_writel_tsr5(he_dev, 0x0, cid);
2255 he_writel_tsr6(he_dev, 0x0, cid);
2256 he_writel_tsr7(he_dev, 0x0, cid);
2257 he_writel_tsr8(he_dev, 0x0, cid);
2258 he_writel_tsr10(he_dev, 0x0, cid);
2259 he_writel_tsr11(he_dev, 0x0, cid);
2260 he_writel_tsr12(he_dev, 0x0, cid);
2261 he_writel_tsr13(he_dev, 0x0, cid);
2262 he_writel_tsr14(he_dev, 0x0, cid);
2263 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2264 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2265 }
2266
2267 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2268 unsigned aal;
2269
2270 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2271 &HE_VCC(vcc)->rx_waitq);
2272
2273 switch (vcc->qos.aal) {
2274 case ATM_AAL5:
2275 aal = RSR0_AAL5;
2276 break;
2277 case ATM_AAL0:
2278 aal = RSR0_RAWCELL;
2279 break;
2280 default:
2281 err = -EINVAL;
2282 goto open_failed;
2283 }
2284
2285 spin_lock_irqsave(&he_dev->global_lock, flags);
2286
2287 rsr0 = he_readl_rsr0(he_dev, cid);
2288 if (rsr0 & RSR0_OPEN_CONN) {
2289 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2290
2291 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2292 err = -EBUSY;
2293 goto open_failed;
2294 }
2295
2296 rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2297 rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2298 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2299 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2300
2301#ifdef USE_CHECKSUM_HW
2302 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2303 rsr0 |= RSR0_TCP_CKSUM;
2304#endif
2305
2306 he_writel_rsr4(he_dev, rsr4, cid);
2307 he_writel_rsr1(he_dev, rsr1, cid);
2308 /* 5.1.11 last parameter initialized should be
2309 the open/closed indication in rsr0 */
2310 he_writel_rsr0(he_dev,
2311 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2312 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2313
2314 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2315 }
2316
2317open_failed:
2318
2319 if (err) {
2320 kfree(he_vcc);
2321 clear_bit(ATM_VF_ADDR, &vcc->flags);
2322 }
2323 else
2324 set_bit(ATM_VF_READY, &vcc->flags);
2325
2326 return err;
2327}
2328
2329static void
2330he_close(struct atm_vcc *vcc)
2331{
2332 unsigned long flags;
2333 DECLARE_WAITQUEUE(wait, current);
2334 struct he_dev *he_dev = HE_DEV(vcc->dev);
2335 struct he_tpd *tpd;
2336 unsigned cid;
2337 struct he_vcc *he_vcc = HE_VCC(vcc);
2338#define MAX_RETRY 30
2339 int retry = 0, sleep = 1, tx_inuse;
2340
2341 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2342
2343 clear_bit(ATM_VF_READY, &vcc->flags);
2344 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2345
2346 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2347 int timeout;
2348
2349 HPRINTK("close rx cid 0x%x\n", cid);
2350
2351 /* 2.7.2.2 close receive operation */
2352
2353 /* wait for previous close (if any) to finish */
2354
2355 spin_lock_irqsave(&he_dev->global_lock, flags);
2356 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2357 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2358 udelay(250);
2359 }
2360
2361 set_current_state(TASK_UNINTERRUPTIBLE);
2362 add_wait_queue(&he_vcc->rx_waitq, &wait);
2363
2364 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2365 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2366 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2367 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2368
2369 timeout = schedule_timeout(30*HZ);
2370
2371 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2372 set_current_state(TASK_RUNNING);
2373
2374 if (timeout == 0)
2375 hprintk("close rx timeout cid 0x%x\n", cid);
2376
2377 HPRINTK("close rx cid 0x%x complete\n", cid);
2378
2379 }
2380
2381 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2382 volatile unsigned tsr4, tsr0;
2383 int timeout;
2384
2385 HPRINTK("close tx cid 0x%x\n", cid);
2386
2387 /* 2.1.2
2388 *
2389 * ... the host must first stop queueing packets to the TPDRQ
2390 * on the connection to be closed, then wait for all outstanding
2391 * packets to be transmitted and their buffers returned to the
2392 * TBRQ. When the last packet on the connection arrives in the
2393 * TBRQ, the host issues the close command to the adapter.
2394 */
2395
2396 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2397 (retry < MAX_RETRY)) {
2398 msleep(sleep);
2399 if (sleep < 250)
2400 sleep = sleep * 2;
2401
2402 ++retry;
2403 }
2404
2405 if (tx_inuse > 1)
2406 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2407
2408 /* 2.3.1.1 generic close operations with flush */
2409
2410 spin_lock_irqsave(&he_dev->global_lock, flags);
2411 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2412 /* also clears TSR4_SESSION_ENDED */
2413
2414 switch (vcc->qos.txtp.traffic_class) {
2415 case ATM_UBR:
2416 he_writel_tsr1(he_dev,
2417 TSR1_MCR(rate_to_atmf(200000))
2418 | TSR1_PCR(0), cid);
2419 break;
2420 case ATM_CBR:
2421 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2422 break;
2423 }
2424 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2425
2426 tpd = __alloc_tpd(he_dev);
2427 if (tpd == NULL) {
2428 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2429 goto close_tx_incomplete;
2430 }
2431 tpd->status |= TPD_EOS | TPD_INT;
2432 tpd->skb = NULL;
2433 tpd->vcc = vcc;
2434 wmb();
2435
2436 set_current_state(TASK_UNINTERRUPTIBLE);
2437 add_wait_queue(&he_vcc->tx_waitq, &wait);
2438 __enqueue_tpd(he_dev, tpd, cid);
2439 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2440
2441 timeout = schedule_timeout(30*HZ);
2442
2443 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2444 set_current_state(TASK_RUNNING);
2445
2446 spin_lock_irqsave(&he_dev->global_lock, flags);
2447
2448 if (timeout == 0) {
2449 hprintk("close tx timeout cid 0x%x\n", cid);
2450 goto close_tx_incomplete;
2451 }
2452
2453 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2454 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2455 udelay(250);
2456 }
2457
2458 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2459 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2460 udelay(250);
2461 }
2462
2463close_tx_incomplete:
2464
2465 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2466 int reg = he_vcc->rc_index;
2467
2468 HPRINTK("cs_stper reg = %d\n", reg);
2469
2470 if (he_dev->cs_stper[reg].inuse == 0)
2471 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2472 else
2473 --he_dev->cs_stper[reg].inuse;
2474
2475 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2476 }
2477 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2478
2479 HPRINTK("close tx cid 0x%x complete\n", cid);
2480 }
2481
2482 kfree(he_vcc);
2483
2484 clear_bit(ATM_VF_ADDR, &vcc->flags);
2485}
2486
2487static int
2488he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2489{
2490 unsigned long flags;
2491 struct he_dev *he_dev = HE_DEV(vcc->dev);
2492 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2493 struct he_tpd *tpd;
2494#ifdef USE_SCATTERGATHER
2495 int i, slot = 0;
2496#endif
2497
2498#define HE_TPD_BUFSIZE 0xffff
2499
2500 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2501
2502 if ((skb->len > HE_TPD_BUFSIZE) ||
2503 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2504 hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2505 if (vcc->pop)
2506 vcc->pop(vcc, skb);
2507 else
2508 dev_kfree_skb_any(skb);
2509 atomic_inc(&vcc->stats->tx_err);
2510 return -EINVAL;
2511 }
2512
2513#ifndef USE_SCATTERGATHER
2514 if (skb_shinfo(skb)->nr_frags) {
2515 hprintk("no scatter/gather support\n");
2516 if (vcc->pop)
2517 vcc->pop(vcc, skb);
2518 else
2519 dev_kfree_skb_any(skb);
2520 atomic_inc(&vcc->stats->tx_err);
2521 return -EINVAL;
2522 }
2523#endif
2524 spin_lock_irqsave(&he_dev->global_lock, flags);
2525
2526 tpd = __alloc_tpd(he_dev);
2527 if (tpd == NULL) {
2528 if (vcc->pop)
2529 vcc->pop(vcc, skb);
2530 else
2531 dev_kfree_skb_any(skb);
2532 atomic_inc(&vcc->stats->tx_err);
2533 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2534 return -ENOMEM;
2535 }
2536
2537 if (vcc->qos.aal == ATM_AAL5)
2538 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2539 else {
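/*
 * raw cell (aal0) tx: the skb holds a whole cell of ATM_AAL0_SDU
 * bytes.  the header byte at offset 3 carries the pti in bits 3..1
 * and the clp bit in bit 0; those are copied into the tpd and the
 * header is then pulled off so only the 48 byte payload goes out.
 */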
2540 char *pti_clp = (void *) (skb->data + 3);
2541 int clp, pti;
2542
2543 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2544 clp = (*pti_clp & ATM_HDR_CLP);
2545 tpd->status |= TPD_CELLTYPE(pti);
2546 if (clp)
2547 tpd->status |= TPD_CLP;
2548
2549 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2550 }
2551
2552#ifdef USE_SCATTERGATHER
2553 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
2554 skb_headlen(skb), DMA_TO_DEVICE);
2555 tpd->iovec[slot].len = skb_headlen(skb);
2556 ++slot;
2557
2558 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2559 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2560
2561 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2562 tpd->vcc = vcc;
2563 tpd->skb = NULL; /* not the last fragment
2564 so don't ->push() yet */
2565 wmb();
2566
2567 __enqueue_tpd(he_dev, tpd, cid);
2568 tpd = __alloc_tpd(he_dev);
2569 if (tpd == NULL) {
2570 if (vcc->pop)
2571 vcc->pop(vcc, skb);
2572 else
2573 dev_kfree_skb_any(skb);
2574 atomic_inc(&vcc->stats->tx_err);
2575 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2576 return -ENOMEM;
2577 }
2578 tpd->status |= TPD_USERCELL;
2579 slot = 0;
2580 }
2581
2582 tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
2583 (void *) page_address(frag->page) + frag->page_offset,
2584 frag->size, DMA_TO_DEVICE);
2585 tpd->iovec[slot].len = frag->size;
2586 ++slot;
2587
2588 }
2589
2590 tpd->iovec[slot - 1].len |= TPD_LST;
2591#else
2592 tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2593 tpd->length0 = skb->len | TPD_LST;
2594#endif
2595 tpd->status |= TPD_INT;
2596
2597 tpd->vcc = vcc;
2598 tpd->skb = skb;
2599 wmb();
2600 ATM_SKB(skb)->vcc = vcc;
2601
2602 __enqueue_tpd(he_dev, tpd, cid);
2603 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2604
2605 atomic_inc(&vcc->stats->tx);
2606
2607 return 0;
2608}
2609
2610static int
2611he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2612{
2613 unsigned long flags;
2614 struct he_dev *he_dev = HE_DEV(atm_dev);
2615 struct he_ioctl_reg reg;
2616 int err = 0;
2617
2618 switch (cmd) {
2619 case HE_GET_REG:
2620 if (!capable(CAP_NET_ADMIN))
2621 return -EPERM;
2622
2623 if (copy_from_user(&reg, arg,
2624 sizeof(struct he_ioctl_reg)))
2625 return -EFAULT;
2626
2627 spin_lock_irqsave(&he_dev->global_lock, flags);
2628 switch (reg.type) {
2629 case HE_REGTYPE_PCI:
2630 if (reg.addr >= HE_REGMAP_SIZE) {
2631 err = -EINVAL;
2632 break;
2633 }
2634
2635 reg.val = he_readl(he_dev, reg.addr);
2636 break;
2637 case HE_REGTYPE_RCM:
2638 reg.val =
2639 he_readl_rcm(he_dev, reg.addr);
2640 break;
2641 case HE_REGTYPE_TCM:
2642 reg.val =
2643 he_readl_tcm(he_dev, reg.addr);
2644 break;
2645 case HE_REGTYPE_MBOX:
2646 reg.val =
2647 he_readl_mbox(he_dev, reg.addr);
2648 break;
2649 default:
2650 err = -EINVAL;
2651 break;
2652 }
2653 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2654 if (err == 0)
2655 if (copy_to_user(arg, &reg,
2656 sizeof(struct he_ioctl_reg)))
2657 return -EFAULT;
2658 break;
2659 default:
2660#ifdef CONFIG_ATM_HE_USE_SUNI
2661 if (atm_dev->phy && atm_dev->phy->ioctl)
2662 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2663#else /* CONFIG_ATM_HE_USE_SUNI */
2664 err = -EINVAL;
2665#endif /* CONFIG_ATM_HE_USE_SUNI */
2666 break;
2667 }
2668
2669 return err;
2670}
2671
2672static void
2673he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2674{
2675 unsigned long flags;
2676 struct he_dev *he_dev = HE_DEV(atm_dev);
2677
2678 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2679
2680 spin_lock_irqsave(&he_dev->global_lock, flags);
2681 he_writel(he_dev, val, FRAMER + (addr*4));
2682 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2683 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2684}
2685
2686
2687static unsigned char
2688he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2689{
2690 unsigned long flags;
2691 struct he_dev *he_dev = HE_DEV(atm_dev);
2692 unsigned reg;
2693
2694 spin_lock_irqsave(&he_dev->global_lock, flags);
2695 reg = he_readl(he_dev, FRAMER + (addr*4));
2696 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2697
2698 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2699 return reg;
2700}
2701
2702static int
2703he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2704{
2705 unsigned long flags;
2706 struct he_dev *he_dev = HE_DEV(dev);
2707 int left, i;
2708#ifdef notdef
2709 struct he_rbrq *rbrq_tail;
2710 struct he_tpdrq *tpdrq_head;
2711 int rbpl_head, rbpl_tail, inuse;
2712#endif
2713 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2714
2715
2716 left = *pos;
2717 if (!left--)
2718 return sprintf(page, "ATM he driver\n");
2719
2720 if (!left--)
2721 return sprintf(page, "%s%s\n\n",
2722 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2723
2724 if (!left--)
2725 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2726
2727 spin_lock_irqsave(&he_dev->global_lock, flags);
2728 mcc += he_readl(he_dev, MCC);
2729 oec += he_readl(he_dev, OEC);
2730 dcc += he_readl(he_dev, DCC);
2731 cec += he_readl(he_dev, CEC);
2732 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2733
2734 if (!left--)
2735 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2736 mcc, oec, dcc, cec);
2737
2738 if (!left--)
2739 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2740 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2741
2742 if (!left--)
2743 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2744 CONFIG_TPDRQ_SIZE);
2745
2746 if (!left--)
2747 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2748 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2749
2750 if (!left--)
2751 return sprintf(page, "tbrq_size = %d peak = %d\n",
2752 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2753
2754
2755#ifdef notdef
2756 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2757 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2758
2759 inuse = rbpl_head - rbpl_tail;
2760 if (inuse < 0)
2761 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2762 inuse /= sizeof(struct he_rbp);
2763
2764 if (!left--)
2765 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2766 CONFIG_RBPL_SIZE, inuse);
2767#endif
2768
2769 if (!left--)
2770 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2771
2772 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2773 if (!left--)
2774 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2775 he_dev->cs_stper[i].pcr,
2776 he_dev->cs_stper[i].inuse);
2777
2778 if (!left--)
2779 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2780 he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2781
2782 return 0;
2783}
2784
2785/* eeprom routines -- see 4.7 */
2786
2787static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2788{
2789 u32 val = 0, tmp_read = 0;
2790 int i, j = 0;
2791 u8 byte_read = 0;
2792
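/*
 * bit-banged read over HOST_CNTL: readtab[] (from he.h) presumably
 * holds the serial eeprom READ opcode waveform and clocktab[] the
 * clock transitions; the result comes back one bit per clock on
 * ID_DOUT below
 */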
2793 val = readl(he_dev->membase + HOST_CNTL);
2794 val &= 0xFFFFE0FF;
2795
2796 /* Turn on write enable */
2797 val |= 0x800;
2798 he_writel(he_dev, val, HOST_CNTL);
2799
2800 /* Send READ instruction */
2801 for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2802 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2803 udelay(EEPROM_DELAY);
2804 }
2805
2806 /* Next, we need to send the byte address to read from */
2807 for (i = 7; i >= 0; i--) {
2808 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2809 udelay(EEPROM_DELAY);
2810 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2811 udelay(EEPROM_DELAY);
2812 }
2813
2814 j = 0;
2815
2816 val &= 0xFFFFF7FF; /* Turn off write enable */
2817 he_writel(he_dev, val, HOST_CNTL);
2818
2819 /* Now, we can read data from the EEPROM by clocking it in */
2820 for (i = 7; i >= 0; i--) {
2821 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2822 udelay(EEPROM_DELAY);
2823 tmp_read = he_readl(he_dev, HOST_CNTL);
2824 byte_read |= (unsigned char)
2825 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2826 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2827 udelay(EEPROM_DELAY);
2828 }
2829
2830 he_writel(he_dev, val | ID_CS, HOST_CNTL);
2831 udelay(EEPROM_DELAY);
2832
2833 return byte_read;
2834}
2835
2836MODULE_LICENSE("GPL");
2837MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2838MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2839module_param(disable64, bool, 0);
2840MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2841module_param(nvpibits, short, 0);
2842 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2843 module_param(nvcibits, short, 0);
2844 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2845module_param(rx_skb_reserve, short, 0);
2846MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2847module_param(irq_coalesce, bool, 0);
2848MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2849module_param(sdh, bool, 0);
2850MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2851
2852static struct pci_device_id he_pci_tbl[] = {
2853 { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2854 { 0, }
2855};
2856
2857MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2858
2859static struct pci_driver he_driver = {
2860 .name = "he",
2861 .probe = he_init_one,
2862 .remove = he_remove_one,
2863 .id_table = he_pci_tbl,
2864};
2865
2866module_pci_driver(he_driver);