/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static bool irq_coalesce = true;
static bool sdh;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
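
/*
 * Added note (not in the original source): readtab bit-bangs the EEPROM
 * read opcode, most significant bit first.  Each CLK_LOW/CLK_HIGH pair
 * clocks one opcode bit out on the SI line, so the sequence above shifts
 * in six zeros followed by two ones, the 0000 0011b "read" opcode named
 * in the comment.
 */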

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))
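
/*
 * Added note: he_writel() pairs every MMIO write with a wmb() so the
 * compiler and CPU cannot reorder stores to the register window; reads
 * back through the window (see he_writel_internal() below) additionally
 * flush any writes still posted in the PCI bridge.
 */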

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
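
/*
 * Worked example (the vpibits/vcibits values here are illustrative, not
 * the driver defaults): with vcibits = 10 and vpibits = 2,
 * he_mkcid(dev, 1, 32) = ((1 << 10) | 32) & 0x1fff = 0x420.
 * __find_vcc() below performs the inverse split:
 * vpi = cid >> vcibits, vci = cid & ((1 << vcibits) - 1).
 */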

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
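
/*
 * Added note: the shifts above encode how densely each bank packs its
 * state: TSRA holds eight words per connection (cid << 3 selects
 * TSR0-7), TSRB four (cid << 2, TSR8-11), TSRC two (cid << 1, TSR12-13),
 * and TSRD one (TSR14).  The *_upper variants assert byte enables so
 * only the most significant byte is written, per the note from
 * page 2-20 above.
 */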

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}

static int he_init_one(struct pci_dev *pci_dev,
		       const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev), GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void he_remove_one(struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
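
/*
 * Worked example (added for illustration): for an OC-3 link rate of
 * ATM_OC3_PCR (353207 cps), rate << 9 = 180841984, which takes 18 right
 * shifts to fit in 10 bits, leaving a mantissa of 689 (0x2b1).  The
 * result is NONZERO | (18 << 9) | (689 & 0x1ff) = 0x64b1, the same
 * constant that shows up in the 155 Mbps cs block tables below.
 */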

static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
						 &he_dev->tpdrq_phys, GFP_KERNEL);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connections */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

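	/*
	 * Worked example (illustrative): at OC-3, link_rate is 353207 cps
	 * and delta = 353207 / 32 = 11037, so row 0 runs from 353207 down
	 * to 353207 - 15 * 11037 = 187652 cps in equal steps.  Row 1 holds
	 * half the rates of row 0, and so on, with row 15 a further
	 * quarter of row 14.
	 */
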
	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
			instead of '/ 512', use '>> 9' to prevent a call
			to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	 /* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: a rate grid index (8 bits)
		 * and a buffer limit (8 bits); there are two table
		 * entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc_array(BITS_TO_LONGS(RBPL_TABLE_SIZE),
					   sizeof(*he_dev->rbpl_table),
					   GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
					  sizeof(*he_dev->rbpl_virt),
					  GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
						&he_dev->rbpl_phys, GFP_KERNEL);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
						&he_dev->rbrq_phys, GFP_KERNEL);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
						&he_dev->tbrq_phys, GFP_KERNEL);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
			  sizeof(struct he_rbrq), he_dev->rbrq_base,
			  he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
			  sizeof(struct he_rbp), he_dev->rbpl_base,
			  he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}

static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
					       (CONFIG_IRQ_SIZE + 1)
					       * sizeof(struct he_irq),
					       &he_dev->irq_phys,
					       GFP_KERNEL);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
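
/*
 * Added arithmetic note: 209 = 1 + 16 + 192, where the 192-cycle burst
 * term follows from the table above (presumably 1536 bytes over a
 * 64-bit bus for 622 parts, or 768 bytes over a 32-bit bus for 155).
 */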
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	msleep(16);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %pM\n", he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;
	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits + nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;
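
	/*
	 * Added sanity check: on a 622 part this yields 256 * 40 / 4 = 2560
	 * rx0 buffers and 512 * 40 / 4 = 5120 tx buffers, exactly at the
	 * caps; on a 155 part, 512 * 20 / 4 = 2560 and 1018 * 20 / 4 = 5090.
	 */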

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs),		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd dma_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
					  sizeof(struct he_hsp),
					  &he_dev->hsp_phys, GFP_KERNEL);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
				  he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);
	dma_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
				  he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
				  he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
				  he_dev->tpdrq_base, he_dev->tpdrq_phys);

	dma_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
	struct he_tpd *tpd;
	dma_addr_t mapping;

	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(mapping);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
}

#define AAL5_LEN(buf,len)						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len)						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
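
/*
 * Added note: both macros read big-endian 16-bit fields out of the AAL5
 * trailer, which occupies the last eight bytes of the reassembled PDU:
 * UU (1 byte), CPI (1), length (2, at len-6/len-5) and CRC-32 (4).
 * With RSR0_TCP_CKSUM the hardware folds a tcp checksum into the low
 * 16 bits of the crc field, which is what TCP_CKSUM extracts from
 * len-2 and len-1.
 */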
1641
1642static int
1643he_service_rbrq(struct he_dev *he_dev, int group)
1644{
1645	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1646				((unsigned long)he_dev->rbrq_base |
1647					he_dev->hsp->group[group].rbrq_tail);
1648	unsigned cid, lastcid = -1;
1649	struct sk_buff *skb;
1650	struct atm_vcc *vcc = NULL;
1651	struct he_vcc *he_vcc;
1652	struct he_buff *heb, *next;
1653	int i;
1654	int pdus_assembled = 0;
1655	int updated = 0;
1656
1657	read_lock(&vcc_sklist_lock);
1658	while (he_dev->rbrq_head != rbrq_tail) {
1659		++updated;
1660
1661		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1662			he_dev->rbrq_head, group,
1663			RBRQ_ADDR(he_dev->rbrq_head),
1664			RBRQ_BUFLEN(he_dev->rbrq_head),
1665			RBRQ_CID(he_dev->rbrq_head),
1666			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1667			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1668			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1669			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1670			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1671			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1672
1673		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1674		heb = he_dev->rbpl_virt[i];
1675
1676		cid = RBRQ_CID(he_dev->rbrq_head);
1677		if (cid != lastcid)
1678			vcc = __find_vcc(he_dev, cid);
1679		lastcid = cid;
1680
1681		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1682			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1683			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1684				clear_bit(i, he_dev->rbpl_table);
1685				list_del(&heb->entry);
1686				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1687			}
1688					
1689			goto next_rbrq_entry;
1690		}
1691
1692		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1693			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1694				atomic_inc(&vcc->stats->rx_drop);
1695			goto return_host_buffers;
1696		}
1697
1698		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1699		clear_bit(i, he_dev->rbpl_table);
1700		list_move_tail(&heb->entry, &he_vcc->buffers);
1701		he_vcc->pdu_len += heb->len;
1702
1703		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1704			lastcid = -1;
1705			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1706			wake_up(&he_vcc->rx_waitq);
1707			goto return_host_buffers;
1708		}
1709
1710		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1711			goto next_rbrq_entry;
1712
1713		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1714				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1715			HPRINTK("%s%s (%d.%d)\n",
1716				RBRQ_CRC_ERR(he_dev->rbrq_head)
1717							? "CRC_ERR " : "",
1718				RBRQ_LEN_ERR(he_dev->rbrq_head)
1719							? "LEN_ERR" : "",
1720							vcc->vpi, vcc->vci);
1721			atomic_inc(&vcc->stats->rx_err);
1722			goto return_host_buffers;
1723		}
1724
1725		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1726							GFP_ATOMIC);
1727		if (!skb) {
1728			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1729			goto return_host_buffers;
1730		}
1731
1732		if (rx_skb_reserve > 0)
1733			skb_reserve(skb, rx_skb_reserve);
1734
1735		__net_timestamp(skb);
1736
1737		list_for_each_entry(heb, &he_vcc->buffers, entry)
1738			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1739
1740		switch (vcc->qos.aal) {
1741			case ATM_AAL0:
1742				/* 2.10.1.5 raw cell receive */
1743				skb->len = ATM_AAL0_SDU;
1744				skb_set_tail_pointer(skb, skb->len);
1745				break;
1746			case ATM_AAL5:
1747				/* 2.10.1.2 aal5 receive */
1748
1749				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1750				skb_set_tail_pointer(skb, skb->len);
1751#ifdef USE_CHECKSUM_HW
1752				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1753					skb->ip_summed = CHECKSUM_COMPLETE;
1754					skb->csum = TCP_CKSUM(skb->data,
1755							he_vcc->pdu_len);
1756				}
1757#endif
1758				break;
1759		}
1760
1761#ifdef should_never_happen
1762		if (skb->len > vcc->qos.rxtp.max_sdu)
1763			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1764#endif
1765
1766#ifdef notdef
1767		ATM_SKB(skb)->vcc = vcc;
1768#endif
1769		spin_unlock(&he_dev->global_lock);
1770		vcc->push(vcc, skb);
1771		spin_lock(&he_dev->global_lock);
1772
1773		atomic_inc(&vcc->stats->rx);
1774
1775return_host_buffers:
1776		++pdus_assembled;
1777
1778		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1779			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1780		INIT_LIST_HEAD(&he_vcc->buffers);
1781		he_vcc->pdu_len = 0;
1782
1783next_rbrq_entry:
1784		he_dev->rbrq_head = (struct he_rbrq *)
1785				((unsigned long) he_dev->rbrq_base |
1786					RBRQ_MASK(he_dev->rbrq_head + 1));
1787
1788	}
1789	read_unlock(&vcc_sklist_lock);
1790
1791	if (updated) {
1792		if (updated > he_dev->rbrq_peak)
1793			he_dev->rbrq_peak = updated;
1794
1795		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1796						G0_RBRQ_H + (group * 16));
1797	}
1798
1799	return pdus_assembled;
1800}
1801
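/*
 * drain the transmit buffer return queue: match each returned tpd
 * against the outstanding list, unmap its dma buffers, release the
 * skb, and recycle the tpd (see 2.1.6)
 */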
1802static void
1803he_service_tbrq(struct he_dev *he_dev, int group)
1804{
1805	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1806				((unsigned long)he_dev->tbrq_base |
1807					he_dev->hsp->group[group].tbrq_tail);
1808	struct he_tpd *tpd;
1809	int slot, updated = 0;
1810	struct he_tpd *__tpd;
1811
1812	/* 2.1.6 transmit buffer return queue */
1813
1814	while (he_dev->tbrq_head != tbrq_tail) {
1815		++updated;
1816
1817		HPRINTK("tbrq%d 0x%x%s%s\n",
1818			group,
1819			TBRQ_TPD(he_dev->tbrq_head),
1820			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1821			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1822		tpd = NULL;
1823		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1824			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1825				tpd = __tpd;
1826				list_del(&__tpd->entry);
1827				break;
1828			}
1829		}
1830
1831		if (tpd == NULL) {
1832			hprintk("unable to locate tpd for dma buffer %x\n",
1833						TBRQ_TPD(he_dev->tbrq_head));
1834			goto next_tbrq_entry;
1835		}
1836
1837		if (TBRQ_EOS(he_dev->tbrq_head)) {
1838			if (tpd->vcc) {
1839				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1840					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1841				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1842			}
1843			goto next_tbrq_entry;
1844		}
1845
1846		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1847			if (tpd->iovec[slot].addr)
1848				dma_unmap_single(&he_dev->pci_dev->dev,
1849					tpd->iovec[slot].addr,
1850					tpd->iovec[slot].len & TPD_LEN_MASK,
1851							DMA_TO_DEVICE);
1852			if (tpd->iovec[slot].len & TPD_LST)
1853				break;
1854
1855		}
1856
1857		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1858			if (tpd->vcc && tpd->vcc->pop)
1859				tpd->vcc->pop(tpd->vcc, tpd->skb);
1860			else
1861				dev_kfree_skb_any(tpd->skb);
1862		}
1863
1864next_tbrq_entry:
1865		if (tpd)
1866			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1867		he_dev->tbrq_head = (struct he_tbrq *)
1868				((unsigned long) he_dev->tbrq_base |
1869					TBRQ_MASK(he_dev->tbrq_head + 1));
1870	}
1871
1872	if (updated) {
1873		if (updated > he_dev->tbrq_peak)
1874			he_dev->tbrq_peak = updated;
1875
1876		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1877						G0_TBRQ_H + (group * 16));
1878	}
1879}
1880
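/*
 * replenish the group's large receive buffer pool: for every free
 * slot in the rbpl ring, allocate a buffer from the dma pool, record
 * it in rbpl_virt[] and the bitmap, then advance the tail register
 * the adapter polls
 */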
1881static void
1882he_service_rbpl(struct he_dev *he_dev, int group)
1883{
1884	struct he_rbp *new_tail;
1885	struct he_rbp *rbpl_head;
1886	struct he_buff *heb;
1887	dma_addr_t mapping;
1888	int i;
1889	int moved = 0;
1890
1891	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1892					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1893
1894	for (;;) {
1895		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1896						RBPL_MASK(he_dev->rbpl_tail+1));
1897
1898		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1899		if (new_tail == rbpl_head)
1900			break;
1901
1902		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1903		if (i > (RBPL_TABLE_SIZE - 1)) {
1904			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1905			if (i > (RBPL_TABLE_SIZE - 1))
1906				break;
1907		}
1908		he_dev->rbpl_hint = i + 1;
1909
1910		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
1911		if (!heb)
1912			break;
1913		heb->mapping = mapping;
1914		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1915		he_dev->rbpl_virt[i] = heb;
1916		set_bit(i, he_dev->rbpl_table);
1917		new_tail->idx = i << RBP_IDX_OFFSET;
1918		new_tail->phys = mapping + offsetof(struct he_buff, data);
1919
1920		he_dev->rbpl_tail = new_tail;
1921		++moved;
1922	}
1923
1924	if (moved)
1925		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1926}
1927
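/*
 * bottom half: walk the interrupt event queue and dispatch on the
 * ITYPE_* event type, then return the new queue head to the adapter
 */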
1928static void
1929he_tasklet(unsigned long data)
1930{
1931	unsigned long flags;
1932	struct he_dev *he_dev = (struct he_dev *) data;
1933	int group, type;
1934	int updated = 0;
1935
1936	HPRINTK("tasklet (0x%lx)\n", data);
1937	spin_lock_irqsave(&he_dev->global_lock, flags);
1938
1939	while (he_dev->irq_head != he_dev->irq_tail) {
1940		++updated;
1941
1942		type = ITYPE_TYPE(he_dev->irq_head->isw);
1943		group = ITYPE_GROUP(he_dev->irq_head->isw);
1944
1945		switch (type) {
1946			case ITYPE_RBRQ_THRESH:
1947				HPRINTK("rbrq%d threshold\n", group);
1948				/* fall through */
1949			case ITYPE_RBRQ_TIMER:
1950				if (he_service_rbrq(he_dev, group))
1951					he_service_rbpl(he_dev, group);
1952				break;
1953			case ITYPE_TBRQ_THRESH:
1954				HPRINTK("tbrq%d threshold\n", group);
1955				/* fall through */
1956			case ITYPE_TPD_COMPLETE:
1957				he_service_tbrq(he_dev, group);
1958				break;
1959			case ITYPE_RBPL_THRESH:
1960				he_service_rbpl(he_dev, group);
1961				break;
1962			case ITYPE_RBPS_THRESH:
1963				/* shouldn't happen unless small buffers enabled */
1964				break;
1965			case ITYPE_PHY:
1966				HPRINTK("phy interrupt\n");
1967#ifdef CONFIG_ATM_HE_USE_SUNI
1968				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1969				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1970					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1971				spin_lock_irqsave(&he_dev->global_lock, flags);
1972#endif
1973				break;
1974			case ITYPE_OTHER:
1975				switch (type|group) {
1976					case ITYPE_PARITY:
1977						hprintk("parity error\n");
1978						break;
1979					case ITYPE_ABORT:
1980						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1981						break;
1982				}
1983				break;
1984			case ITYPE_TYPE(ITYPE_INVALID):
1985				/* see 8.1.1 -- check all queues */
1986
1987				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1988
1989				he_service_rbrq(he_dev, 0);
1990				he_service_rbpl(he_dev, 0);
1991				he_service_tbrq(he_dev, 0);
1992				break;
1993			default:
1994				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1995		}
1996
1997		he_dev->irq_head->isw = ITYPE_INVALID;
1998
1999		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2000	}
2001
2002	if (updated) {
2003		if (updated > he_dev->irq_peak)
2004			he_dev->irq_peak = updated;
2005
2006		he_writel(he_dev,
2007			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2008			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2009			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2010		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2011	}
2012	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2013}
2014
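/*
 * hard interrupt: pick up the adapter's current irq queue tail and,
 * if new events are pending, ack the interrupt and hand the queue
 * to the tasklet
 */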
2015static irqreturn_t
2016he_irq_handler(int irq, void *dev_id)
2017{
2018	unsigned long flags;
2019	struct he_dev *he_dev = (struct he_dev *)dev_id;
2020	int handled = 0;
2021
2022	if (he_dev == NULL)
2023		return IRQ_NONE;
2024
2025	spin_lock_irqsave(&he_dev->global_lock, flags);
2026
2027	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2028						(*he_dev->irq_tailoffset << 2));
2029
2030	if (he_dev->irq_tail == he_dev->irq_head) {
2031		HPRINTK("tailoffset not updated?\n");
2032		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2033			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2034		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2035	}
2036
2037#ifdef DEBUG
2038	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2039		hprintk("spurious (or shared) interrupt?\n");
2040#endif
2041
2042	if (he_dev->irq_head != he_dev->irq_tail) {
2043		handled = 1;
2044		tasklet_schedule(&he_dev->tasklet);
2045		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2046		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2047	}
2048	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2049	return IRQ_RETVAL(handled);
2050
2051}
2052
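/*
 * append a tpd to the transmit packet descriptor ready queue;
 * called with he_dev->global_lock held
 */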
2053static __inline__ void
2054__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2055{
2056	struct he_tpdrq *new_tail;
2057
2058	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2059					tpd, cid, he_dev->tpdrq_tail);
2060
2061	/* new_tail = he_dev->tpdrq_tail; */
2062	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2063					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2064
2065	/*
2066	 * check to see if we are about to set the tail == head
2067	 * if true, update the head pointer from the adapter
2068	 * to see if this is really the case (reading the queue
2069	 * head for every enqueue would be unnecessarily slow)
2070	 */
2071
2072	if (new_tail == he_dev->tpdrq_head) {
2073		he_dev->tpdrq_head = (struct he_tpdrq *)
2074			(((unsigned long)he_dev->tpdrq_base) |
2075				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2076
2077		if (new_tail == he_dev->tpdrq_head) {
2078			int slot;
2079
2080			hprintk("tpdrq full (cid 0x%x)\n", cid);
2081			/*
2082			 * FIXME
2083			 * push tpd onto a transmit backlog queue
2084			 * after service_tbrq, service the backlog
2085			 * for now, we just drop the pdu
2086			 */
2087			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2088				if (tpd->iovec[slot].addr)
2089					dma_unmap_single(&he_dev->pci_dev->dev,
2090						tpd->iovec[slot].addr,
2091						tpd->iovec[slot].len & TPD_LEN_MASK,
2092								DMA_TO_DEVICE);
2093			}
2094			if (tpd->skb) {
2095				if (tpd->vcc->pop)
2096					tpd->vcc->pop(tpd->vcc, tpd->skb);
2097				else
2098					dev_kfree_skb_any(tpd->skb);
2099				atomic_inc(&tpd->vcc->stats->tx_err);
2100			}
2101			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2102			return;
2103		}
2104	}
2105
2106	/* 2.1.5 transmit packet descriptor ready queue */
2107	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2108	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2109	he_dev->tpdrq_tail->cid = cid;
2110	wmb();
2111
2112	he_dev->tpdrq_tail = new_tail;
2113
2114	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2115	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2116}
2117
2118static int
2119he_open(struct atm_vcc *vcc)
2120{
2121	unsigned long flags;
2122	struct he_dev *he_dev = HE_DEV(vcc->dev);
2123	struct he_vcc *he_vcc;
2124	int err = 0;
2125	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2126	short vpi = vcc->vpi;
2127	int vci = vcc->vci;
2128
2129	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2130		return 0;
2131
2132	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2133
2134	set_bit(ATM_VF_ADDR, &vcc->flags);
2135
2136	cid = he_mkcid(he_dev, vpi, vci);
2137
2138	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2139	if (he_vcc == NULL) {
2140		hprintk("unable to allocate he_vcc during open\n");
2141		return -ENOMEM;
2142	}
2143
2144	INIT_LIST_HEAD(&he_vcc->buffers);
2145	he_vcc->pdu_len = 0;
2146	he_vcc->rc_index = -1;
2147
2148	init_waitqueue_head(&he_vcc->rx_waitq);
2149	init_waitqueue_head(&he_vcc->tx_waitq);
2150
2151	vcc->dev_data = he_vcc;
2152
2153	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2154		int pcr_goal;
2155
2156		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2157		if (pcr_goal == 0)
2158			pcr_goal = he_dev->atm_dev->link_rate;
2159		if (pcr_goal < 0)	/* means round down, technically */
2160			pcr_goal = -pcr_goal;
2161
2162		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2163
2164		switch (vcc->qos.aal) {
2165			case ATM_AAL5:
2166				tsr0_aal = TSR0_AAL5;
2167				tsr4 = TSR4_AAL5;
2168				break;
2169			case ATM_AAL0:
2170				tsr0_aal = TSR0_AAL0_SDU;
2171				tsr4 = TSR4_AAL0_SDU;
2172				break;
2173			default:
2174				err = -EINVAL;
2175				goto open_failed;
2176		}
2177
2178		spin_lock_irqsave(&he_dev->global_lock, flags);
2179		tsr0 = he_readl_tsr0(he_dev, cid);
2180		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2181
2182		if (TSR0_CONN_STATE(tsr0) != 0) {
2183			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2184			err = -EBUSY;
2185			goto open_failed;
2186		}
2187
2188		switch (vcc->qos.txtp.traffic_class) {
2189			case ATM_UBR:
2190				/* 2.3.3.1 open connection ubr */
2191
2192				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2193					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2194				break;
2195
2196			case ATM_CBR:
2197				/* 2.3.3.2 open connection cbr */
2198
2199				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2200				if ((he_dev->total_bw + pcr_goal)
2201					> (he_dev->atm_dev->link_rate * 9 / 10))
2202				{
2203					err = -EBUSY;
2204					goto open_failed;
2205				}
2206
2207				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2208
2209				/* find an unused cs_stper register */
2210				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2211				if (he_dev->cs_stper[reg].inuse == 0 ||
2212				    he_dev->cs_stper[reg].pcr == pcr_goal)
2213					break;
2214
2215				if (reg == HE_NUM_CS_STPER) {
2216					err = -EBUSY;
2217					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2218					goto open_failed;
2219				}
2220
2221				he_dev->total_bw += pcr_goal;
2222
2223				he_vcc->rc_index = reg;
2224				++he_dev->cs_stper[reg].inuse;
2225				he_dev->cs_stper[reg].pcr = pcr_goal;
2226
2227				clock = he_is622(he_dev) ? 66667000 : 50000000;
2228				period = clock / pcr_goal;
2229
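				/*
				 * illustrative numbers, not from the manual:
				 * on an oc-3 card the clock is 50000000, so
				 * a pcr of 353207 cps gives period
				 * 50000000 / 353207 = 141 cycles; the mailbox
				 * write below programs rate_to_atmf(period/2)
				 */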
2230				HPRINTK("rc_index = %d period = %d\n",
2231								reg, period);
2232
2233				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2234							CS_STPER0 + reg);
2235				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2236
2237				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2238							TSR0_RC_INDEX(reg);
2239
2240				break;
2241			default:
2242				err = -EINVAL;
2243				goto open_failed;
2244		}
2245
2246		spin_lock_irqsave(&he_dev->global_lock, flags);
2247
2248		he_writel_tsr0(he_dev, tsr0, cid);
2249		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2250		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2251					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2252		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2253		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2254
2255		he_writel_tsr3(he_dev, 0x0, cid);
2256		he_writel_tsr5(he_dev, 0x0, cid);
2257		he_writel_tsr6(he_dev, 0x0, cid);
2258		he_writel_tsr7(he_dev, 0x0, cid);
2259		he_writel_tsr8(he_dev, 0x0, cid);
2260		he_writel_tsr10(he_dev, 0x0, cid);
2261		he_writel_tsr11(he_dev, 0x0, cid);
2262		he_writel_tsr12(he_dev, 0x0, cid);
2263		he_writel_tsr13(he_dev, 0x0, cid);
2264		he_writel_tsr14(he_dev, 0x0, cid);
2265		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2266		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2267	}
2268
2269	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2270		unsigned aal;
2271
2272		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2273						&HE_VCC(vcc)->rx_waitq);
2274
2275		switch (vcc->qos.aal) {
2276			case ATM_AAL5:
2277				aal = RSR0_AAL5;
2278				break;
2279			case ATM_AAL0:
2280				aal = RSR0_RAWCELL;
2281				break;
2282			default:
2283				err = -EINVAL;
2284				goto open_failed;
2285		}
2286
2287		spin_lock_irqsave(&he_dev->global_lock, flags);
2288
2289		rsr0 = he_readl_rsr0(he_dev, cid);
2290		if (rsr0 & RSR0_OPEN_CONN) {
2291			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2292
2293			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2294			err = -EBUSY;
2295			goto open_failed;
2296		}
2297
2298		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2299		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2300		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2301				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2302
2303#ifdef USE_CHECKSUM_HW
2304		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2305			rsr0 |= RSR0_TCP_CKSUM;
2306#endif
2307
2308		he_writel_rsr4(he_dev, rsr4, cid);
2309		he_writel_rsr1(he_dev, rsr1, cid);
2310		/* 5.1.11 last parameter initialized should be
2311			  the open/closed indication in rsr0 */
2312		he_writel_rsr0(he_dev,
2313			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2314		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2315
2316		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2317	}
2318
2319open_failed:
2320
2321	if (err) {
2322		kfree(he_vcc);
2323		clear_bit(ATM_VF_ADDR, &vcc->flags);
2324	}
2325	else
2326		set_bit(ATM_VF_READY, &vcc->flags);
2327
2328	return err;
2329}
2330
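/*
 * tear down a connection: wait out any pending rx close, drain the
 * tx side by queueing an end-of-stream tpd, and release the cbr rate
 * reservation before freeing the per-vcc state
 */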
2331static void
2332he_close(struct atm_vcc *vcc)
2333{
2334	unsigned long flags;
2335	DECLARE_WAITQUEUE(wait, current);
2336	struct he_dev *he_dev = HE_DEV(vcc->dev);
2337	struct he_tpd *tpd;
2338	unsigned cid;
2339	struct he_vcc *he_vcc = HE_VCC(vcc);
2340#define MAX_RETRY 30
2341	int retry = 0, sleep = 1, tx_inuse;
2342
2343	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2344
2345	clear_bit(ATM_VF_READY, &vcc->flags);
2346	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2347
2348	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2349		int timeout;
2350
2351		HPRINTK("close rx cid 0x%x\n", cid);
2352
2353		/* 2.7.2.2 close receive operation */
2354
2355		/* wait for previous close (if any) to finish */
2356
2357		spin_lock_irqsave(&he_dev->global_lock, flags);
2358		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2359			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2360			udelay(250);
2361		}
2362
2363		set_current_state(TASK_UNINTERRUPTIBLE);
2364		add_wait_queue(&he_vcc->rx_waitq, &wait);
2365
2366		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2367		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2368		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2369		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2370
2371		timeout = schedule_timeout(30*HZ);
2372
2373		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2374		set_current_state(TASK_RUNNING);
2375
2376		if (timeout == 0)
2377			hprintk("close rx timeout cid 0x%x\n", cid);
2378
2379		HPRINTK("close rx cid 0x%x complete\n", cid);
2380
2381	}
2382
2383	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2384		volatile unsigned tsr4, tsr0;
2385		int timeout;
2386
2387		HPRINTK("close tx cid 0x%x\n", cid);
2388
2389		/* 2.1.2
2390		 *
2391		 * ... the host must first stop queueing packets to the TPDRQ
2392		 * on the connection to be closed, then wait for all outstanding
2393		 * packets to be transmitted and their buffers returned to the
2394		 * TBRQ. When the last packet on the connection arrives in the
2395		 * TBRQ, the host issues the close command to the adapter.
2396		 */
2397
2398		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2399		       (retry < MAX_RETRY)) {
2400			msleep(sleep);
2401			if (sleep < 250)
2402				sleep = sleep * 2;
2403
2404			++retry;
2405		}
2406
2407		if (tx_inuse > 1)
2408			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2409
2410		/* 2.3.1.1 generic close operations with flush */
2411
2412		spin_lock_irqsave(&he_dev->global_lock, flags);
2413		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2414					/* also clears TSR4_SESSION_ENDED */
2415
2416		switch (vcc->qos.txtp.traffic_class) {
2417			case ATM_UBR:
2418			he_writel_tsr1(he_dev,
2419					TSR1_MCR(rate_to_atmf(200000))
2420					| TSR1_PCR(0), cid);
2421				break;
2422			case ATM_CBR:
2423				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2424				break;
2425		}
2426		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2427
2428		tpd = __alloc_tpd(he_dev);
2429		if (tpd == NULL) {
2430			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2431			goto close_tx_incomplete;
2432		}
2433		tpd->status |= TPD_EOS | TPD_INT;
2434		tpd->skb = NULL;
2435		tpd->vcc = vcc;
2436		wmb();
2437
2438		set_current_state(TASK_UNINTERRUPTIBLE);
2439		add_wait_queue(&he_vcc->tx_waitq, &wait);
2440		__enqueue_tpd(he_dev, tpd, cid);
2441		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2442
2443		timeout = schedule_timeout(30*HZ);
2444
2445		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2446		set_current_state(TASK_RUNNING);
2447
2448		spin_lock_irqsave(&he_dev->global_lock, flags);
2449
2450		if (timeout == 0) {
2451			hprintk("close tx timeout cid 0x%x\n", cid);
2452			goto close_tx_incomplete;
2453		}
2454
2455		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2456			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2457			udelay(250);
2458		}
2459
2460		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2461			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2462			udelay(250);
2463		}
2464
2465close_tx_incomplete:
2466
2467		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2468			int reg = he_vcc->rc_index;
2469
2470			HPRINTK("cs_stper reg = %d\n", reg);
2471
2472			if (he_dev->cs_stper[reg].inuse == 0)
2473				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2474			else
2475				--he_dev->cs_stper[reg].inuse;
2476
2477			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2478		}
2479		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2480
2481		HPRINTK("close tx cid 0x%x complete\n", cid);
2482	}
2483
2484	kfree(he_vcc);
2485
2486	clear_bit(ATM_VF_ADDR, &vcc->flags);
2487}
2488
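/*
 * transmit path: wrap the skb in a tpd and push it onto the tpdrq;
 * with scatter/gather compiled out the skb must be linear and no
 * larger than HE_TPD_BUFSIZE
 */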
2489static int
2490he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2491{
2492	unsigned long flags;
2493	struct he_dev *he_dev = HE_DEV(vcc->dev);
2494	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2495	struct he_tpd *tpd;
2496#ifdef USE_SCATTERGATHER
2497	int i, slot = 0;
2498#endif
2499
2500#define HE_TPD_BUFSIZE 0xffff
2501
2502	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2503
2504	if ((skb->len > HE_TPD_BUFSIZE) ||
2505	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2506		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2507		if (vcc->pop)
2508			vcc->pop(vcc, skb);
2509		else
2510			dev_kfree_skb_any(skb);
2511		atomic_inc(&vcc->stats->tx_err);
2512		return -EINVAL;
2513	}
2514
2515#ifndef USE_SCATTERGATHER
2516	if (skb_shinfo(skb)->nr_frags) {
2517		hprintk("no scatter/gather support\n");
2518		if (vcc->pop)
2519			vcc->pop(vcc, skb);
2520		else
2521			dev_kfree_skb_any(skb);
2522		atomic_inc(&vcc->stats->tx_err);
2523		return -EINVAL;
2524	}
2525#endif
2526	spin_lock_irqsave(&he_dev->global_lock, flags);
2527
2528	tpd = __alloc_tpd(he_dev);
2529	if (tpd == NULL) {
2530		if (vcc->pop)
2531			vcc->pop(vcc, skb);
2532		else
2533			dev_kfree_skb_any(skb);
2534		atomic_inc(&vcc->stats->tx_err);
2535		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2536		return -ENOMEM;
2537	}
2538
2539	if (vcc->qos.aal == ATM_AAL5)
2540		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2541	else {
2542		char *pti_clp = (void *) (skb->data + 3);
2543		int clp, pti;
2544
2545		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2546		clp = (*pti_clp & ATM_HDR_CLP);
2547		tpd->status |= TPD_CELLTYPE(pti);
2548		if (clp)
2549			tpd->status |= TPD_CLP;
2550
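		/* strip the 4-byte cell header (aal0 sdu = header without
		   hec + 48 byte payload); only the payload is sent */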
2551		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2552	}
2553
2554#ifdef USE_SCATTERGATHER
2555	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
2556				skb_headlen(skb), DMA_TO_DEVICE);
2557	tpd->iovec[slot].len = skb_headlen(skb);
2558	++slot;
2559
2560	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2561		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562
2563		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2564			tpd->vcc = vcc;
2565			tpd->skb = NULL;	/* not the last fragment
2566						   so don't ->push() yet */
2567			wmb();
2568
2569			__enqueue_tpd(he_dev, tpd, cid);
2570			tpd = __alloc_tpd(he_dev);
2571			if (tpd == NULL) {
2572				if (vcc->pop)
2573					vcc->pop(vcc, skb);
2574				else
2575					dev_kfree_skb_any(skb);
2576				atomic_inc(&vcc->stats->tx_err);
2577				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2578				return -ENOMEM;
2579			}
2580			tpd->status |= TPD_USERCELL;
2581			slot = 0;
2582		}
2583
2584		tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
2585			(void *) page_address(frag->page) + frag->page_offset,
2586				frag->size, DMA_TO_DEVICE);
2587		tpd->iovec[slot].len = frag->size;
2588		++slot;
2589
2590	}
2591
2592	tpd->iovec[slot - 1].len |= TPD_LST;
2593#else
2594	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2595	tpd->length0 = skb->len | TPD_LST;
2596#endif
2597	tpd->status |= TPD_INT;
2598
2599	tpd->vcc = vcc;
2600	tpd->skb = skb;
2601	wmb();
2602	ATM_SKB(skb)->vcc = vcc;
2603
2604	__enqueue_tpd(he_dev, tpd, cid);
2605	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2606
2607	atomic_inc(&vcc->stats->tx);
2608
2609	return 0;
2610}
2611
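/*
 * HE_GET_REG gives a CAP_NET_ADMIN user read access to the pci,
 * rcm, tcm and mailbox register spaces; anything else is deferred
 * to the phy
 */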
2612static int
2613he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2614{
2615	unsigned long flags;
2616	struct he_dev *he_dev = HE_DEV(atm_dev);
2617	struct he_ioctl_reg reg;
2618	int err = 0;
2619
2620	switch (cmd) {
2621		case HE_GET_REG:
2622			if (!capable(CAP_NET_ADMIN))
2623				return -EPERM;
2624
2625			if (copy_from_user(&reg, arg,
2626					   sizeof(struct he_ioctl_reg)))
2627				return -EFAULT;
2628
2629			spin_lock_irqsave(&he_dev->global_lock, flags);
2630			switch (reg.type) {
2631				case HE_REGTYPE_PCI:
2632					if (reg.addr >= HE_REGMAP_SIZE) {
2633						err = -EINVAL;
2634						break;
2635					}
2636
2637					reg.val = he_readl(he_dev, reg.addr);
2638					break;
2639				case HE_REGTYPE_RCM:
2640					reg.val =
2641						he_readl_rcm(he_dev, reg.addr);
2642					break;
2643				case HE_REGTYPE_TCM:
2644					reg.val =
2645						he_readl_tcm(he_dev, reg.addr);
2646					break;
2647				case HE_REGTYPE_MBOX:
2648					reg.val =
2649						he_readl_mbox(he_dev, reg.addr);
2650					break;
2651				default:
2652					err = -EINVAL;
2653					break;
2654			}
2655			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2656			if (err == 0)
2657				if (copy_to_user(arg, &reg,
2658							sizeof(struct he_ioctl_reg)))
2659					return -EFAULT;
2660			break;
2661		default:
2662#ifdef CONFIG_ATM_HE_USE_SUNI
2663			if (atm_dev->phy && atm_dev->phy->ioctl)
2664				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2665#else /* CONFIG_ATM_HE_USE_SUNI */
2666			err = -EINVAL;
2667#endif /* CONFIG_ATM_HE_USE_SUNI */
2668			break;
2669	}
2670
2671	return err;
2672}
2673
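/* the phy (suni) registers sit word-spaced behind FRAMER, hence
   the addr * 4 scaling below */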
2674static void
2675he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2676{
2677	unsigned long flags;
2678	struct he_dev *he_dev = HE_DEV(atm_dev);
2679
2680	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2681
2682	spin_lock_irqsave(&he_dev->global_lock, flags);
2683	he_writel(he_dev, val, FRAMER + (addr*4));
2684	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2685	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2686}
2687
2688
2689static unsigned char
2690he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2691{
2692	unsigned long flags;
2693	struct he_dev *he_dev = HE_DEV(atm_dev);
2694	unsigned reg;
2695
2696	spin_lock_irqsave(&he_dev->global_lock, flags);
2697	reg = he_readl(he_dev, FRAMER + (addr*4));
2698	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2699
2700	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2701	return reg;
2702}
2703
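/*
 * note that the mcc/oec/dcc/cec accumulators below are function-level
 * statics, so the totals are shared by all adapters served by this
 * driver
 */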
2704static int
2705he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2706{
2707	unsigned long flags;
2708	struct he_dev *he_dev = HE_DEV(dev);
2709	int left, i;
2710#ifdef notdef
2711	struct he_rbrq *rbrq_tail;
2712	struct he_tpdrq *tpdrq_head;
2713	int rbpl_head, rbpl_tail, inuse;
2714#endif
2715	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2716
2717
2718	left = *pos;
2719	if (!left--)
2720		return sprintf(page, "ATM he driver\n");
2721
2722	if (!left--)
2723		return sprintf(page, "%s%s\n\n",
2724			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2725
2726	if (!left--)
2727		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2728
2729	spin_lock_irqsave(&he_dev->global_lock, flags);
2730	mcc += he_readl(he_dev, MCC);
2731	oec += he_readl(he_dev, OEC);
2732	dcc += he_readl(he_dev, DCC);
2733	cec += he_readl(he_dev, CEC);
2734	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2735
2736	if (!left--)
2737		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
2738							mcc, oec, dcc, cec);
2739
2740	if (!left--)
2741		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2742				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2743
2744	if (!left--)
2745		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2746						CONFIG_TPDRQ_SIZE);
2747
2748	if (!left--)
2749		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2750				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2751
2752	if (!left--)
2753		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2754					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2755
2756
2757#ifdef notdef
2758	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2759	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2760
2761	inuse = rbpl_head - rbpl_tail;
2762	if (inuse < 0)
2763		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2764	inuse /= sizeof(struct he_rbp);
2765
2766	if (!left--)
2767		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2768						CONFIG_RBPL_SIZE, inuse);
2769#endif
2770
2771	if (!left--)
2772		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2773
2774	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2775		if (!left--)
2776			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2777						he_dev->cs_stper[i].pcr,
2778						he_dev->cs_stper[i].inuse);
2779
2780	if (!left--)
2781		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2782			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2783
2784	return 0;
2785}
2786
2787/* eeprom routines  -- see 4.7 */
2788
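/*
 * bit-bang a byte out of the serial eeprom: clock out the read
 * opcode and the eight address bits, then clock the eight data bits
 * in from ID_DOUT
 */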
2789static u8 read_prom_byte(struct he_dev *he_dev, int addr)
2790{
2791	u32 val = 0, tmp_read = 0;
2792	int i, j = 0;
2793	u8 byte_read = 0;
2794
2795	val = readl(he_dev->membase + HOST_CNTL);
2796	val &= 0xFFFFE0FF;
2797
2798	/* Turn on write enable */
2799	val |= 0x800;
2800	he_writel(he_dev, val, HOST_CNTL);
2801
2802	/* Send READ instruction */
2803	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
2804		he_writel(he_dev, val | readtab[i], HOST_CNTL);
2805		udelay(EEPROM_DELAY);
2806	}
2807
2808	/* Next, we need to send the byte address to read from */
2809	for (i = 7; i >= 0; i--) {
2810		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2811		udelay(EEPROM_DELAY);
2812		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
2813		udelay(EEPROM_DELAY);
2814	}
2815
2816	j = 0;
2817
2818	val &= 0xFFFFF7FF;      /* Turn off write enable */
2819	he_writel(he_dev, val, HOST_CNTL);
2820       
2821
2822	for (i = 7; i >= 0; i--) {
2823		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2824		udelay(EEPROM_DELAY);
2825		tmp_read = he_readl(he_dev, HOST_CNTL);
2826		byte_read |= (unsigned char)
2827			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
2828		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
2829		udelay(EEPROM_DELAY);
2830	}
2831
2832	he_writel(he_dev, val | ID_CS, HOST_CNTL);
2833	udelay(EEPROM_DELAY);
2834
2835	return byte_read;
2836}
2837
2838MODULE_LICENSE("GPL");
2839MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2840MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2841module_param(disable64, bool, 0);
2842MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
2843module_param(nvpibits, short, 0);
2844MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
2845module_param(nvcibits, short, 0);
2846MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
2847module_param(rx_skb_reserve, short, 0);
2848MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
2849module_param(irq_coalesce, bool, 0);
2850MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
2851module_param(sdh, bool, 0);
2852MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2853
2854static struct pci_device_id he_pci_tbl[] = {
2855	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
2856	{ 0, }
2857};
2858
2859MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2860
2861static struct pci_driver he_driver = {
2862	.name =		"he",
2863	.probe =	he_init_one,
2864	.remove =	he_remove_one,
2865	.id_table =	he_pci_tbl,
2866};
2867
2868module_pci_driver(he_driver);
 102static irqreturn_t he_irq_handler(int irq, void *dev_id);
 103static void he_tasklet(unsigned long data);
 104static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
 105static int he_start(struct atm_dev *dev);
 106static void he_stop(struct he_dev *dev);
 107static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
 108static unsigned char he_phy_get(struct atm_dev *, unsigned long);
 109
 110static u8 read_prom_byte(struct he_dev *he_dev, int addr);
 111
 112/* globals */
 113
 114static struct he_dev *he_devs;
 115static bool disable64;
 116static short nvpibits = -1;
 117static short nvcibits = -1;
 118static short rx_skb_reserve = 16;
 119static bool irq_coalesce = true;
 120static bool sdh;
 121
 122/* Read from EEPROM = 0000 0011b */
 123static unsigned int readtab[] = {
 124	CS_HIGH | CLK_HIGH,
 125	CS_LOW | CLK_LOW,
 126	CLK_HIGH,               /* 0 */
 127	CLK_LOW,
 128	CLK_HIGH,               /* 0 */
 129	CLK_LOW,
 130	CLK_HIGH,               /* 0 */
 131	CLK_LOW,
 132	CLK_HIGH,               /* 0 */
 133	CLK_LOW,
 134	CLK_HIGH,               /* 0 */
 135	CLK_LOW,
 136	CLK_HIGH,               /* 0 */
 137	CLK_LOW | SI_HIGH,
 138	CLK_HIGH | SI_HIGH,     /* 1 */
 139	CLK_LOW | SI_HIGH,
 140	CLK_HIGH | SI_HIGH      /* 1 */
 141};     
 142 
 143/* Clock to read from/write to the EEPROM */
 144static unsigned int clocktab[] = {
 145	CLK_LOW,
 146	CLK_HIGH,
 147	CLK_LOW,
 148	CLK_HIGH,
 149	CLK_LOW,
 150	CLK_HIGH,
 151	CLK_LOW,
 152	CLK_HIGH,
 153	CLK_LOW,
 154	CLK_HIGH,
 155	CLK_LOW,
 156	CLK_HIGH,
 157	CLK_LOW,
 158	CLK_HIGH,
 159	CLK_LOW,
 160	CLK_HIGH,
 161	CLK_LOW
 162};     
 163
 164static struct atmdev_ops he_ops =
 165{
 166	.open =		he_open,
 167	.close =	he_close,	
 168	.ioctl =	he_ioctl,	
 169	.send =		he_send,
 170	.phy_put =	he_phy_put,
 171	.phy_get =	he_phy_get,
 172	.proc_read =	he_proc_read,
 173	.owner =	THIS_MODULE
 174};
 175
 176#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
 177#define he_readl(dev, reg)		readl((dev)->membase + (reg))
 178
 179/* section 2.12 connection memory access */
 180
 181static __inline__ void
 182he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
 183								unsigned flags)
 184{
 185	he_writel(he_dev, val, CON_DAT);
 186	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
 187	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
 188	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
 189}
 190
 191#define he_writel_rcm(dev, val, reg) 				\
 192			he_writel_internal(dev, val, reg, CON_CTL_RCM)
 193
 194#define he_writel_tcm(dev, val, reg) 				\
 195			he_writel_internal(dev, val, reg, CON_CTL_TCM)
 196
 197#define he_writel_mbox(dev, val, reg) 				\
 198			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
 199
 200static unsigned
 201he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
 202{
 203	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
 204	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
 205	return he_readl(he_dev, CON_DAT);
 206}
 207
 208#define he_readl_rcm(dev, reg) \
 209			he_readl_internal(dev, reg, CON_CTL_RCM)
 210
 211#define he_readl_tcm(dev, reg) \
 212			he_readl_internal(dev, reg, CON_CTL_TCM)
 213
 214#define he_readl_mbox(dev, reg) \
 215			he_readl_internal(dev, reg, CON_CTL_MBOX)
 216
 217
 218/* figure 2.2 connection id */
 219
 220#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
 221
 222/* 2.5.1 per connection transmit state registers */
 223
 224#define he_writel_tsr0(dev, val, cid) \
 225		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
 226#define he_readl_tsr0(dev, cid) \
 227		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
 228
 229#define he_writel_tsr1(dev, val, cid) \
 230		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
 231
 232#define he_writel_tsr2(dev, val, cid) \
 233		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
 234
 235#define he_writel_tsr3(dev, val, cid) \
 236		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
 237
 238#define he_writel_tsr4(dev, val, cid) \
 239		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
 240
 241	/* from page 2-20
 242	 *
 243	 * NOTE While the transmit connection is active, bits 23 through 0
 244	 *      of this register must not be written by the host.  Byte
 245	 *      enables should be used during normal operation when writing
 246	 *      the most significant byte.
 247	 */
 248
 249#define he_writel_tsr4_upper(dev, val, cid) \
 250		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
 251							CON_CTL_TCM \
 252							| CON_BYTE_DISABLE_2 \
 253							| CON_BYTE_DISABLE_1 \
 254							| CON_BYTE_DISABLE_0)
 255
 256#define he_readl_tsr4(dev, cid) \
 257		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
 258
 259#define he_writel_tsr5(dev, val, cid) \
 260		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
 261
 262#define he_writel_tsr6(dev, val, cid) \
 263		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
 264
 265#define he_writel_tsr7(dev, val, cid) \
 266		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
 267
 268
 269#define he_writel_tsr8(dev, val, cid) \
 270		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
 271
 272#define he_writel_tsr9(dev, val, cid) \
 273		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
 274
 275#define he_writel_tsr10(dev, val, cid) \
 276		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
 277
 278#define he_writel_tsr11(dev, val, cid) \
 279		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
 280
 281
 282#define he_writel_tsr12(dev, val, cid) \
 283		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
 284
 285#define he_writel_tsr13(dev, val, cid) \
 286		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
 287
 288
 289#define he_writel_tsr14(dev, val, cid) \
 290		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
 291
 292#define he_writel_tsr14_upper(dev, val, cid) \
 293		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
 294							CON_CTL_TCM \
 295							| CON_BYTE_DISABLE_2 \
 296							| CON_BYTE_DISABLE_1 \
 297							| CON_BYTE_DISABLE_0)
 298
 299/* 2.7.1 per connection receive state registers */
 300
 301#define he_writel_rsr0(dev, val, cid) \
 302		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
 303#define he_readl_rsr0(dev, cid) \
 304		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
 305
 306#define he_writel_rsr1(dev, val, cid) \
 307		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
 308
 309#define he_writel_rsr2(dev, val, cid) \
 310		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
 311
 312#define he_writel_rsr3(dev, val, cid) \
 313		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
 314
 315#define he_writel_rsr4(dev, val, cid) \
 316		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
 317
 318#define he_writel_rsr5(dev, val, cid) \
 319		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
 320
 321#define he_writel_rsr6(dev, val, cid) \
 322		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
 323
 324#define he_writel_rsr7(dev, val, cid) \
 325		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
 326
 327static __inline__ struct atm_vcc*
 328__find_vcc(struct he_dev *he_dev, unsigned cid)
 329{
 330	struct hlist_head *head;
 331	struct atm_vcc *vcc;
 332	struct sock *s;
 333	short vpi;
 334	int vci;
 335
 336	vpi = cid >> he_dev->vcibits;
 337	vci = cid & ((1 << he_dev->vcibits) - 1);
 338	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 339
 340	sk_for_each(s, head) {
 341		vcc = atm_sk(s);
 342		if (vcc->dev == he_dev->atm_dev &&
 343		    vcc->vci == vci && vcc->vpi == vpi &&
 344		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
 345				return vcc;
 346		}
 347	}
 348	return NULL;
 349}
 350
 351static int he_init_one(struct pci_dev *pci_dev,
 352		       const struct pci_device_id *pci_ent)
 353{
 354	struct atm_dev *atm_dev = NULL;
 355	struct he_dev *he_dev = NULL;
 356	int err = 0;
 357
 358	printk(KERN_INFO "ATM he driver\n");
 359
 360	if (pci_enable_device(pci_dev))
 361		return -EIO;
 362	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
 363		printk(KERN_WARNING "he: no suitable dma available\n");
 364		err = -EIO;
 365		goto init_one_failure;
 366	}
 367
 368	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
 369	if (!atm_dev) {
 370		err = -ENODEV;
 371		goto init_one_failure;
 372	}
 373	pci_set_drvdata(pci_dev, atm_dev);
 374
 375	he_dev = kzalloc(sizeof(struct he_dev),
 376							GFP_KERNEL);
 377	if (!he_dev) {
 378		err = -ENOMEM;
 379		goto init_one_failure;
 380	}
 381	he_dev->pci_dev = pci_dev;
 382	he_dev->atm_dev = atm_dev;
 383	he_dev->atm_dev->dev_data = he_dev;
 384	atm_dev->dev_data = he_dev;
 385	he_dev->number = atm_dev->number;
 386	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
 387	spin_lock_init(&he_dev->global_lock);
 388
 389	if (he_start(atm_dev)) {
 390		he_stop(he_dev);
 391		err = -ENODEV;
 392		goto init_one_failure;
 393	}
 394	he_dev->next = NULL;
 395	if (he_devs)
 396		he_dev->next = he_devs;
 397	he_devs = he_dev;
 398	return 0;
 399
 400init_one_failure:
 401	if (atm_dev)
 402		atm_dev_deregister(atm_dev);
 403	kfree(he_dev);
 404	pci_disable_device(pci_dev);
 405	return err;
 406}
 407
 408static void he_remove_one(struct pci_dev *pci_dev)
 409{
 410	struct atm_dev *atm_dev;
 411	struct he_dev *he_dev;
 412
 413	atm_dev = pci_get_drvdata(pci_dev);
 414	he_dev = HE_DEV(atm_dev);
 415
 416	/* need to remove from he_devs */
 417
 418	he_stop(he_dev);
 419	atm_dev_deregister(atm_dev);
 420	kfree(he_dev);
 421
 422	pci_disable_device(pci_dev);
 423}
 424
 425
 426static unsigned
 427rate_to_atmf(unsigned rate)		/* cps to atm forum format */
 428{
 429#define NONZERO (1 << 14)
 430
 431	unsigned exp = 0;
 432
 433	if (rate == 0)
 434		return 0;
 435
 436	rate <<= 9;
 437	while (rate > 0x3ff) {
 438		++exp;
 439		rate >>= 1;
 440	}
 441
 442	return (NONZERO | (exp << 9) | (rate & 0x1ff));
 443}
 444
 445static void he_init_rx_lbfp0(struct he_dev *he_dev)
 446{
 447	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
 448	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
 449	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
 450	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
 451	
 452	lbufd_index = 0;
 453	lbm_offset = he_readl(he_dev, RCMLBM_BA);
 454
 455	he_writel(he_dev, lbufd_index, RLBF0_H);
 456
 457	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
 458		lbufd_index += 2;
 459		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
 460
 461		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
 462		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
 463
 464		if (++lbuf_count == lbufs_per_row) {
 465			lbuf_count = 0;
 466			row_offset += he_dev->bytes_per_row;
 467		}
 468		lbm_offset += 4;
 469	}
 470		
 471	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
 472	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
 473}
 474
 475static void he_init_rx_lbfp1(struct he_dev *he_dev)
 476{
 477	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
 478	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
 479	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
 480	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
 481	
 482	lbufd_index = 1;
 483	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
 484
 485	he_writel(he_dev, lbufd_index, RLBF1_H);
 486
 487	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
 488		lbufd_index += 2;
 489		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
 490
 491		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
 492		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
 493
 494		if (++lbuf_count == lbufs_per_row) {
 495			lbuf_count = 0;
 496			row_offset += he_dev->bytes_per_row;
 497		}
 498		lbm_offset += 4;
 499	}
 500		
 501	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
 502	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
 503}
 504
 505static void he_init_tx_lbfp(struct he_dev *he_dev)
 506{
 507	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
 508	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
 509	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
 510	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
 511	
 512	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
 513	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
 514
 515	he_writel(he_dev, lbufd_index, TLBF_H);
 516
 517	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
 518		lbufd_index += 1;
 519		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
 520
 521		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
 522		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
 523
 524		if (++lbuf_count == lbufs_per_row) {
 525			lbuf_count = 0;
 526			row_offset += he_dev->bytes_per_row;
 527		}
 528		lbm_offset += 2;
 529	}
 530		
 531	he_writel(he_dev, lbufd_index - 1, TLBF_T);
 532}
 533
 534static int he_init_tpdrq(struct he_dev *he_dev)
 535{
 536	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 537						 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
 538						 &he_dev->tpdrq_phys, GFP_KERNEL);
 539	if (he_dev->tpdrq_base == NULL) {
 540		hprintk("failed to alloc tpdrq\n");
 541		return -ENOMEM;
 542	}
 543
 544	he_dev->tpdrq_tail = he_dev->tpdrq_base;
 545	he_dev->tpdrq_head = he_dev->tpdrq_base;
 546
 547	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
 548	he_writel(he_dev, 0, TPDRQ_T);	
 549	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
 550
 551	return 0;
 552}
 553
 554static void he_init_cs_block(struct he_dev *he_dev)
 555{
 556	unsigned clock, rate, delta;
 557	int reg;
 558
 559	/* 5.1.7 cs block initialization */
 560
 561	for (reg = 0; reg < 0x20; ++reg)
 562		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
 563
 564	/* rate grid timer reload values */
 565
 566	clock = he_is622(he_dev) ? 66667000 : 50000000;
 567	rate = he_dev->atm_dev->link_rate;
 568	delta = rate / 16 / 2;
 569
 570	for (reg = 0; reg < 0x10; ++reg) {
 571		/* 2.4 internal transmit function
 572		 *
 573	 	 * we initialize the first row in the rate grid.
 574		 * values are period (in clock cycles) of timer
 575		 */
 576		unsigned period = clock / rate;
 577
 578		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
 579		rate -= delta;
 580	}
 581
 582	if (he_is622(he_dev)) {
 583		/* table 5.2 (4 cells per lbuf) */
 584		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
 585		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
 586		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
 587		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
 588		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
 589
 590		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
 591		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
 592		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
 593		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
 594		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
 595		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
 596		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
 597
 598		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
 599
 600		/* table 5.8 */
 601		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
 602		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
 603		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
 604		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
 605		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
 606		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
 607
 608		/* table 5.9 */
 609		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
 610		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
 611	} else {
 612		/* table 5.1 (4 cells per lbuf) */
 613		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
 614		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
 615		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
 616		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
 617		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
 618
 619		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
 620		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
 621		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
 622		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
 623		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
 624		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
 625		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
 626
 627		he_writel_mbox(he_dev, 0x4680, CS_RTATR);
 628
 629		/* table 5.8 */
 630		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
 631		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
 632		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
 633		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
 634		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
 635		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
 636
 637		/* table 5.9 */
 638		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
 639		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
 640	}
 641
 642	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
 643
 644	for (reg = 0; reg < 0x8; ++reg)
 645		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
 646
 647}
 648
 649static int he_init_cs_block_rcm(struct he_dev *he_dev)
 650{
 651	unsigned (*rategrid)[16][16];
 652	unsigned rate, delta;
 653	int i, j, reg;
 654
 655	unsigned rate_atmf, exp, man;
 656	unsigned long long rate_cps;
 657	int mult, buf, buf_limit = 4;
 658
 659	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
 660	if (!rategrid)
 661		return -ENOMEM;
 662
 663	/* initialize rate grid group table */
 664
 665	for (reg = 0x0; reg < 0xff; ++reg)
 666		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
 667
 668	/* initialize rate controller groups */
 669
 670	for (reg = 0x100; reg < 0x1ff; ++reg)
 671		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
 672	
 673	/* initialize tNrm lookup table */
 674
 675	/* the manual makes reference to a routine in a sample driver
 676	   for proper configuration; fortunately, we only need this
 677	   in order to support abr connection */
 678	
 679	/* initialize rate to group table */
 680
 681	rate = he_dev->atm_dev->link_rate;
 682	delta = rate / 32;
 683
 684	/*
 685	 * 2.4 transmit internal functions
 686	 * 
 687	 * we construct a copy of the rate grid used by the scheduler
 688	 * in order to construct the rate to group table below
 689	 */
 690
 691	for (j = 0; j < 16; j++) {
 692		(*rategrid)[0][j] = rate;
 693		rate -= delta;
 694	}
 695
 696	for (i = 1; i < 16; i++)
 697		for (j = 0; j < 16; j++)
 698			if (i > 14)
 699				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
 700			else
 701				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
 702
 703	/*
 704	 * 2.4 transmit internal function
 705	 *
 706	 * this table maps the upper 5 bits of exponent and mantissa
 707	 * of the atm forum representation of the rate into an index
 708	 * on rate grid  
 709	 */
 710
 711	rate_atmf = 0;
 712	while (rate_atmf < 0x400) {
 713		man = (rate_atmf & 0x1f) << 4;
 714		exp = rate_atmf >> 5;
 715
 716		/* 
 717			instead of '/ 512', use '>> 9' to prevent a call
 718			to divdu3 on x86 platforms
 719		*/
 720		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
 721
 722		if (rate_cps < 10)
 723			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
 724
 725		for (i = 255; i > 0; i--)
 726			if ((*rategrid)[i/16][i%16] >= rate_cps)
 727				break;	 /* pick nearest rate instead? */
 728
 729		/*
 730		 * each table entry is 16 bits: (rate grid index (8 bits)
 731		 * and a buffer limit (8 bits)
 732		 * there are two table entries in each 32-bit register
 733		 */
 734
 735#ifdef notdef
 736		buf = rate_cps * he_dev->tx_numbuffs /
 737				(he_dev->atm_dev->link_rate * 2);
 738#else
 739		/* this is pretty, but avoids _divdu3 and is mostly correct */
 740		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
 741		if (rate_cps > (272 * mult))
 742			buf = 4;
 743		else if (rate_cps > (204 * mult))
 744			buf = 3;
 745		else if (rate_cps > (136 * mult))
 746			buf = 2;
 747		else if (rate_cps > (68 * mult))
 748			buf = 1;
 749		else
 750			buf = 0;
 751#endif
 752		if (buf > buf_limit)
 753			buf = buf_limit;
 754		reg = (reg << 16) | ((i << 8) | buf);
 755
 756#define RTGTBL_OFFSET 0x400
 757	  
 758		if (rate_atmf & 0x1)
 759			he_writel_rcm(he_dev, reg,
 760				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
 761
 762		++rate_atmf;
 763	}
 764
 765	kfree(rategrid);
 766	return 0;
 767}
 768
 769static int he_init_group(struct he_dev *he_dev, int group)
 770{
 771	struct he_buff *heb, *next;
 772	dma_addr_t mapping;
 773	int i;
 774
 775	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
 776	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
 777	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
 778	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
 779		  G0_RBPS_BS + (group * 32));
 780
 781	/* bitmap table */
 782	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
 783				     * sizeof(unsigned long), GFP_KERNEL);
 784	if (!he_dev->rbpl_table) {
 785		hprintk("unable to allocate rbpl bitmap table\n");
 786		return -ENOMEM;
 787	}
 788	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
 789
 790	/* rbpl_virt 64-bit pointers */
 791	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
 792				    * sizeof(struct he_buff *), GFP_KERNEL);
 793	if (!he_dev->rbpl_virt) {
 794		hprintk("unable to allocate rbpl virt table\n");
 795		goto out_free_rbpl_table;
 796	}
 797
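	/*
	 * rbpl_table and rbpl_virt[] pair a bitmap of live slots with
	 * the he_buff pointers behind them: the receive path recovers
	 * a buffer from the idx field of a returned descriptor (see
	 * he_service_rbrq), and he_service_rbpl scans for a clear bit
	 * when refilling the pool
	 */
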
 798	/* large buffer pool */
 799	he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
 800					    CONFIG_RBPL_BUFSIZE, 64, 0);
 801	if (he_dev->rbpl_pool == NULL) {
 802		hprintk("unable to create rbpl pool\n");
 803		goto out_free_rbpl_virt;
 804	}
 805
 806	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 807						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
 808						&he_dev->rbpl_phys, GFP_KERNEL);
 809	if (he_dev->rbpl_base == NULL) {
 810		hprintk("failed to alloc rbpl_base\n");
 811		goto out_destroy_rbpl_pool;
 812	}
 813
 814	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
 815
 816	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
 817
 818		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
 819		if (!heb)
 820			goto out_free_rbpl;
 821		heb->mapping = mapping;
 822		list_add(&heb->entry, &he_dev->rbpl_outstanding);
 823
 824		set_bit(i, he_dev->rbpl_table);
 825		he_dev->rbpl_virt[i] = heb;
 826		he_dev->rbpl_hint = i + 1;
 827		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
 828		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
 829	}
 830	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
 831
 832	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
 833	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
 834						G0_RBPL_T + (group * 32));
 835	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
 836						G0_RBPL_BS + (group * 32));
 837	he_writel(he_dev,
 838			RBP_THRESH(CONFIG_RBPL_THRESH) |
 839			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
 840			RBP_INT_ENB,
 841						G0_RBPL_QI + (group * 32));
 842
 843	/* rx buffer ready queue */
 844
 845	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 846						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
 847						&he_dev->rbrq_phys, GFP_KERNEL);
 848	if (he_dev->rbrq_base == NULL) {
 849		hprintk("failed to allocate rbrq\n");
 850		goto out_free_rbpl;
 851	}
 852
 853	he_dev->rbrq_head = he_dev->rbrq_base;
 854	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
 855	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
 856	he_writel(he_dev,
 857		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
 858						G0_RBRQ_Q + (group * 16));
 859	if (irq_coalesce) {
 860		hprintk("coalescing interrupts\n");
 861		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
 862						G0_RBRQ_I + (group * 16));
 863	} else
 864		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
 865						G0_RBRQ_I + (group * 16));
 866
 867	/* tx buffer ready queue */
 868
 869	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 870						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
 871						&he_dev->tbrq_phys, GFP_KERNEL);
 872	if (he_dev->tbrq_base == NULL) {
 873		hprintk("failed to allocate tbrq\n");
 874		goto out_free_rbpq_base;
 875	}
 876
 877	he_dev->tbrq_head = he_dev->tbrq_base;
 878
 879	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
 880	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
 881	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
 882	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
 883
 884	return 0;
 885
 886out_free_rbpq_base:
 887	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
 888			  sizeof(struct he_rbrq), he_dev->rbrq_base,
 889			  he_dev->rbrq_phys);
 890out_free_rbpl:
 891	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
 892		dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
 893
 894	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
 895			  sizeof(struct he_rbp), he_dev->rbpl_base,
 896			  he_dev->rbpl_phys);
 897out_destroy_rbpl_pool:
 898	dma_pool_destroy(he_dev->rbpl_pool);
 899out_free_rbpl_virt:
 900	kfree(he_dev->rbpl_virt);
 901out_free_rbpl_table:
 902	kfree(he_dev->rbpl_table);
 903
 904	return -ENOMEM;
 905}
 906
 907static int he_init_irq(struct he_dev *he_dev)
 908{
 909	int i;
 910
 911	/* 2.9.3.5  tail offset for each interrupt queue is located after the
 912		    end of the interrupt queue */
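	/* i.e. the adapter writes the tail index into the extra word
	   just past the queue proper, which is why the allocation below
	   asks for CONFIG_IRQ_SIZE + 1 entries */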
 913
 914	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
 915					       (CONFIG_IRQ_SIZE + 1)
 916					       * sizeof(struct he_irq),
 917					       &he_dev->irq_phys,
 918					       GFP_KERNEL);
 919	if (he_dev->irq_base == NULL) {
 920		hprintk("failed to allocate irq\n");
 921		return -ENOMEM;
 922	}
 923	he_dev->irq_tailoffset = (unsigned *)
 924					&he_dev->irq_base[CONFIG_IRQ_SIZE];
 925	*he_dev->irq_tailoffset = 0;
 926	he_dev->irq_head = he_dev->irq_base;
 927	he_dev->irq_tail = he_dev->irq_base;
 928
 929	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
 930		he_dev->irq_base[i].isw = ITYPE_INVALID;
 931
 932	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
 933	he_writel(he_dev,
 934		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
 935								IRQ0_HEAD);
 936	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
 937	he_writel(he_dev, 0x0, IRQ0_DATA);
 938
 939	he_writel(he_dev, 0x0, IRQ1_BASE);
 940	he_writel(he_dev, 0x0, IRQ1_HEAD);
 941	he_writel(he_dev, 0x0, IRQ1_CNTL);
 942	he_writel(he_dev, 0x0, IRQ1_DATA);
 943
 944	he_writel(he_dev, 0x0, IRQ2_BASE);
 945	he_writel(he_dev, 0x0, IRQ2_HEAD);
 946	he_writel(he_dev, 0x0, IRQ2_CNTL);
 947	he_writel(he_dev, 0x0, IRQ2_DATA);
 948
 949	he_writel(he_dev, 0x0, IRQ3_BASE);
 950	he_writel(he_dev, 0x0, IRQ3_HEAD);
 951	he_writel(he_dev, 0x0, IRQ3_CNTL);
 952	he_writel(he_dev, 0x0, IRQ3_DATA);
 953
 954	/* 2.9.3.2 interrupt queue mapping registers */
 955
 956	he_writel(he_dev, 0x0, GRP_10_MAP);
 957	he_writel(he_dev, 0x0, GRP_32_MAP);
 958	he_writel(he_dev, 0x0, GRP_54_MAP);
 959	he_writel(he_dev, 0x0, GRP_76_MAP);
 960
 961	if (request_irq(he_dev->pci_dev->irq,
 962			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
 963		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
 964		return -EINVAL;
 965	}   
 966
 967	he_dev->irq = he_dev->pci_dev->irq;
 968
 969	return 0;
 970}
 971
 972static int he_start(struct atm_dev *dev)
 973{
 974	struct he_dev *he_dev;
 975	struct pci_dev *pci_dev;
 976	unsigned long membase;
 977
 978	u16 command;
 979	u32 gen_cntl_0, host_cntl, lb_swap;
 980	u8 cache_size, timer;
 981	
 982	unsigned err;
 983	unsigned int status, reg;
 984	int i, group;
 985
 986	he_dev = HE_DEV(dev);
 987	pci_dev = he_dev->pci_dev;
 988
 989	membase = pci_resource_start(pci_dev, 0);
 990	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
 991
 992	/*
 993	 * pci bus controller initialization 
 994	 */
 995
 996	/* 4.3 pci bus controller-specific initialization */
 997	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
 998		hprintk("can't read GEN_CNTL_0\n");
 999		return -EINVAL;
1000	}
1001	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1002	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1003		hprintk("can't write GEN_CNTL_0.\n");
1004		return -EINVAL;
1005	}
1006
1007	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1008		hprintk("can't read PCI_COMMAND.\n");
1009		return -EINVAL;
1010	}
1011
1012	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1013	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1014		hprintk("can't enable memory.\n");
1015		return -EINVAL;
1016	}
1017
1018	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1019		hprintk("can't read cache line size?\n");
1020		return -EINVAL;
1021	}
1022
1023	if (cache_size < 16) {
1024		cache_size = 16;
1025		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1026			hprintk("can't set cache line size to %d\n", cache_size);
1027	}
1028
1029	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1030		hprintk("can't read latency timer?\n");
1031		return -EINVAL;
1032	}
1033
1034	/* from table 3.9
1035	 *
1036	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1037	 * 
1038	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1039	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1040	 *
1041	 */ 
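	/*
	 * worked out (illustrative): 1 + 16 (AVG_LAT) + 1536/8
	 * (BURST_SIZE over a 64-bit bus) = 1 + 16 + 192 = 209 pci
	 * clocks, which appears to be where the constant below
	 * comes from
	 */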
1042#define LAT_TIMER 209
1043	if (timer < LAT_TIMER) {
1044		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1045		timer = LAT_TIMER;
1046		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1047			hprintk("can't set latency timer to %d\n", timer);
1048	}
1049
1050	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1051		hprintk("can't set up page mapping\n");
1052		return -EINVAL;
1053	}
1054
1055	/* 4.4 card reset */
1056	he_writel(he_dev, 0x0, RESET_CNTL);
1057	he_writel(he_dev, 0xff, RESET_CNTL);
1058
1059	msleep(16);	/* 16 ms */
1060	status = he_readl(he_dev, RESET_CNTL);
1061	if ((status & BOARD_RST_STATUS) == 0) {
1062		hprintk("reset failed\n");
1063		return -EINVAL;
1064	}
1065
1066	/* 4.5 set bus width */
1067	host_cntl = he_readl(he_dev, HOST_CNTL);
1068	if (host_cntl & PCI_BUS_SIZE64)
1069		gen_cntl_0 |= ENBL_64;
1070	else
1071		gen_cntl_0 &= ~ENBL_64;
1072
1073	if (disable64 == 1) {
1074		hprintk("disabling 64-bit pci bus transfers\n");
1075		gen_cntl_0 &= ~ENBL_64;
1076	}
1077
1078	if (gen_cntl_0 & ENBL_64)
1079		hprintk("64-bit transfers enabled\n");
1080
1081	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1082
1083	/* 4.7 read prom contents */
1084	for (i = 0; i < PROD_ID_LEN; ++i)
1085		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1086
1087	he_dev->media = read_prom_byte(he_dev, MEDIA);
1088
1089	for (i = 0; i < 6; ++i)
1090		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1091
1092	hprintk("%s%s, %pM\n", he_dev->prod_id,
1093		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1094	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1095						ATM_OC12_PCR : ATM_OC3_PCR;
1096
1097	/* 4.6 set host endianness */
1098	lb_swap = he_readl(he_dev, LB_SWAP);
1099	if (he_is622(he_dev))
1100		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1101	else
1102		lb_swap |= XFER_SIZE;		/* 8 cells */
1103#ifdef __BIG_ENDIAN
1104	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1105#else
1106	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1107			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1108#endif /* __BIG_ENDIAN */
1109	he_writel(he_dev, lb_swap, LB_SWAP);
1110
1111	/* 4.8 sdram controller initialization */
1112	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1113
1114	/* 4.9 initialize rnum value */
1115	lb_swap |= SWAP_RNUM_MAX(0xf);
1116	he_writel(he_dev, lb_swap, LB_SWAP);
1117
1118	/* 4.10 initialize the interrupt queues */
1119	if ((err = he_init_irq(he_dev)) != 0)
1120		return err;
1121
1122	/* 4.11 enable pci bus controller state machines */
1123	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1124				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1125	he_writel(he_dev, host_cntl, HOST_CNTL);
1126
1127	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1128	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1129
1130	/*
1131	 * atm network controller initialization
1132	 */
1133
1134	/* 5.1.1 generic configuration state */
1135
1136	/*
1137	 *		local (cell) buffer memory map
1138	 *                    
1139	 *             HE155                          HE622
1140	 *                                                      
1141	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1142	 *         |            |            |                   |   |
1143	 *         |  utility   |            |        rx0        |   |
1144	 *        5|____________|         255|___________________| u |
1145	 *        6|            |         256|                   | t |
1146	 *         |            |            |                   | i |
1147	 *         |    rx0     |     row    |        tx         | l |
1148	 *         |            |            |                   | i |
1149	 *         |            |         767|___________________| t |
1150	 *      517|____________|         768|                   | y |
1151	 * row  518|            |            |        rx1        |   |
1152	 *         |            |        1023|___________________|___|
1153	 *         |            |
1154	 *         |    tx      |
1155	 *         |            |
1156	 *         |            |
1157	 *     1535|____________|
1158	 *     1536|            |
1159	 *         |    rx1     |
1160	 *     2047|____________|
1161	 *
1162	 */
1163
1164	/* total 4096 connections */
1165	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1166	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1167
1168	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1169		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1170		return -ENODEV;
1171	}
1172
1173	if (nvpibits != -1) {
1174		he_dev->vpibits = nvpibits;
1175		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1176	}
1177
1178	if (nvcibits != -1) {
1179		he_dev->vcibits = nvcibits;
1180		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1181	}
1182
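	/*
	 * illustrative: vpibits + vcibits must equal HE_MAXCIDBITS, and
	 * he_mkcid() presumably packs ((vpi << vcibits) | vci) into that
	 * many bits, e.g. 12 bits -> 4096 connection ids
	 */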
1183
1184	if (he_is622(he_dev)) {
1185		he_dev->cells_per_row = 40;
1186		he_dev->bytes_per_row = 2048;
1187		he_dev->r0_numrows = 256;
1188		he_dev->tx_numrows = 512;
1189		he_dev->r1_numrows = 256;
1190		he_dev->r0_startrow = 0;
1191		he_dev->tx_startrow = 256;
1192		he_dev->r1_startrow = 768;
1193	} else {
1194		he_dev->cells_per_row = 20;
1195		he_dev->bytes_per_row = 1024;
1196		he_dev->r0_numrows = 512;
1197		he_dev->tx_numrows = 1018;
1198		he_dev->r1_numrows = 512;
1199		he_dev->r0_startrow = 6;
1200		he_dev->tx_startrow = 518;
1201		he_dev->r1_startrow = 1536;
1202	}
1203
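	/*
	 * checking the table above (illustrative arithmetic): each row
	 * stores cells_per_row 48-byte cell payloads (20 * 48 = 960 of a
	 * 1024-byte he155 row, 40 * 48 = 1920 of a 2048-byte he622 row);
	 * a local buffer is cells_per_lbuf = 4 cells, so e.g. the he155
	 * r0 area yields 512 * 20 / 4 = 2560 buffers -- exactly the cap
	 * applied below
	 */
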
1204	he_dev->cells_per_lbuf = 4;
1205	he_dev->buffer_limit = 4;
1206	he_dev->r0_numbuffs = he_dev->r0_numrows *
1207				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1208	if (he_dev->r0_numbuffs > 2560)
1209		he_dev->r0_numbuffs = 2560;
1210
1211	he_dev->r1_numbuffs = he_dev->r1_numrows *
1212				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1213	if (he_dev->r1_numbuffs > 2560)
1214		he_dev->r1_numbuffs = 2560;
1215
1216	he_dev->tx_numbuffs = he_dev->tx_numrows *
1217				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1218	if (he_dev->tx_numbuffs > 5120)
1219		he_dev->tx_numbuffs = 5120;
1220
1221	/* 5.1.2 configure hardware dependent registers */
1222
1223	he_writel(he_dev, 
1224		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1225		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1226		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1227		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1228								LBARB);
1229
1230	he_writel(he_dev, BANK_ON |
1231		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1232								SDRAMCON);
1233
1234	he_writel(he_dev,
1235		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1236						RM_RW_WAIT(1), RCMCONFIG);
1237	he_writel(he_dev,
1238		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1239						TM_RW_WAIT(1), TCMCONFIG);
1240
1241	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1242
1243	he_writel(he_dev, 
1244		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1245		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1246		RX_VALVP(he_dev->vpibits) |
1247		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1248
1249	he_writel(he_dev, DRF_THRESH(0x20) |
1250		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1251		TX_VCI_MASK(he_dev->vcibits) |
1252		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1253
1254	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1255
1256	he_writel(he_dev, PHY_INT_ENB |
1257		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1258								RH_CONFIG);
1259
1260	/* 5.1.3 initialize connection memory */
1261
1262	for (i = 0; i < TCM_MEM_SIZE; ++i)
1263		he_writel_tcm(he_dev, 0, i);
1264
1265	for (i = 0; i < RCM_MEM_SIZE; ++i)
1266		he_writel_rcm(he_dev, 0, i);
1267
1268	/*
1269	 *	transmit connection memory map
1270	 *
1271	 *                  tx memory
1272	 *          0x0 ___________________
1273	 *             |                   |
1274	 *             |                   |
1275	 *             |       TSRa        |
1276	 *             |                   |
1277	 *             |                   |
1278	 *       0x8000|___________________|
1279	 *             |                   |
1280	 *             |       TSRb        |
1281	 *       0xc000|___________________|
1282	 *             |                   |
1283	 *             |       TSRc        |
1284	 *       0xe000|___________________|
1285	 *             |       TSRd        |
1286	 *       0xf000|___________________|
1287	 *             |       tmABR       |
1288	 *      0x10000|___________________|
1289	 *             |                   |
1290	 *             |       tmTPD       |
1291	 *             |___________________|
1292	 *             |                   |
1293	 *                      ....
1294	 *      0x1ffff|___________________|
1295	 *
1296	 *
1297	 */
1298
1299	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1300	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1301	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1302	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1303	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1304
1305
1306	/*
1307	 *	receive connection memory map
1308	 *
1309	 *          0x0 ___________________
1310	 *             |                   |
1311	 *             |                   |
1312	 *             |       RSRa        |
1313	 *             |                   |
1314	 *             |                   |
1315	 *       0x8000|___________________|
1316	 *             |                   |
1317	 *             |             rx0/1 |
1318	 *             |       LBM         |   linked lists of local
1319	 *             |             tx    |   buffer memory
1320	 *             |                   |
1321	 *       0xd000|___________________|
1322	 *             |                   |
1323	 *             |      rmABR        |
1324	 *       0xe000|___________________|
1325	 *             |                   |
1326	 *             |       RSRb        |
1327	 *             |___________________|
1328	 *             |                   |
1329	 *                      ....
1330	 *       0xffff|___________________|
1331	 */
1332
1333	he_writel(he_dev, 0x08000, RCMLBM_BA);
1334	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1335	he_writel(he_dev, 0x0d800, RCMABR_BA);
1336
1337	/* 5.1.4 initialize local buffer free pool linked lists */
1338
1339	he_init_rx_lbfp0(he_dev);
1340	he_init_rx_lbfp1(he_dev);
1341
1342	he_writel(he_dev, 0x0, RLBC_H);
1343	he_writel(he_dev, 0x0, RLBC_T);
1344	he_writel(he_dev, 0x0, RLBC_H2);
1345
1346	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1347	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1348
1349	he_init_tx_lbfp(he_dev);
1350
1351	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1352
1353	/* 5.1.5 initialize intermediate receive queues */
1354
1355	if (he_is622(he_dev)) {
1356		he_writel(he_dev, 0x000f, G0_INMQ_S);
1357		he_writel(he_dev, 0x200f, G0_INMQ_L);
1358
1359		he_writel(he_dev, 0x001f, G1_INMQ_S);
1360		he_writel(he_dev, 0x201f, G1_INMQ_L);
1361
1362		he_writel(he_dev, 0x002f, G2_INMQ_S);
1363		he_writel(he_dev, 0x202f, G2_INMQ_L);
1364
1365		he_writel(he_dev, 0x003f, G3_INMQ_S);
1366		he_writel(he_dev, 0x203f, G3_INMQ_L);
1367
1368		he_writel(he_dev, 0x004f, G4_INMQ_S);
1369		he_writel(he_dev, 0x204f, G4_INMQ_L);
1370
1371		he_writel(he_dev, 0x005f, G5_INMQ_S);
1372		he_writel(he_dev, 0x205f, G5_INMQ_L);
1373
1374		he_writel(he_dev, 0x006f, G6_INMQ_S);
1375		he_writel(he_dev, 0x206f, G6_INMQ_L);
1376
1377		he_writel(he_dev, 0x007f, G7_INMQ_S);
1378		he_writel(he_dev, 0x207f, G7_INMQ_L);
1379	} else {
1380		he_writel(he_dev, 0x0000, G0_INMQ_S);
1381		he_writel(he_dev, 0x0008, G0_INMQ_L);
1382
1383		he_writel(he_dev, 0x0001, G1_INMQ_S);
1384		he_writel(he_dev, 0x0009, G1_INMQ_L);
1385
1386		he_writel(he_dev, 0x0002, G2_INMQ_S);
1387		he_writel(he_dev, 0x000a, G2_INMQ_L);
1388
1389		he_writel(he_dev, 0x0003, G3_INMQ_S);
1390		he_writel(he_dev, 0x000b, G3_INMQ_L);
1391
1392		he_writel(he_dev, 0x0004, G4_INMQ_S);
1393		he_writel(he_dev, 0x000c, G4_INMQ_L);
1394
1395		he_writel(he_dev, 0x0005, G5_INMQ_S);
1396		he_writel(he_dev, 0x000d, G5_INMQ_L);
1397
1398		he_writel(he_dev, 0x0006, G6_INMQ_S);
1399		he_writel(he_dev, 0x000e, G6_INMQ_L);
1400
1401		he_writel(he_dev, 0x0007, G7_INMQ_S);
1402		he_writel(he_dev, 0x000f, G7_INMQ_L);
1403	}
1404
1405	/* 5.1.6 application tunable parameters */
1406
1407	he_writel(he_dev, 0x0, MCC);
1408	he_writel(he_dev, 0x0, OEC);
1409	he_writel(he_dev, 0x0, DCC);
1410	he_writel(he_dev, 0x0, CEC);
1411	
1412	/* 5.1.7 cs block initialization */
1413
1414	he_init_cs_block(he_dev);
1415
1416	/* 5.1.8 cs block connection memory initialization */
1417	
1418	if (he_init_cs_block_rcm(he_dev) < 0)
1419		return -ENOMEM;
1420
1421	/* 5.1.10 initialize host structures */
1422
1423	he_init_tpdrq(he_dev);
1424
1425	he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
1426					   sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1427	if (he_dev->tpd_pool == NULL) {
1428		hprintk("unable to create tpd dma_pool\n");
1429		return -ENOMEM;         
1430	}
1431
1432	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1433
1434	if (he_init_group(he_dev, 0) != 0)
1435		return -ENOMEM;
1436
1437	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1438		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1439		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1440		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1441		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1442						G0_RBPS_BS + (group * 32));
1443
1444		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1445		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1446		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1447						G0_RBPL_QI + (group * 32));
1448		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1449
1450		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1451		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1452		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1453						G0_RBRQ_Q + (group * 16));
1454		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1455
1456		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1457		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1458		he_writel(he_dev, TBRQ_THRESH(0x1),
1459						G0_TBRQ_THRESH + (group * 16));
1460		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1461	}
1462
1463	/* host status page */
1464
1465	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
1466					  sizeof(struct he_hsp),
1467					  &he_dev->hsp_phys, GFP_KERNEL);
1468	if (he_dev->hsp == NULL) {
1469		hprintk("failed to allocate host status page\n");
1470		return -ENOMEM;
1471	}
1472	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1473
1474	/* initialize framer */
1475
1476#ifdef CONFIG_ATM_HE_USE_SUNI
1477	if (he_isMM(he_dev))
1478		suni_init(he_dev->atm_dev);
1479	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1480		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1481#endif /* CONFIG_ATM_HE_USE_SUNI */
1482
1483	if (sdh) {
1484		/* this really should be in suni.c but for now... */
1485		int val;
1486
1487		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1488		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1489		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1490		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1491	}
1492
1493	/* 5.1.12 enable transmit and receive */
1494
1495	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1496	reg |= TX_ENABLE|ER_ENABLE;
1497	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1498
1499	reg = he_readl(he_dev, RC_CONFIG);
1500	reg |= RX_ENABLE;
1501	he_writel(he_dev, reg, RC_CONFIG);
1502
1503	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1504		he_dev->cs_stper[i].inuse = 0;
1505		he_dev->cs_stper[i].pcr = -1;
1506	}
1507	he_dev->total_bw = 0;
1508
1509
1510	/* atm linux initialization */
1511
1512	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1513	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1514
1515	he_dev->irq_peak = 0;
1516	he_dev->rbrq_peak = 0;
1517	he_dev->rbpl_peak = 0;
1518	he_dev->tbrq_peak = 0;
1519
1520	HPRINTK("hell bent for leather!\n");
1521
1522	return 0;
1523}
1524
1525static void
1526he_stop(struct he_dev *he_dev)
1527{
1528	struct he_buff *heb, *next;
1529	struct pci_dev *pci_dev;
1530	u32 gen_cntl_0, reg;
1531	u16 command;
1532
1533	pci_dev = he_dev->pci_dev;
1534
1535	/* disable interrupts */
1536
1537	if (he_dev->membase) {
1538		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1539		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1540		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1541
1542		tasklet_disable(&he_dev->tasklet);
1543
1544		/* disable recv and transmit */
1545
1546		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1547		reg &= ~(TX_ENABLE|ER_ENABLE);
1548		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1549
1550		reg = he_readl(he_dev, RC_CONFIG);
1551		reg &= ~(RX_ENABLE);
1552		he_writel(he_dev, reg, RC_CONFIG);
1553	}
1554
1555#ifdef CONFIG_ATM_HE_USE_SUNI
1556	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1557		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1558#endif /* CONFIG_ATM_HE_USE_SUNI */
1559
1560	if (he_dev->irq)
1561		free_irq(he_dev->irq, he_dev);
1562
1563	if (he_dev->irq_base)
1564		dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
1565				  * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1566
1567	if (he_dev->hsp)
1568		dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
1569				  he_dev->hsp, he_dev->hsp_phys);
1570
1571	if (he_dev->rbpl_base) {
1572		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1573			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1574
1575		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
1576				  * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1577	}
1578
1579	kfree(he_dev->rbpl_virt);
1580	kfree(he_dev->rbpl_table);
1581	dma_pool_destroy(he_dev->rbpl_pool);
1582
1583	if (he_dev->rbrq_base)
1584		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1585				  he_dev->rbrq_base, he_dev->rbrq_phys);
1586
1587	if (he_dev->tbrq_base)
1588		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1589				  he_dev->tbrq_base, he_dev->tbrq_phys);
1590
1591	if (he_dev->tpdrq_base)
1592		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1593				  he_dev->tpdrq_base, he_dev->tpdrq_phys);
1594
1595	dma_pool_destroy(he_dev->tpd_pool);
1596
1597	if (he_dev->pci_dev) {
1598		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1599		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1600		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1601	}
1602	
1603	if (he_dev->membase)
1604		iounmap(he_dev->membase);
1605}
1606
1607static struct he_tpd *
1608__alloc_tpd(struct he_dev *he_dev)
1609{
1610	struct he_tpd *tpd;
1611	dma_addr_t mapping;
1612
1613	tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
1614	if (tpd == NULL)
1615		return NULL;
1616			
1617	tpd->status = TPD_ADDR(mapping);
1618	tpd->reserved = 0; 
1619	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1620	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1621	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1622
1623	return tpd;
1624}
1625
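/*
 * note: __alloc_tpd() uses GFP_ATOMIC because its callers hold
 * he_dev->global_lock; TPD_ADDR() keeps the dma handle in the status
 * word, which is how the tbrq service and error paths later hand the
 * descriptor back to dma_pool_free()
 */
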
1626#define AAL5_LEN(buf,len) 						\
1627			((((unsigned char *)(buf))[(len)-6] << 8) |	\
1628				(((unsigned char *)(buf))[(len)-5]))
1629
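#ifdef notdef
/* never compiled -- an illustrative use of AAL5_LEN(): a single-cell
   aal5 pdu of len 48 carrying a 40-byte sdu ends in the 8-byte trailer
   (uu, cpi, 16-bit length, 32-bit crc), so bytes 42/43 hold the length */
static int aal5_len_example(void)
{
	unsigned char buf[48] = { 0 };

	buf[42] = 0x00;			/* length, high byte */
	buf[43] = 0x28;			/* length, low byte: 40 */
	return AAL5_LEN(buf, 48);	/* evaluates to 40 */
}
#endif
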
1630/* 2.10.1.2 receive
1631 *
1632 * aal5 packets can optionally return the tcp checksum in the lower
1633 * 16 bits of the crc (RSR0_TCP_CKSUM)
1634 */
1635
1636#define TCP_CKSUM(buf,len) 						\
1637			((((unsigned char *)(buf))[(len)-2] << 8) |	\
1638				(((unsigned char *)(buf))[(len-1)]))
1639
1640static int
1641he_service_rbrq(struct he_dev *he_dev, int group)
1642{
1643	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1644				((unsigned long)he_dev->rbrq_base |
1645					he_dev->hsp->group[group].rbrq_tail);
1646	unsigned cid, lastcid = -1;
1647	struct sk_buff *skb;
1648	struct atm_vcc *vcc = NULL;
1649	struct he_vcc *he_vcc;
1650	struct he_buff *heb, *next;
1651	int i;
1652	int pdus_assembled = 0;
1653	int updated = 0;
1654
1655	read_lock(&vcc_sklist_lock);
1656	while (he_dev->rbrq_head != rbrq_tail) {
1657		++updated;
1658
1659		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1660			he_dev->rbrq_head, group,
1661			RBRQ_ADDR(he_dev->rbrq_head),
1662			RBRQ_BUFLEN(he_dev->rbrq_head),
1663			RBRQ_CID(he_dev->rbrq_head),
1664			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1665			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1666			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1667			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1668			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1669			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1670
1671		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
1672		heb = he_dev->rbpl_virt[i];
1673
1674		cid = RBRQ_CID(he_dev->rbrq_head);
1675		if (cid != lastcid)
1676			vcc = __find_vcc(he_dev, cid);
1677		lastcid = cid;
1678
1679		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
1680			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
1681			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1682				clear_bit(i, he_dev->rbpl_table);
1683				list_del(&heb->entry);
1684				dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1685			}
1686
1687			goto next_rbrq_entry;
1688		}
1689
1690		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1691			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
1692			atomic_inc(&vcc->stats->rx_drop);
1693			goto return_host_buffers;
1694		}
1695
1696		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1697		clear_bit(i, he_dev->rbpl_table);
1698		list_move_tail(&heb->entry, &he_vcc->buffers);
1699		he_vcc->pdu_len += heb->len;
1700
1701		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1702			lastcid = -1;
1703			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
1704			wake_up(&he_vcc->rx_waitq);
1705			goto return_host_buffers;
1706		}
1707
1708		if (!RBRQ_END_PDU(he_dev->rbrq_head))
1709			goto next_rbrq_entry;
1710
1711		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1712				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1713			HPRINTK("%s%s (%d.%d)\n",
1714				RBRQ_CRC_ERR(he_dev->rbrq_head)
1715							? "CRC_ERR " : "",
1716				RBRQ_LEN_ERR(he_dev->rbrq_head)
1717							? "LEN_ERR" : "",
1718							vcc->vpi, vcc->vci);
1719			atomic_inc(&vcc->stats->rx_err);
1720			goto return_host_buffers;
1721		}
1722
1723		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1724							GFP_ATOMIC);
1725		if (!skb) {
1726			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1727			goto return_host_buffers;
1728		}
1729
1730		if (rx_skb_reserve > 0)
1731			skb_reserve(skb, rx_skb_reserve);
1732
1733		__net_timestamp(skb);
1734
1735		list_for_each_entry(heb, &he_vcc->buffers, entry)
1736			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
1737
1738		switch (vcc->qos.aal) {
1739			case ATM_AAL0:
1740				/* 2.10.1.5 raw cell receive */
1741				skb->len = ATM_AAL0_SDU;
1742				skb_set_tail_pointer(skb, skb->len);
1743				break;
1744			case ATM_AAL5:
1745				/* 2.10.1.2 aal5 receive */
1746
1747				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1748				skb_set_tail_pointer(skb, skb->len);
1749#ifdef USE_CHECKSUM_HW
1750				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1751					skb->ip_summed = CHECKSUM_COMPLETE;
1752					skb->csum = TCP_CKSUM(skb->data,
1753							he_vcc->pdu_len);
1754				}
1755#endif
1756				break;
1757		}
1758
1759#ifdef should_never_happen
1760		if (skb->len > vcc->qos.rxtp.max_sdu)
1761			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1762#endif
1763
1764#ifdef notdef
1765		ATM_SKB(skb)->vcc = vcc;
1766#endif
1767		spin_unlock(&he_dev->global_lock);
1768		vcc->push(vcc, skb);
1769		spin_lock(&he_dev->global_lock);
1770
1771		atomic_inc(&vcc->stats->rx);
1772
1773return_host_buffers:
1774		++pdus_assembled;
1775
1776		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
1777			dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1778		INIT_LIST_HEAD(&he_vcc->buffers);
1779		he_vcc->pdu_len = 0;
1780
1781next_rbrq_entry:
1782		he_dev->rbrq_head = (struct he_rbrq *)
1783				((unsigned long) he_dev->rbrq_base |
1784					RBRQ_MASK(he_dev->rbrq_head + 1));
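		/*
		 * the OR/mask arithmetic above presumably relies on
		 * rbrq_base being aligned to the (power-of-two) queue
		 * size: masking the advanced pointer wraps the offset
		 * while the base address bits pass through untouched
		 */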
1785
1786	}
1787	read_unlock(&vcc_sklist_lock);
1788
1789	if (updated) {
1790		if (updated > he_dev->rbrq_peak)
1791			he_dev->rbrq_peak = updated;
1792
1793		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1794						G0_RBRQ_H + (group * 16));
1795	}
1796
1797	return pdus_assembled;
1798}
1799
1800static void
1801he_service_tbrq(struct he_dev *he_dev, int group)
1802{
1803	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1804				((unsigned long)he_dev->tbrq_base |
1805					he_dev->hsp->group[group].tbrq_tail);
1806	struct he_tpd *tpd;
1807	int slot, updated = 0;
1808	struct he_tpd *__tpd;
1809
1810	/* 2.1.6 transmit buffer return queue */
1811
1812	while (he_dev->tbrq_head != tbrq_tail) {
1813		++updated;
1814
1815		HPRINTK("tbrq%d 0x%x%s%s\n",
1816			group,
1817			TBRQ_TPD(he_dev->tbrq_head), 
1818			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1819			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1820		tpd = NULL;
1821		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1822			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1823				tpd = __tpd;
1824				list_del(&__tpd->entry);
1825				break;
1826			}
1827		}
1828
1829		if (tpd == NULL) {
1830			hprintk("unable to locate tpd for dma buffer %x\n",
1831						TBRQ_TPD(he_dev->tbrq_head));
1832			goto next_tbrq_entry;
1833		}
1834
1835		if (TBRQ_EOS(he_dev->tbrq_head)) {
1836			if (tpd->vcc) {
1837				HPRINTK("wake_up(tx_waitq) cid 0x%x\n", he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1838				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1839			}
1840
1841			goto next_tbrq_entry;
1842		}
1843
1844		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1845			if (tpd->iovec[slot].addr)
1846				dma_unmap_single(&he_dev->pci_dev->dev,
1847					tpd->iovec[slot].addr,
1848					tpd->iovec[slot].len & TPD_LEN_MASK,
1849							DMA_TO_DEVICE);
1850			if (tpd->iovec[slot].len & TPD_LST)
1851				break;
1852				
1853		}
1854
1855		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1856			if (tpd->vcc && tpd->vcc->pop)
1857				tpd->vcc->pop(tpd->vcc, tpd->skb);
1858			else
1859				dev_kfree_skb_any(tpd->skb);
1860		}
1861
1862next_tbrq_entry:
1863		if (tpd)
1864			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1865		he_dev->tbrq_head = (struct he_tbrq *)
1866				((unsigned long) he_dev->tbrq_base |
1867					TBRQ_MASK(he_dev->tbrq_head + 1));
1868	}
1869
1870	if (updated) {
1871		if (updated > he_dev->tbrq_peak)
1872			he_dev->tbrq_peak = updated;
1873
1874		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1875						G0_TBRQ_H + (group * 16));
1876	}
1877}
1878
1879static void
1880he_service_rbpl(struct he_dev *he_dev, int group)
1881{
1882	struct he_rbp *new_tail;
1883	struct he_rbp *rbpl_head;
1884	struct he_buff *heb;
1885	dma_addr_t mapping;
1886	int i;
1887	int moved = 0;
1888
1889	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1890					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
1891
1892	for (;;) {
1893		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
1894						RBPL_MASK(he_dev->rbpl_tail+1));
1895
1896		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1897		if (new_tail == rbpl_head)
1898			break;
1899
1900		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
1901		if (i > (RBPL_TABLE_SIZE - 1)) {
1902			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
1903			if (i > (RBPL_TABLE_SIZE - 1))
1904				break;
1905		}
1906		he_dev->rbpl_hint = i + 1;
1907
1908		heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
1909		if (!heb)
1910			break;
1911		heb->mapping = mapping;
1912		list_add(&heb->entry, &he_dev->rbpl_outstanding);
1913		he_dev->rbpl_virt[i] = heb;
1914		set_bit(i, he_dev->rbpl_table);
1915		new_tail->idx = i << RBP_IDX_OFFSET;
1916		new_tail->phys = mapping + offsetof(struct he_buff, data);
1917
1918		he_dev->rbpl_tail = new_tail;
1919		++moved;
1920	} 
1921
1922	if (moved)
1923		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
1924}
1925
1926static void
1927he_tasklet(unsigned long data)
1928{
1929	unsigned long flags;
1930	struct he_dev *he_dev = (struct he_dev *) data;
1931	int group, type;
1932	int updated = 0;
1933
1934	HPRINTK("tasklet (0x%lx)\n", data);
1935	spin_lock_irqsave(&he_dev->global_lock, flags);
1936
1937	while (he_dev->irq_head != he_dev->irq_tail) {
1938		++updated;
1939
1940		type = ITYPE_TYPE(he_dev->irq_head->isw);
1941		group = ITYPE_GROUP(he_dev->irq_head->isw);
1942
1943		switch (type) {
1944			case ITYPE_RBRQ_THRESH:
1945				HPRINTK("rbrq%d threshold\n", group);
1946				/* fall through */
1947			case ITYPE_RBRQ_TIMER:
1948				if (he_service_rbrq(he_dev, group))
1949					he_service_rbpl(he_dev, group);
1950				break;
1951			case ITYPE_TBRQ_THRESH:
1952				HPRINTK("tbrq%d threshold\n", group);
1953				/* fall through */
1954			case ITYPE_TPD_COMPLETE:
1955				he_service_tbrq(he_dev, group);
1956				break;
1957			case ITYPE_RBPL_THRESH:
1958				he_service_rbpl(he_dev, group);
1959				break;
1960			case ITYPE_RBPS_THRESH:
1961				/* shouldn't happen unless small buffers enabled */
1962				break;
1963			case ITYPE_PHY:
1964				HPRINTK("phy interrupt\n");
1965#ifdef CONFIG_ATM_HE_USE_SUNI
1966				spin_unlock_irqrestore(&he_dev->global_lock, flags);
1967				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
1968					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
1969				spin_lock_irqsave(&he_dev->global_lock, flags);
1970#endif
1971				break;
1972			case ITYPE_OTHER:
1973				switch (type|group) {
1974					case ITYPE_PARITY:
1975						hprintk("parity error\n");
1976						break;
1977					case ITYPE_ABORT:
1978						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
1979						break;
1980				}
1981				break;
1982			case ITYPE_TYPE(ITYPE_INVALID):
1983				/* see 8.1.1 -- check all queues */
1984
1985				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
1986
1987				he_service_rbrq(he_dev, 0);
1988				he_service_rbpl(he_dev, 0);
1989				he_service_tbrq(he_dev, 0);
1990				break;
1991			default:
1992				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
1993		}
1994
1995		he_dev->irq_head->isw = ITYPE_INVALID;
1996
1997		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
1998	}
1999
2000	if (updated) {
2001		if (updated > he_dev->irq_peak)
2002			he_dev->irq_peak = updated;
2003
2004		he_writel(he_dev,
2005			IRQ_SIZE(CONFIG_IRQ_SIZE) |
2006			IRQ_THRESH(CONFIG_IRQ_THRESH) |
2007			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2008		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2009	}
2010	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2011}
2012
2013static irqreturn_t
2014he_irq_handler(int irq, void *dev_id)
2015{
2016	unsigned long flags;
2017	struct he_dev *he_dev = (struct he_dev * )dev_id;
2018	int handled = 0;
2019
2020	if (he_dev == NULL)
2021		return IRQ_NONE;
2022
2023	spin_lock_irqsave(&he_dev->global_lock, flags);
2024
2025	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2026						(*he_dev->irq_tailoffset << 2));
2027
2028	if (he_dev->irq_tail == he_dev->irq_head) {
2029		HPRINTK("tailoffset not updated?\n");
2030		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2031			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2032		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
2033	}
2034
2035#ifdef DEBUG
2036	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2037		hprintk("spurious (or shared) interrupt?\n");
2038#endif
2039
2040	if (he_dev->irq_head != he_dev->irq_tail) {
2041		handled = 1;
2042		tasklet_schedule(&he_dev->tasklet);
2043		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
2044		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
2045	}
2046	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2047	return IRQ_RETVAL(handled);
2048
2049}
2050
2051static __inline__ void
2052__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2053{
2054	struct he_tpdrq *new_tail;
2055
2056	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2057					tpd, cid, he_dev->tpdrq_tail);
2058
2059	/* new_tail = he_dev->tpdrq_tail; */
2060	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2061					TPDRQ_MASK(he_dev->tpdrq_tail+1));
2062
2063	/*
2064	 * check to see if we are about to set the tail == head
2065	 * if true, update the head pointer from the adapter
2066	 * to see if this is really the case (reading the queue
2067	 * head for every enqueue would be unnecessarily slow)
2068	 */
2069
2070	if (new_tail == he_dev->tpdrq_head) {
2071		he_dev->tpdrq_head = (struct he_tpdrq *)
2072			(((unsigned long)he_dev->tpdrq_base) |
2073				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2074
2075		if (new_tail == he_dev->tpdrq_head) {
2076			int slot;
2077
2078			hprintk("tpdrq full (cid 0x%x)\n", cid);
2079			/*
2080			 * FIXME
2081			 * push tpd onto a transmit backlog queue
2082			 * after service_tbrq, service the backlog
2083			 * for now, we just drop the pdu
2084			 */
2085			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2086				if (tpd->iovec[slot].addr)
2087					dma_unmap_single(&he_dev->pci_dev->dev,
2088						tpd->iovec[slot].addr,
2089						tpd->iovec[slot].len & TPD_LEN_MASK,
2090								DMA_TO_DEVICE);
2091			}
2092			if (tpd->skb) {
2093				if (tpd->vcc->pop)
2094					tpd->vcc->pop(tpd->vcc, tpd->skb);
2095				else
2096					dev_kfree_skb_any(tpd->skb);
2097				atomic_inc(&tpd->vcc->stats->tx_err);
2098			}
2099			dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2100			return;
2101		}
2102	}
2103
2104	/* 2.1.5 transmit packet descriptor ready queue */
2105	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2106	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2107	he_dev->tpdrq_tail->cid = cid;
2108	wmb();
2109
2110	he_dev->tpdrq_tail = new_tail;
2111
2112	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2113	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
2114}
2115
2116static int
2117he_open(struct atm_vcc *vcc)
2118{
2119	unsigned long flags;
2120	struct he_dev *he_dev = HE_DEV(vcc->dev);
2121	struct he_vcc *he_vcc;
2122	int err = 0;
2123	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2124	short vpi = vcc->vpi;
2125	int vci = vcc->vci;
2126
2127	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2128		return 0;
2129
2130	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2131
2132	set_bit(ATM_VF_ADDR, &vcc->flags);
2133
2134	cid = he_mkcid(he_dev, vpi, vci);
2135
2136	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2137	if (he_vcc == NULL) {
2138		hprintk("unable to allocate he_vcc during open\n");
2139		return -ENOMEM;
2140	}
2141
2142	INIT_LIST_HEAD(&he_vcc->buffers);
2143	he_vcc->pdu_len = 0;
2144	he_vcc->rc_index = -1;
2145
2146	init_waitqueue_head(&he_vcc->rx_waitq);
2147	init_waitqueue_head(&he_vcc->tx_waitq);
2148
2149	vcc->dev_data = he_vcc;
2150
2151	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2152		int pcr_goal;
2153
2154		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2155		if (pcr_goal == 0)
2156			pcr_goal = he_dev->atm_dev->link_rate;
2157		if (pcr_goal < 0)	/* means round down, technically */
2158			pcr_goal = -pcr_goal;
2159
2160		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2161
2162		switch (vcc->qos.aal) {
2163			case ATM_AAL5:
2164				tsr0_aal = TSR0_AAL5;
2165				tsr4 = TSR4_AAL5;
2166				break;
2167			case ATM_AAL0:
2168				tsr0_aal = TSR0_AAL0_SDU;
2169				tsr4 = TSR4_AAL0_SDU;
2170				break;
2171			default:
2172				err = -EINVAL;
2173				goto open_failed;
2174		}
2175
2176		spin_lock_irqsave(&he_dev->global_lock, flags);
2177		tsr0 = he_readl_tsr0(he_dev, cid);
2178		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2179
2180		if (TSR0_CONN_STATE(tsr0) != 0) {
2181			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2182			err = -EBUSY;
2183			goto open_failed;
2184		}
2185
2186		switch (vcc->qos.txtp.traffic_class) {
2187			case ATM_UBR:
2188				/* 2.3.3.1 open connection ubr */
2189
2190				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2191					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2192				break;
2193
2194			case ATM_CBR:
2195				/* 2.3.3.2 open connection cbr */
2196
2197				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2198				if ((he_dev->total_bw + pcr_goal)
2199					> (he_dev->atm_dev->link_rate * 9 / 10))
2200				{
2201					err = -EBUSY;
2202					goto open_failed;
2203				}
2204
2205				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2206
2207				/* find an unused cs_stper register */
2208				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2209					if (he_dev->cs_stper[reg].inuse == 0 || 
2210					    he_dev->cs_stper[reg].pcr == pcr_goal)
2211							break;
2212
2213				if (reg == HE_NUM_CS_STPER) {
2214					err = -EBUSY;
2215					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2216					goto open_failed;
2217				}
2218
2219				he_dev->total_bw += pcr_goal;
2220
2221				he_vcc->rc_index = reg;
2222				++he_dev->cs_stper[reg].inuse;
2223				he_dev->cs_stper[reg].pcr = pcr_goal;
2224
2225				clock = he_is622(he_dev) ? 66667000 : 50000000;
2226				period = clock / pcr_goal;
2227				
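				/*
				 * e.g. (illustrative): a 155 port runs this
				 * clock at 50 MHz, so pcr_goal = 100000
				 * cells/sec gives period = 500, and
				 * rate_to_atmf(period/2) presumably encodes
				 * 250 in the atm forum exp/man format the
				 * CS_STPER registers expect
				 */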
2228				HPRINTK("rc_index = %d period = %d\n",
2229								reg, period);
2230
2231				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2232							CS_STPER0 + reg);
2233				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2234
2235				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2236							TSR0_RC_INDEX(reg);
2237
2238				break;
2239			default:
2240				err = -EINVAL;
2241				goto open_failed;
2242		}
2243
2244		spin_lock_irqsave(&he_dev->global_lock, flags);
2245
2246		he_writel_tsr0(he_dev, tsr0, cid);
2247		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2248		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2249					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2250		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2251		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2252
2253		he_writel_tsr3(he_dev, 0x0, cid);
2254		he_writel_tsr5(he_dev, 0x0, cid);
2255		he_writel_tsr6(he_dev, 0x0, cid);
2256		he_writel_tsr7(he_dev, 0x0, cid);
2257		he_writel_tsr8(he_dev, 0x0, cid);
2258		he_writel_tsr10(he_dev, 0x0, cid);
2259		he_writel_tsr11(he_dev, 0x0, cid);
2260		he_writel_tsr12(he_dev, 0x0, cid);
2261		he_writel_tsr13(he_dev, 0x0, cid);
2262		he_writel_tsr14(he_dev, 0x0, cid);
2263		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2264		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2265	}
2266
2267	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2268		unsigned aal;
2269
2270		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2271					&HE_VCC(vcc)->rx_waitq);
2272
2273		switch (vcc->qos.aal) {
2274			case ATM_AAL5:
2275				aal = RSR0_AAL5;
2276				break;
2277			case ATM_AAL0:
2278				aal = RSR0_RAWCELL;
2279				break;
2280			default:
2281				err = -EINVAL;
2282				goto open_failed;
2283		}
2284
2285		spin_lock_irqsave(&he_dev->global_lock, flags);
2286
2287		rsr0 = he_readl_rsr0(he_dev, cid);
2288		if (rsr0 & RSR0_OPEN_CONN) {
2289			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2290
2291			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2292			err = -EBUSY;
2293			goto open_failed;
2294		}
2295
2296		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2297		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2298		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
2299				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2300
2301#ifdef USE_CHECKSUM_HW
2302		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2303			rsr0 |= RSR0_TCP_CKSUM;
2304#endif
2305
2306		he_writel_rsr4(he_dev, rsr4, cid);
2307		he_writel_rsr1(he_dev, rsr1, cid);
2308		/* 5.1.11 last parameter initialized should be
2309			  the open/closed indication in rsr0 */
2310		he_writel_rsr0(he_dev,
2311			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2312		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2313
2314		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2315	}
2316
2317open_failed:
2318
2319	if (err) {
2320		kfree(he_vcc);
2321		clear_bit(ATM_VF_ADDR, &vcc->flags);
2322	}
2323	else
2324		set_bit(ATM_VF_READY, &vcc->flags);
2325
2326	return err;
2327}
2328
2329static void
2330he_close(struct atm_vcc *vcc)
2331{
2332	unsigned long flags;
2333	DECLARE_WAITQUEUE(wait, current);
2334	struct he_dev *he_dev = HE_DEV(vcc->dev);
2335	struct he_tpd *tpd;
2336	unsigned cid;
2337	struct he_vcc *he_vcc = HE_VCC(vcc);
2338#define MAX_RETRY 30
2339	int retry = 0, sleep = 1, tx_inuse;
2340
2341	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2342
2343	clear_bit(ATM_VF_READY, &vcc->flags);
2344	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2345
2346	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2347		int timeout;
2348
2349		HPRINTK("close rx cid 0x%x\n", cid);
2350
2351		/* 2.7.2.2 close receive operation */
2352
2353		/* wait for previous close (if any) to finish */
2354
2355		spin_lock_irqsave(&he_dev->global_lock, flags);
2356		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2357			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2358			udelay(250);
2359		}
2360
2361		set_current_state(TASK_UNINTERRUPTIBLE);
2362		add_wait_queue(&he_vcc->rx_waitq, &wait);
2363
2364		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2365		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2366		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2367		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2368
2369		timeout = schedule_timeout(30*HZ);
2370
2371		remove_wait_queue(&he_vcc->rx_waitq, &wait);
2372		set_current_state(TASK_RUNNING);
2373
2374		if (timeout == 0)
2375			hprintk("close rx timeout cid 0x%x\n", cid);
2376
2377		HPRINTK("close rx cid 0x%x complete\n", cid);
2378
2379	}
2380
2381	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2382		volatile unsigned tsr4, tsr0;
2383		int timeout;
2384
2385		HPRINTK("close tx cid 0x%x\n", cid);
2386		
2387		/* 2.1.2
2388		 *
2389		 * ... the host must first stop queueing packets to the TPDRQ
2390		 * on the connection to be closed, then wait for all outstanding
2391		 * packets to be transmitted and their buffers returned to the
2392		 * TBRQ. When the last packet on the connection arrives in the
2393		 * TBRQ, the host issues the close command to the adapter.
2394		 */
2395
2396		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
2397		       (retry < MAX_RETRY)) {
2398			msleep(sleep);
2399			if (sleep < 250)
2400				sleep = sleep * 2;
2401
2402			++retry;
2403		}
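		/* with MAX_RETRY = 30 and the doubling above (which stops
		   once sleep reaches 256 ms) the loop waits roughly
		   1+2+...+128 + 22 * 256 ms, i.e. about six seconds */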
2404
2405		if (tx_inuse > 1)
2406			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2407
2408		/* 2.3.1.1 generic close operations with flush */
2409
2410		spin_lock_irqsave(&he_dev->global_lock, flags);
2411		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2412					/* also clears TSR4_SESSION_ENDED */
2413
2414		switch (vcc->qos.txtp.traffic_class) {
2415			case ATM_UBR:
2416				he_writel_tsr1(he_dev, 
2417					TSR1_MCR(rate_to_atmf(200000))
2418					| TSR1_PCR(0), cid);
2419				break;
2420			case ATM_CBR:
2421				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2422				break;
2423		}
2424		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */
2425
2426		tpd = __alloc_tpd(he_dev);
2427		if (tpd == NULL) {
2428			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2429			goto close_tx_incomplete;
2430		}
2431		tpd->status |= TPD_EOS | TPD_INT;
2432		tpd->skb = NULL;
2433		tpd->vcc = vcc;
2434		wmb();
2435
2436		set_current_state(TASK_UNINTERRUPTIBLE);
2437		add_wait_queue(&he_vcc->tx_waitq, &wait);
2438		__enqueue_tpd(he_dev, tpd, cid);
2439		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2440
2441		timeout = schedule_timeout(30*HZ);
2442
2443		remove_wait_queue(&he_vcc->tx_waitq, &wait);
2444		set_current_state(TASK_RUNNING);
2445
2446		spin_lock_irqsave(&he_dev->global_lock, flags);
2447
2448		if (timeout == 0) {
2449			hprintk("close tx timeout cid 0x%x\n", cid);
2450			goto close_tx_incomplete;
2451		}
2452
2453		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2454			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2455			udelay(250);
2456		}
2457
2458		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2459			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2460			udelay(250);
2461		}
2462
2463close_tx_incomplete:
2464
2465		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2466			int reg = he_vcc->rc_index;
2467
2468			HPRINTK("cs_stper reg = %d\n", reg);
2469
2470			if (he_dev->cs_stper[reg].inuse == 0)
2471				hprintk("cs_stper[%d].inuse = 0!\n", reg);
2472			else
2473				--he_dev->cs_stper[reg].inuse;
2474
2475			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2476		}
2477		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2478
2479		HPRINTK("close tx cid 0x%x complete\n", cid);
2480	}
2481
2482	kfree(he_vcc);
2483
2484	clear_bit(ATM_VF_ADDR, &vcc->flags);
2485}
2486
2487static int
2488he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2489{
2490	unsigned long flags;
2491	struct he_dev *he_dev = HE_DEV(vcc->dev);
2492	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2493	struct he_tpd *tpd;
2494#ifdef USE_SCATTERGATHER
2495	int i, slot = 0;
2496#endif
2497
2498#define HE_TPD_BUFSIZE 0xffff
2499
2500	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2501
2502	if ((skb->len > HE_TPD_BUFSIZE) ||
2503	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2504		hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
2505		if (vcc->pop)
2506			vcc->pop(vcc, skb);
2507		else
2508			dev_kfree_skb_any(skb);
2509		atomic_inc(&vcc->stats->tx_err);
2510		return -EINVAL;
2511	}
2512
2513#ifndef USE_SCATTERGATHER
2514	if (skb_shinfo(skb)->nr_frags) {
2515		hprintk("no scatter/gather support\n");
2516		if (vcc->pop)
2517			vcc->pop(vcc, skb);
2518		else
2519			dev_kfree_skb_any(skb);
2520		atomic_inc(&vcc->stats->tx_err);
2521		return -EINVAL;
2522	}
2523#endif
2524	spin_lock_irqsave(&he_dev->global_lock, flags);
2525
2526	tpd = __alloc_tpd(he_dev);
2527	if (tpd == NULL) {
2528		if (vcc->pop)
2529			vcc->pop(vcc, skb);
2530		else
2531			dev_kfree_skb_any(skb);
2532		atomic_inc(&vcc->stats->tx_err);
2533		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2534		return -ENOMEM;
2535	}
2536
2537	if (vcc->qos.aal == ATM_AAL5)
2538		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2539	else {
2540		char *pti_clp = (void *) (skb->data + 3);
2541		int clp, pti;
2542
2543		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 
2544		clp = (*pti_clp & ATM_HDR_CLP);
2545		tpd->status |= TPD_CELLTYPE(pti);
2546		if (clp)
2547			tpd->status |= TPD_CLP;
2548
2549		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2550	}
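	/*
	 * for aal0 the 4-byte header (hec excluded) is consumed above:
	 * pti/clp come from the 4th header byte, and skb_pull() leaves
	 * just the 48-byte payload to be mapped below; the vpi/vci are
	 * taken from the connection, not the buffer
	 */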
2551
2552#ifdef USE_SCATTERGATHER
2553	tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
2554				skb_headlen(skb), DMA_TO_DEVICE);
2555	tpd->iovec[slot].len = skb_headlen(skb);
2556	++slot;
2557
2558	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2559		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2560
2561		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
2562			tpd->vcc = vcc;
2563			tpd->skb = NULL;	/* not the last fragment
2564						   so don't ->push() yet */
2565			wmb();
2566
2567			__enqueue_tpd(he_dev, tpd, cid);
2568			tpd = __alloc_tpd(he_dev);
2569			if (tpd == NULL) {
2570				if (vcc->pop)
2571					vcc->pop(vcc, skb);
2572				else
2573					dev_kfree_skb_any(skb);
2574				atomic_inc(&vcc->stats->tx_err);
2575				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2576				return -ENOMEM;
2577			}
2578			tpd->status |= TPD_USERCELL;
2579			slot = 0;
2580		}
2581
2582		tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
2583			(void *) page_address(frag->page) + frag->page_offset,
2584				frag->size, DMA_TO_DEVICE);
2585		tpd->iovec[slot].len = frag->size;
2586		++slot;
2587
2588	}
2589
2590	tpd->iovec[slot - 1].len |= TPD_LST;
2591#else
2592	tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2593	tpd->length0 = skb->len | TPD_LST;
2594#endif
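	/* request a transmit-complete interrupt for this tpd and attach
	   the skb so it can be unmapped and freed on completion */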
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}

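/*
 * device ioctl handler: HE_GET_REG lets a CAP_NET_ADMIN user read
 * a register from the pci, rcm, tcm or mailbox register spaces;
 * any other command is passed through to the phy driver, if one
 * is attached
 */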
static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	struct he_ioctl_reg reg;
	int err = 0;

	switch (cmd) {
		case HE_GET_REG:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;

			if (copy_from_user(&reg, arg,
					   sizeof(struct he_ioctl_reg)))
				return -EFAULT;

			spin_lock_irqsave(&he_dev->global_lock, flags);
			switch (reg.type) {
				case HE_REGTYPE_PCI:
					if (reg.addr >= HE_REGMAP_SIZE) {
						err = -EINVAL;
						break;
					}

					reg.val = he_readl(he_dev, reg.addr);
					break;
				case HE_REGTYPE_RCM:
					reg.val =
						he_readl_rcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_TCM:
					reg.val =
						he_readl_tcm(he_dev, reg.addr);
					break;
				case HE_REGTYPE_MBOX:
					reg.val =
						he_readl_mbox(he_dev, reg.addr);
					break;
				default:
					err = -EINVAL;
					break;
			}
			spin_unlock_irqrestore(&he_dev->global_lock, flags);
			if (err == 0)
				if (copy_to_user(arg, &reg,
							sizeof(struct he_ioctl_reg)))
					return -EFAULT;
			break;
		default:
#ifdef CONFIG_ATM_HE_USE_SUNI
			if (atm_dev->phy && atm_dev->phy->ioctl)
				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
			err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
			break;
	}

	return err;
}

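/*
 * suni phy register access: the framer registers appear to be
 * word-spaced in the FRAMER window, hence the addr*4 scaling; the
 * readback in he_phy_put() flushes the posted write
 */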
static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);

	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

	spin_lock_irqsave(&he_dev->global_lock, flags);
	he_writel(he_dev, val, FRAMER + (addr*4));
	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(atm_dev);
	unsigned reg;

	spin_lock_irqsave(&he_dev->global_lock, flags);
	reg = he_readl(he_dev, FRAMER + (addr*4));
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
	return reg;
}

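/*
 * /proc read handler: emits one line of output per call, selected
 * by *pos; the mcc/oec/dcc/cec error counts are accumulated across
 * reads in static variables
 */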
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
	int inuse;
#endif
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;

	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}

/* eeprom routines  -- see 4.7 */

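/*
 * bit-bang one byte out of the serial eeprom: raise the write
 * enable bit, clock out the READ opcode from readtab[], shift the
 * 8-bit address out msb first, then clock in 8 data bits, sampling
 * ID_DOUT on each cycle
 */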
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;      /* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};

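/* module_pci_driver() expands to the module init/exit boilerplate
   that registers and unregisters he_driver with the PCI core */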
module_pci_driver(he_driver);