   1/*
   2
   3  he.c
   4
   5  ForeRunnerHE ATM Adapter driver for ATM on Linux
   6  Copyright (C) 1999-2001  Naval Research Laboratory
   7
   8  This library is free software; you can redistribute it and/or
   9  modify it under the terms of the GNU Lesser General Public
  10  License as published by the Free Software Foundation; either
  11  version 2.1 of the License, or (at your option) any later version.
  12
  13  This library is distributed in the hope that it will be useful,
  14  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16  Lesser General Public License for more details.
  17
  18  You should have received a copy of the GNU Lesser General Public
  19  License along with this library; if not, write to the Free Software
  20  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  21
  22*/
  23
  24/*
  25
  26  he.c
  27
  28  ForeRunnerHE ATM Adapter driver for ATM on Linux
  29  Copyright (C) 1999-2001  Naval Research Laboratory
  30
  31  Permission to use, copy, modify and distribute this software and its
  32  documentation is hereby granted, provided that both the copyright
  33  notice and this permission notice appear in all copies of the software,
  34  derivative works or modified versions, and any portions thereof, and
  35  that both notices appear in supporting documentation.
  36
  37  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  38  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  39  RESULTING FROM THE USE OF THIS SOFTWARE.
  40
  41  This driver was written using the "Programmer's Reference Manual for
  42  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
  43
  44  AUTHORS:
  45	chas williams <chas@cmf.nrl.navy.mil>
  46	eric kinzie <ekinzie@cmf.nrl.navy.mil>
  47
  48  NOTES:
  49	4096 supported 'connections'
  50	group 0 is used for all traffic
  51	interrupt queue 0 is used for all interrupts
  52	aal0 support (based on work from ulrich.u.muller@nokia.com)
  53
  54 */
  55
  56#include <linux/module.h>
  57#include <linux/kernel.h>
  58#include <linux/skbuff.h>
  59#include <linux/pci.h>
  60#include <linux/errno.h>
  61#include <linux/types.h>
  62#include <linux/string.h>
  63#include <linux/delay.h>
  64#include <linux/init.h>
  65#include <linux/mm.h>
  66#include <linux/sched.h>
  67#include <linux/timer.h>
  68#include <linux/interrupt.h>
  69#include <linux/dma-mapping.h>
  70#include <linux/bitmap.h>
  71#include <linux/slab.h>
  72#include <asm/io.h>
  73#include <asm/byteorder.h>
  74#include <asm/uaccess.h>
  75
  76#include <linux/atmdev.h>
  77#include <linux/atm.h>
  78#include <linux/sonet.h>
  79
  80#undef USE_SCATTERGATHER
  81#undef USE_CHECKSUM_HW			/* still confused about this */
  82/* #undef HE_DEBUG */
  83
  84#include "he.h"
  85#include "suni.h"
  86#include <linux/atm_he.h>
  87
  88#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
  89
  90#ifdef HE_DEBUG
  91#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
  92#else /* !HE_DEBUG */
  93#define HPRINTK(fmt,args...)	do { } while (0)
  94#endif /* HE_DEBUG */
  95
  96/* declarations */
  97
  98static int he_open(struct atm_vcc *vcc);
  99static void he_close(struct atm_vcc *vcc);
 100static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
 101static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
 102static irqreturn_t he_irq_handler(int irq, void *dev_id);
 103static void he_tasklet(unsigned long data);
 104static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
 105static int he_start(struct atm_dev *dev);
 106static void he_stop(struct he_dev *dev);
 107static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
 108static unsigned char he_phy_get(struct atm_dev *, unsigned long);
 109
 110static u8 read_prom_byte(struct he_dev *he_dev, int addr);
 111
 112/* globals */
 113
/* global adapter list -- he_init_one() pushes new devices on the head,
 * linked through he_dev->next */
static struct he_dev *he_devs;
/* tunables -- presumably registered as module parameters elsewhere in
 * this file (TODO confirm against the module_param declarations) */
static int disable64;			/* NOTE(review): looks like it disables 64-bit access -- confirm at use site */
static short nvpibits = -1;		/* vpi address bits; -1 = driver default */
static short nvcibits = -1;		/* vci address bits; -1 = driver default */
static short rx_skb_reserve = 16;	/* presumably headroom reserved in rx skbs -- verify at use site */
static int irq_coalesce = 1;		/* enable rx interrupt coalescing (see he_init_group) */
static int sdh = 0;			/* presumably selects SDH framing in the PHY -- verify at use site */
 121
/* Read from EEPROM = 0000 0011b
 *
 * bit-bang waveform for the EEPROM READ opcode: after the chip-select
 * pulse, each CLK_LOW/CLK_HIGH pair clocks out one opcode bit, with
 * SI_HIGH driving the serial-input line during the '1' bits */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};

/* Clock to read from/write to the EEPROM
 *
 * one full clock cycle per pair of table entries; used to shift the
 * data bits in/out after the opcode above has been sent */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
 163
/* operations vector handed to the ATM core via atm_dev_register() in
 * he_init_one() */
static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};
 175
 176#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
 177#define he_readl(dev, reg)		readl((dev)->membase + (reg))
 178
 179/* section 2.12 connection memory access */
 180
/* write @val to internal connection memory at @addr; @flags selects the
 * target space (CON_CTL_TCM / CON_CTL_RCM / CON_CTL_MBOX -- see the
 * wrapper macros below).  spins until the controller clears
 * CON_CTL_BUSY; there is no timeout. */
static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); /* busy-wait, no timeout */
}
 190
 191#define he_writel_rcm(dev, val, reg) 				\
 192			he_writel_internal(dev, val, reg, CON_CTL_RCM)
 193
 194#define he_writel_tcm(dev, val, reg) 				\
 195			he_writel_internal(dev, val, reg, CON_CTL_TCM)
 196
 197#define he_writel_mbox(dev, val, reg) 				\
 198			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
 199
/* read one word from internal connection memory at @addr; @flags selects
 * the target space, as in he_writel_internal().  spins on CON_CTL_BUSY
 * with no timeout before latching the data from CON_DAT. */
static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); /* busy-wait, no timeout */
	return he_readl(he_dev, CON_DAT);
}
 207
 208#define he_readl_rcm(dev, reg) \
 209			he_readl_internal(dev, reg, CON_CTL_RCM)
 210
 211#define he_readl_tcm(dev, reg) \
 212			he_readl_internal(dev, reg, CON_CTL_TCM)
 213
 214#define he_readl_mbox(dev, reg) \
 215			he_readl_internal(dev, reg, CON_CTL_MBOX)
 216
 217
 218/* figure 2.2 connection id */
 219
 220#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
 221
 222/* 2.5.1 per connection transmit state registers */
 223
 224#define he_writel_tsr0(dev, val, cid) \
 225		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
 226#define he_readl_tsr0(dev, cid) \
 227		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
 228
 229#define he_writel_tsr1(dev, val, cid) \
 230		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
 231
 232#define he_writel_tsr2(dev, val, cid) \
 233		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
 234
 235#define he_writel_tsr3(dev, val, cid) \
 236		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
 237
 238#define he_writel_tsr4(dev, val, cid) \
 239		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
 240
 241	/* from page 2-20
 242	 *
 243	 * NOTE While the transmit connection is active, bits 23 through 0
 244	 *      of this register must not be written by the host.  Byte
 245	 *      enables should be used during normal operation when writing
 246	 *      the most significant byte.
 247	 */
 248
 249#define he_writel_tsr4_upper(dev, val, cid) \
 250		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
 251							CON_CTL_TCM \
 252							| CON_BYTE_DISABLE_2 \
 253							| CON_BYTE_DISABLE_1 \
 254							| CON_BYTE_DISABLE_0)
 255
 256#define he_readl_tsr4(dev, cid) \
 257		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
 258
 259#define he_writel_tsr5(dev, val, cid) \
 260		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
 261
 262#define he_writel_tsr6(dev, val, cid) \
 263		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
 264
 265#define he_writel_tsr7(dev, val, cid) \
 266		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
 267
 268
 269#define he_writel_tsr8(dev, val, cid) \
 270		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
 271
 272#define he_writel_tsr9(dev, val, cid) \
 273		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
 274
 275#define he_writel_tsr10(dev, val, cid) \
 276		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
 277
 278#define he_writel_tsr11(dev, val, cid) \
 279		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
 280
 281
 282#define he_writel_tsr12(dev, val, cid) \
 283		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
 284
 285#define he_writel_tsr13(dev, val, cid) \
 286		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
 287
 288
 289#define he_writel_tsr14(dev, val, cid) \
 290		he_writel_tcm(dev, val, CONFIG_TSRD | cid)
 291
 292#define he_writel_tsr14_upper(dev, val, cid) \
 293		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
 294							CON_CTL_TCM \
 295							| CON_BYTE_DISABLE_2 \
 296							| CON_BYTE_DISABLE_1 \
 297							| CON_BYTE_DISABLE_0)
 298
 299/* 2.7.1 per connection receive state registers */
 300
 301#define he_writel_rsr0(dev, val, cid) \
 302		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
 303#define he_readl_rsr0(dev, cid) \
 304		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
 305
 306#define he_writel_rsr1(dev, val, cid) \
 307		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
 308
 309#define he_writel_rsr2(dev, val, cid) \
 310		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
 311
 312#define he_writel_rsr3(dev, val, cid) \
 313		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
 314
 315#define he_writel_rsr4(dev, val, cid) \
 316		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
 317
 318#define he_writel_rsr5(dev, val, cid) \
 319		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
 320
 321#define he_writel_rsr6(dev, val, cid) \
 322		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
 323
 324#define he_writel_rsr7(dev, val, cid) \
 325		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
 326
 327static __inline__ struct atm_vcc*
 328__find_vcc(struct he_dev *he_dev, unsigned cid)
 329{
 330	struct hlist_head *head;
 331	struct atm_vcc *vcc;
 332	struct hlist_node *node;
 333	struct sock *s;
 334	short vpi;
 335	int vci;
 336
 337	vpi = cid >> he_dev->vcibits;
 338	vci = cid & ((1 << he_dev->vcibits) - 1);
 339	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 340
 341	sk_for_each(s, node, head) {
 342		vcc = atm_sk(s);
 343		if (vcc->dev == he_dev->atm_dev &&
 344		    vcc->vci == vci && vcc->vpi == vpi &&
 345		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
 346				return vcc;
 347		}
 348	}
 349	return NULL;
 350}
 351
 352static int __devinit
 353he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
 354{
 355	struct atm_dev *atm_dev = NULL;
 356	struct he_dev *he_dev = NULL;
 357	int err = 0;
 358
 359	printk(KERN_INFO "ATM he driver\n");
 360
 361	if (pci_enable_device(pci_dev))
 362		return -EIO;
 363	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
 364		printk(KERN_WARNING "he: no suitable dma available\n");
 365		err = -EIO;
 366		goto init_one_failure;
 367	}
 368
 369	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
 370	if (!atm_dev) {
 371		err = -ENODEV;
 372		goto init_one_failure;
 373	}
 374	pci_set_drvdata(pci_dev, atm_dev);
 375
 376	he_dev = kzalloc(sizeof(struct he_dev),
 377							GFP_KERNEL);
 378	if (!he_dev) {
 379		err = -ENOMEM;
 380		goto init_one_failure;
 381	}
 382	he_dev->pci_dev = pci_dev;
 383	he_dev->atm_dev = atm_dev;
 384	he_dev->atm_dev->dev_data = he_dev;
 385	atm_dev->dev_data = he_dev;
 386	he_dev->number = atm_dev->number;
 387	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
 388	spin_lock_init(&he_dev->global_lock);
 389
 390	if (he_start(atm_dev)) {
 391		he_stop(he_dev);
 392		err = -ENODEV;
 393		goto init_one_failure;
 394	}
 395	he_dev->next = NULL;
 396	if (he_devs)
 397		he_dev->next = he_devs;
 398	he_devs = he_dev;
 399	return 0;
 400
 401init_one_failure:
 402	if (atm_dev)
 403		atm_dev_deregister(atm_dev);
 404	kfree(he_dev);
 405	pci_disable_device(pci_dev);
 406	return err;
 407}
 408
 409static void __devexit
 410he_remove_one (struct pci_dev *pci_dev)
 411{
 412	struct atm_dev *atm_dev;
 413	struct he_dev *he_dev;
 414
 415	atm_dev = pci_get_drvdata(pci_dev);
 416	he_dev = HE_DEV(atm_dev);
 417
 418	/* need to remove from he_devs */
 419
 420	he_stop(he_dev);
 421	atm_dev_deregister(atm_dev);
 422	kfree(he_dev);
 423
 424	pci_set_drvdata(pci_dev, NULL);
 425	pci_disable_device(pci_dev);
 426}
 427
 428
 429static unsigned
 430rate_to_atmf(unsigned rate)		/* cps to atm forum format */
 431{
 432#define NONZERO (1 << 14)
 433
 434	unsigned exp = 0;
 435
 436	if (rate == 0)
 437		return 0;
 438
 439	rate <<= 9;
 440	while (rate > 0x3ff) {
 441		++exp;
 442		rate >>= 1;
 443	}
 444
 445	return (NONZERO | (exp << 9) | (rate & 0x1ff));
 446}
 447
/* populate receive large-buffer free pool 0: for each of the
 * r0_numbuffs buffers, write a two-word descriptor into RCM starting at
 * RCMLBM_BA -- the buffer's cell-memory address (in 32-byte units) and
 * the index of the following descriptor -- then publish the pool's
 * head/tail/count registers.  pool 0 uses the even descriptor indices
 * (lbufd_index steps by 2; pool 1 interleaves on the odd ones). */
static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		/* wrap to the start of the next buffer row */
		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;	/* skip over pool 1's interleaved descriptor */
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
 478
/* populate receive large-buffer free pool 1 -- same layout as
 * he_init_rx_lbfp0() but starting at descriptor index 1, so pool 1
 * occupies the odd descriptor slots interleaved with pool 0's even
 * ones, and draws its buffers from the r1_startrow region. */
static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		/* wrap to the start of the next buffer row */
		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;	/* skip over pool 0's interleaved descriptor */
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
 509
/* populate the transmit local-buffer free pool.  tx descriptors follow
 * the two rx pools in descriptor space (index starts past
 * r0_numbuffs + r1_numbuffs) and are packed contiguously: lbufd_index
 * advances by 1 and lbm_offset by one 2-word descriptor, unlike the
 * interleaved rx pools.  no count register is written for tx. */
static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		/* wrap to the start of the next buffer row */
		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
 539
/* allocate and zero the transmit packet descriptor ready queue (tpdrq)
 * in coherent dma memory, initialize the software head/tail pointers,
 * and program the adapter's base/tail/size registers.
 * returns 0 or -ENOMEM. */
static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
 561
/* initialize the cell scheduler block via mailbox registers: clear the
 * state timers, program the first row of rate-grid timer reload values
 * from the link rate, then load the fixed threshold/control constants
 * from the reference-manual tables -- one set for 622 Mb/s boards, one
 * for 155 Mb/s. */
static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;	/* scheduler clock in Hz */
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;	/* per-slot decrement; 16 slots span rate..rate/2 */

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
	 	 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
 657
 658static int __devinit
 659he_init_cs_block_rcm(struct he_dev *he_dev)
 660{
 661	unsigned (*rategrid)[16][16];
 662	unsigned rate, delta;
 663	int i, j, reg;
 664
 665	unsigned rate_atmf, exp, man;
 666	unsigned long long rate_cps;
 667	int mult, buf, buf_limit = 4;
 668
 669	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
 670	if (!rategrid)
 671		return -ENOMEM;
 672
 673	/* initialize rate grid group table */
 674
 675	for (reg = 0x0; reg < 0xff; ++reg)
 676		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
 677
 678	/* initialize rate controller groups */
 679
 680	for (reg = 0x100; reg < 0x1ff; ++reg)
 681		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
 682	
 683	/* initialize tNrm lookup table */
 684
 685	/* the manual makes reference to a routine in a sample driver
 686	   for proper configuration; fortunately, we only need this
 687	   in order to support abr connection */
 688	
 689	/* initialize rate to group table */
 690
 691	rate = he_dev->atm_dev->link_rate;
 692	delta = rate / 32;
 693
 694	/*
 695	 * 2.4 transmit internal functions
 696	 * 
 697	 * we construct a copy of the rate grid used by the scheduler
 698	 * in order to construct the rate to group table below
 699	 */
 700
 701	for (j = 0; j < 16; j++) {
 702		(*rategrid)[0][j] = rate;
 703		rate -= delta;
 704	}
 705
 706	for (i = 1; i < 16; i++)
 707		for (j = 0; j < 16; j++)
 708			if (i > 14)
 709				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
 710			else
 711				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
 712
 713	/*
 714	 * 2.4 transmit internal function
 715	 *
 716	 * this table maps the upper 5 bits of exponent and mantissa
 717	 * of the atm forum representation of the rate into an index
 718	 * on rate grid  
 719	 */
 720
 721	rate_atmf = 0;
 722	while (rate_atmf < 0x400) {
 723		man = (rate_atmf & 0x1f) << 4;
 724		exp = rate_atmf >> 5;
 725
 726		/* 
 727			instead of '/ 512', use '>> 9' to prevent a call
 728			to divdu3 on x86 platforms
 729		*/
 730		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
 731
 732		if (rate_cps < 10)
 733			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
 734
 735		for (i = 255; i > 0; i--)
 736			if ((*rategrid)[i/16][i%16] >= rate_cps)
 737				break;	 /* pick nearest rate instead? */
 738
 739		/*
 740		 * each table entry is 16 bits: (rate grid index (8 bits)
 741		 * and a buffer limit (8 bits)
 742		 * there are two table entries in each 32-bit register
 743		 */
 744
 745#ifdef notdef
 746		buf = rate_cps * he_dev->tx_numbuffs /
 747				(he_dev->atm_dev->link_rate * 2);
 748#else
 749		/* this is pretty, but avoids _divdu3 and is mostly correct */
 750		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
 751		if (rate_cps > (272 * mult))
 752			buf = 4;
 753		else if (rate_cps > (204 * mult))
 754			buf = 3;
 755		else if (rate_cps > (136 * mult))
 756			buf = 2;
 757		else if (rate_cps > (68 * mult))
 758			buf = 1;
 759		else
 760			buf = 0;
 761#endif
 762		if (buf > buf_limit)
 763			buf = buf_limit;
 764		reg = (reg << 16) | ((i << 8) | buf);
 765
 766#define RTGTBL_OFFSET 0x400
 767	  
 768		if (rate_atmf & 0x1)
 769			he_writel_rcm(he_dev, reg,
 770				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
 771
 772		++rate_atmf;
 773	}
 774
 775	kfree(rategrid);
 776	return 0;
 777}
 778
/* set up one receive/transmit group (the driver only uses group 0, per
 * the file header notes):
 *   - the small buffer pool registers are zeroed (pool unused)
 *   - the large buffer pool (rbpl) is built from a pci_pool, with a
 *     bitmap and a virt-pointer table so the rx path can map a
 *     descriptor index back to its he_buff
 *   - the rx and tx "buffer ready" queues (rbrq/tbrq) are allocated in
 *     coherent dma memory and programmed into the group registers
 * returns 0 or -ENOMEM, unwinding all partial allocations via gotos. */
static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	/* small buffer pool: unused, so head/tail/irq registers are
	 * zeroed and the size is programmed as 0 */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	/* pre-fill every rbpl slot with a dma buffer from the pool and
	 * record it in the bitmap/virt tables for reverse lookup */
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;	/* next free-slot search hint */
		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
		/* hardware gets the address of the payload, past the header */
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			sizeof(struct he_rbrq), he_dev->rbrq_base,
			he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			sizeof(struct he_rbp), he_dev->rbpl_base,
			he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}
 917
/* allocate interrupt queue 0 in coherent dma memory, program its
 * base/size/threshold registers, disable queues 1-3, map all groups to
 * queue 0, and install the (shared) irq handler.
 * returns 0, -ENOMEM, or -EINVAL. */
static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	/* the extra (CONFIG_IRQ_SIZE+1'th) slot holds the tail offset
	 * word written back by the adapter */
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark every slot empty so stale entries are never processed */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* queues 1-3 unused: zero their registers */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		/* NOTE(review): irq_base is not freed on this path --
		 * presumably reclaimed by he_stop() in the caller's error
		 * handling; confirm */
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
 980
/*
 * Bring the adapter up: initialize the pci bus controller, reset the
 * card, read the prom (product id, media type, esi/mac address),
 * program the network controller's local buffer geometry and queues,
 * allocate the host status page, start the framer and finally enable
 * tx/rx.  Numbered comments ("4.3", "5.1.2", ...) refer to sections of
 * the ForeRunnerHE reference manual cited in the file header.
 *
 * Returns 0 on success or a negative errno.  Failure paths do not
 * unwind; every resource acquired here is checked-and-released by
 * he_stop(), which the caller is expected to invoke on error.
 */
static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;
	
	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization 
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	/* enable memory-mapped access, bus mastering and mem-write-invalidate */
	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	/* a too-small cache line size is raised to the 16-dword minimum;
	 * failure to write it is logged but not fatal */
	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 * 
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */ 
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	/* map the adapter's register window */
	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	/* load-time override: force 32-bit transfers even on a 64-bit slot */
	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *                    
	 *             HE155                          HE622
	 *                                                      
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	/* nvpibits/nvcibits are load-time overrides of the vpi/vci bit
	 * split; when both are given they must sum to HE_MAXCIDBITS */
	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	/* local buffer geometry -- see the memory map diagram above */
	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev, 
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev, 
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory 
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);
	
	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */
	
	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;         
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	/* zero the queue registers of the remaining groups -- only group 0
	 * carries traffic (see file header) */
	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_isMM(he_dev))
		suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}
1541
1542static void
1543he_stop(struct he_dev *he_dev)
1544{
1545	struct he_buff *heb, *next;
1546	struct pci_dev *pci_dev;
1547	u32 gen_cntl_0, reg;
1548	u16 command;
1549
1550	pci_dev = he_dev->pci_dev;
1551
1552	/* disable interrupts */
1553
1554	if (he_dev->membase) {
1555		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1556		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1557		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1558
1559		tasklet_disable(&he_dev->tasklet);
1560
1561		/* disable recv and transmit */
1562
1563		reg = he_readl_mbox(he_dev, CS_ERCTL0);
1564		reg &= ~(TX_ENABLE|ER_ENABLE);
1565		he_writel_mbox(he_dev, reg, CS_ERCTL0);
1566
1567		reg = he_readl(he_dev, RC_CONFIG);
1568		reg &= ~(RX_ENABLE);
1569		he_writel(he_dev, reg, RC_CONFIG);
1570	}
1571
1572#ifdef CONFIG_ATM_HE_USE_SUNI
1573	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1574		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1575#endif /* CONFIG_ATM_HE_USE_SUNI */
1576
1577	if (he_dev->irq)
1578		free_irq(he_dev->irq, he_dev);
1579
1580	if (he_dev->irq_base)
1581		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1582			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1583
1584	if (he_dev->hsp)
1585		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1586						he_dev->hsp, he_dev->hsp_phys);
1587
1588	if (he_dev->rbpl_base) {
1589		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1590			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1591
1592		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1593			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1594	}
1595
1596	kfree(he_dev->rbpl_virt);
1597	kfree(he_dev->rbpl_table);
1598
1599	if (he_dev->rbpl_pool)
1600		pci_pool_destroy(he_dev->rbpl_pool);
1601
1602	if (he_dev->rbrq_base)
1603		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1604							he_dev->rbrq_base, he_dev->rbrq_phys);
1605
1606	if (he_dev->tbrq_base)
1607		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1608							he_dev->tbrq_base, he_dev->tbrq_phys);
1609
1610	if (he_dev->tpdrq_base)
1611		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1612							he_dev->tpdrq_base, he_dev->tpdrq_phys);
1613
1614	if (he_dev->tpd_pool)
1615		pci_pool_destroy(he_dev->tpd_pool);
1616
1617	if (he_dev->pci_dev) {
1618		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1619		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1620		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1621	}
1622	
1623	if (he_dev->membase)
1624		iounmap(he_dev->membase);
1625}
1626
1627static struct he_tpd *
1628__alloc_tpd(struct he_dev *he_dev)
1629{
1630	struct he_tpd *tpd;
1631	dma_addr_t mapping;
1632
1633	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1634	if (tpd == NULL)
1635		return NULL;
1636			
1637	tpd->status = TPD_ADDR(mapping);
1638	tpd->reserved = 0; 
1639	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1640	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1641	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1642
1643	return tpd;
1644}
1645
/* Extract the 16-bit pdu length from the last 8 bytes (the trailer) of
 * a received aal5 frame: bytes len-6/len-5, big-endian. */
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

/* Last two bytes of the frame, big-endian.  Note: every use of the
 * `len` parameter is fully parenthesized -- the previous form
 * `[(len-1)]` mis-expanded for low-precedence argument expressions. */
#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
1659
/*
 * Drain the group's receive buffer return queue (RBRQ): collect the
 * host buffers the adapter has filled, and when an entry marks the end
 * of a pdu, assemble the buffers into an skb and push it up to the ATM
 * layer.  Runs from the tasklet with he_dev->global_lock held; the
 * lock is dropped only around vcc->push().
 *
 * Returns the number of pdus assembled, so the caller knows whether
 * the large-buffer pool is worth replenishing.
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	/* tail comes from the adapter-updated host status page; head is
	 * ours, so the queue is drained until head catches up with tail */
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* recover the pool slot index encoded in the dma address */
		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		/* cache the vcc lookup across consecutive entries for the
		 * same connection id */
		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			/* no owner for this buffer: free it immediately
			 * (unless the adapter reported a host-buffer error,
			 * in which case no buffer was consumed) */
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}
					
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
				atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* buffer length is reported in 32-bit words */
		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		/* keep accumulating buffers until the pdu is complete */
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		/* charge the socket's rx quota for the whole pdu */
		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		/* linearize the buffer chain into the skb */
		list_for_each_entry(heb, &he_vcc->buffers, entry)
			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				/* the true pdu length lives in the aal5 trailer */
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		/* drop the device lock while handing the skb to the stack */
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		/* the pdu has been copied (or dropped); recycle its buffers */
		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* tell the adapter how far we have consumed */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1819
1820static void
1821he_service_tbrq(struct he_dev *he_dev, int group)
1822{
1823	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1824				((unsigned long)he_dev->tbrq_base |
1825					he_dev->hsp->group[group].tbrq_tail);
1826	struct he_tpd *tpd;
1827	int slot, updated = 0;
1828	struct he_tpd *__tpd;
1829
1830	/* 2.1.6 transmit buffer return queue */
1831
1832	while (he_dev->tbrq_head != tbrq_tail) {
1833		++updated;
1834
1835		HPRINTK("tbrq%d 0x%x%s%s\n",
1836			group,
1837			TBRQ_TPD(he_dev->tbrq_head), 
1838			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1839			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1840		tpd = NULL;
1841		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1842			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1843				tpd = __tpd;
1844				list_del(&__tpd->entry);
1845				break;
1846			}
1847		}
1848
1849		if (tpd == NULL) {
1850			hprintk("unable to locate tpd for dma buffer %x\n",
1851						TBRQ_TPD(he_dev->tbrq_head));
1852			goto next_tbrq_entry;
1853		}
1854
1855		if (TBRQ_EOS(he_dev->tbrq_head)) {
1856			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1857				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1858			if (tpd->vcc)
1859				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1860
1861			goto next_tbrq_entry;
1862		}
1863
1864		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1865			if (tpd->iovec[slot].addr)
1866				pci_unmap_single(he_dev->pci_dev,
1867					tpd->iovec[slot].addr,
1868					tpd->iovec[slot].len & TPD_LEN_MASK,
1869							PCI_DMA_TODEVICE);
1870			if (tpd->iovec[slot].len & TPD_LST)
1871				break;
1872				
1873		}
1874
1875		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1876			if (tpd->vcc && tpd->vcc->pop)
1877				tpd->vcc->pop(tpd->vcc, tpd->skb);
1878			else
1879				dev_kfree_skb_any(tpd->skb);
1880		}
1881
1882next_tbrq_entry:
1883		if (tpd)
1884			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1885		he_dev->tbrq_head = (struct he_tbrq *)
1886				((unsigned long) he_dev->tbrq_base |
1887					TBRQ_MASK(he_dev->tbrq_head + 1));
1888	}
1889
1890	if (updated) {
1891		if (updated > he_dev->tbrq_peak)
1892			he_dev->tbrq_peak = updated;
1893
1894		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1895						G0_TBRQ_H + (group * 16));
1896	}
1897}
1898
/*
 * Replenish the large receive buffer pool (RBPL): allocate fresh dma
 * buffers and append them to the ring until the ring is full, the pool
 * allocator fails, or no free slot index remains in rbpl_table.
 * Note: only the group-0 registers are used here -- per the file
 * header, group 0 carries all traffic, so the group argument is
 * effectively ignored.
 */
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *new_tail;
	struct he_rbp *rbpl_head;
	struct he_buff *heb;
	dma_addr_t mapping;
	int i;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (new_tail == rbpl_head)
			break;

		/* find a free slot index, resuming from the last hint and
		 * wrapping to the start of the table once */
		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
		if (i > (RBPL_TABLE_SIZE - 1)) {
			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
			if (i > (RBPL_TABLE_SIZE - 1))
				break;
		}
		he_dev->rbpl_hint = i + 1;

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);
		he_dev->rbpl_virt[i] = heb;
		set_bit(i, he_dev->rbpl_table);
		/* the slot index rides in the high bits of the ring entry;
		 * the adapter dmas into the buffer's data area */
		new_tail->idx = i << RBP_IDX_OFFSET;
		new_tail->phys = mapping + offsetof(struct he_buff, data);

		he_dev->rbpl_tail = new_tail;
		++moved;
	} 

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
1945
/*
 * Bottom half: drain the adapter's interrupt event queue and dispatch
 * each event to the matching service routine.  Scheduled from
 * he_irq_handler(); runs with global_lock held (irqsave) throughout,
 * except around the PHY callback, which may itself take locks.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		/* each event word encodes its type and queue group */
		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				/* replenish the buffer pool only if pdus
				 * were actually consumed */
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark the slot consumed so the handler can detect a
		 * not-yet-updated entry next time around */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		/* acknowledge consumption up to the current tail */
		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
2032
/*
 * Top-half interrupt handler.
 *
 * Reads the adapter-maintained interrupt queue tail (a copy lives in host
 * memory at *irq_tailoffset) and, if new entries are present, acknowledges
 * the interrupt and defers all event processing to he_tasklet.
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* convert the adapter's word offset into a pointer inside irq_base */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* in-memory copy stale: re-read the tail from the IRQ0_BASE
		   register directly */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2070
/*
 * Queue one transmit packet descriptor on the TPDRQ (2.1.5).
 *
 * Caller must hold he_dev->global_lock (all callers in this file do).
 * If the ring is full even after refreshing the head pointer from the
 * adapter, the PDU is dropped: its DMA mappings are torn down, the skb
 * is returned/freed and tx_err is bumped (see FIXME below).
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	/* descriptor contents must be visible before the tail register is
	   advanced below */
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}
2135
/*
 * atmdev_ops.open: set up transmit and/or receive state for a VCC.
 *
 * Allocates the per-connection he_vcc, then programs the TSR (transmit)
 * and RSR (receive) connection-state registers for the requested traffic
 * class.  Returns 0 on success or a negative errno; on failure the
 * he_vcc is freed and ATM_VF_ADDR cleared.
 *
 * NOTE(review): if the rx setup fails after a CBR tx setup succeeded,
 * the cs_stper/total_bw bookkeeping taken in the tx path does not appear
 * to be rolled back on the open_failed path -- verify against he_close.
 */
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_vcc->buffers);
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;	/* -1 = no CBR rate-controller slot assigned */

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	/* ---- transmit side ---- */
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register, or one that
				   already runs at exactly this pcr (shared) */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
							break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	/* ---- receive side ---- */
	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
		 				&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
			  the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}
2348
/*
 * atmdev_ops.close: tear down a VCC's receive and/or transmit state.
 *
 * Receive side: issues the close command and sleeps (up to 30s) until
 * the tasklet wakes rx_waitq.  Transmit side: waits for outstanding
 * packets to drain, flushes the connection, and queues a final EOS TPD
 * whose completion wakes tx_waitq.  Frees the he_vcc at the end.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* must be on the waitqueue before the close command is
		   issued, or the wake-up could be missed */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* exponential backoff (1ms..250ms) while the socket still has
		   tx buffers outstanding, capped at MAX_RETRY iterations */
		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		/* final end-of-session descriptor; its completion interrupt
		   wakes us below */
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/* release the CBR rate-controller slot taken in he_open */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2506
/*
 * atmdev_ops.send: transmit one skb on a VCC.
 *
 * Validates length, builds a TPD (mapping the skb for DMA), and enqueues
 * it on the TPDRQ.  For AAL0 the 4-byte cell header is parsed for the
 * PTI/CLP bits and then stripped.  Returns 0 or a negative errno; on
 * error the skb is popped/freed and tx_err is incremented.
 */
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	/* AAL0 PDUs must be exactly one cell (header + payload) */
	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		/* AAL0: PTI/CLP live in the 4th byte of the cell header */
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		/* drop the header; the adapter wants payload only */
		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb_headlen(skb), PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;

	}

	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;		/* interrupt on completion */

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
2629
2630static int
2631he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2632{
2633	unsigned long flags;
2634	struct he_dev *he_dev = HE_DEV(atm_dev);
2635	struct he_ioctl_reg reg;
2636	int err = 0;
2637
2638	switch (cmd) {
2639		case HE_GET_REG:
2640			if (!capable(CAP_NET_ADMIN))
2641				return -EPERM;
2642
2643			if (copy_from_user(&reg, arg,
2644					   sizeof(struct he_ioctl_reg)))
2645				return -EFAULT;
2646
2647			spin_lock_irqsave(&he_dev->global_lock, flags);
2648			switch (reg.type) {
2649				case HE_REGTYPE_PCI:
2650					if (reg.addr >= HE_REGMAP_SIZE) {
2651						err = -EINVAL;
2652						break;
2653					}
2654
2655					reg.val = he_readl(he_dev, reg.addr);
2656					break;
2657				case HE_REGTYPE_RCM:
2658					reg.val =
2659						he_readl_rcm(he_dev, reg.addr);
2660					break;
2661				case HE_REGTYPE_TCM:
2662					reg.val =
2663						he_readl_tcm(he_dev, reg.addr);
2664					break;
2665				case HE_REGTYPE_MBOX:
2666					reg.val =
2667						he_readl_mbox(he_dev, reg.addr);
2668					break;
2669				default:
2670					err = -EINVAL;
2671					break;
2672			}
2673			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2674			if (err == 0)
2675				if (copy_to_user(arg, &reg,
2676							sizeof(struct he_ioctl_reg)))
2677					return -EFAULT;
2678			break;
2679		default:
2680#ifdef CONFIG_ATM_HE_USE_SUNI
2681			if (atm_dev->phy && atm_dev->phy->ioctl)
2682				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2683#else /* CONFIG_ATM_HE_USE_SUNI */
2684			err = -EINVAL;
2685#endif /* CONFIG_ATM_HE_USE_SUNI */
2686			break;
2687	}
2688
2689	return err;
2690}
2691
2692static void
2693he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2694{
2695	unsigned long flags;
2696	struct he_dev *he_dev = HE_DEV(atm_dev);
2697
2698	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2699
2700	spin_lock_irqsave(&he_dev->global_lock, flags);
2701	he_writel(he_dev, val, FRAMER + (addr*4));
2702	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2703	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2704}
2705 
2706	
2707static unsigned char
2708he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2709{ 
2710	unsigned long flags;
2711	struct he_dev *he_dev = HE_DEV(atm_dev);
2712	unsigned reg;
2713
2714	spin_lock_irqsave(&he_dev->global_lock, flags);
2715	reg = he_readl(he_dev, FRAMER + (addr*4));
2716	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2717
2718	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2719	return reg;
2720}
2721
2722static int
2723he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2724{
2725	unsigned long flags;
2726	struct he_dev *he_dev = HE_DEV(dev);
2727	int left, i;
2728#ifdef notdef
2729	struct he_rbrq *rbrq_tail;
2730	struct he_tpdrq *tpdrq_head;
2731	int rbpl_head, rbpl_tail;
2732#endif
2733	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2734
2735
2736	left = *pos;
2737	if (!left--)
2738		return sprintf(page, "ATM he driver\n");
2739
2740	if (!left--)
2741		return sprintf(page, "%s%s\n\n",
2742			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2743
2744	if (!left--)
2745		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2746
2747	spin_lock_irqsave(&he_dev->global_lock, flags);
2748	mcc += he_readl(he_dev, MCC);
2749	oec += he_readl(he_dev, OEC);
2750	dcc += he_readl(he_dev, DCC);
2751	cec += he_readl(he_dev, CEC);
2752	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2753
2754	if (!left--)
2755		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
2756							mcc, oec, dcc, cec);
2757
2758	if (!left--)
2759		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2760				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2761
2762	if (!left--)
2763		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2764						CONFIG_TPDRQ_SIZE);
2765
2766	if (!left--)
2767		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2768				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2769
2770	if (!left--)
2771		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2772					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2773
2774
2775#ifdef notdef
2776	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2777	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2778
2779	inuse = rbpl_head - rbpl_tail;
2780	if (inuse < 0)
2781		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2782	inuse /= sizeof(struct he_rbp);
2783
2784	if (!left--)
2785		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2786						CONFIG_RBPL_SIZE, inuse);
2787#endif
2788
2789	if (!left--)
2790		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2791
2792	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2793		if (!left--)
2794			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2795						he_dev->cs_stper[i].pcr,
2796						he_dev->cs_stper[i].inuse);
2797
2798	if (!left--)
2799		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2800			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2801
2802	return 0;
2803}
2804
2805/* eeprom routines  -- see 4.7 */
2806
/*
 * Bit-bang one byte out of the serial EEPROM (see 4.7): send the READ
 * opcode (readtab), clock out the 8-bit address MSB-first, then clock
 * the data byte back in via the ID_DOUT pin of HOST_CNTL.
 */
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;	/* clear the eeprom control bits */

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		/* each address bit is presented on two clock edges */
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	j = 0;

	val &= 0xFFFFF7FF;      /* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* deselect the chip */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
2855
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
/* module parameters; the backing static variables (with their defaults)
   are declared near the top of the file */
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

/* PCI ids this driver binds to (FORE ForeRunnerHE) */
static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};
2885
/* module entry point: register the PCI driver (probing happens per-device
   in he_init_one) */
static int __init he_init(void)
{
	return pci_register_driver(&he_driver);
}

/* module exit point: unregister the PCI driver, tearing down all devices */
static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);
/* v3.15 */
   1/*
   2
   3  he.c
   4
   5  ForeRunnerHE ATM Adapter driver for ATM on Linux
   6  Copyright (C) 1999-2001  Naval Research Laboratory
   7
   8  This library is free software; you can redistribute it and/or
   9  modify it under the terms of the GNU Lesser General Public
  10  License as published by the Free Software Foundation; either
  11  version 2.1 of the License, or (at your option) any later version.
  12
  13  This library is distributed in the hope that it will be useful,
  14  but WITHOUT ANY WARRANTY; without even the implied warranty of
  15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16  Lesser General Public License for more details.
  17
  18  You should have received a copy of the GNU Lesser General Public
  19  License along with this library; if not, write to the Free Software
  20  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  21
  22*/
  23
  24/*
  25
  26  he.c
  27
  28  ForeRunnerHE ATM Adapter driver for ATM on Linux
  29  Copyright (C) 1999-2001  Naval Research Laboratory
  30
  31  Permission to use, copy, modify and distribute this software and its
  32  documentation is hereby granted, provided that both the copyright
  33  notice and this permission notice appear in all copies of the software,
  34  derivative works or modified versions, and any portions thereof, and
  35  that both notices appear in supporting documentation.
  36
  37  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  38  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  39  RESULTING FROM THE USE OF THIS SOFTWARE.
  40
  41  This driver was written using the "Programmer's Reference Manual for
  42  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
  43
  44  AUTHORS:
  45	chas williams <chas@cmf.nrl.navy.mil>
  46	eric kinzie <ekinzie@cmf.nrl.navy.mil>
  47
  48  NOTES:
  49	4096 supported 'connections'
  50	group 0 is used for all traffic
  51	interrupt queue 0 is used for all interrupts
  52	aal0 support (based on work from ulrich.u.muller@nokia.com)
  53
  54 */
  55
  56#include <linux/module.h>
  57#include <linux/kernel.h>
  58#include <linux/skbuff.h>
  59#include <linux/pci.h>
  60#include <linux/errno.h>
  61#include <linux/types.h>
  62#include <linux/string.h>
  63#include <linux/delay.h>
  64#include <linux/init.h>
  65#include <linux/mm.h>
  66#include <linux/sched.h>
  67#include <linux/timer.h>
  68#include <linux/interrupt.h>
  69#include <linux/dma-mapping.h>
  70#include <linux/bitmap.h>
  71#include <linux/slab.h>
  72#include <asm/io.h>
  73#include <asm/byteorder.h>
  74#include <asm/uaccess.h>
  75
  76#include <linux/atmdev.h>
  77#include <linux/atm.h>
  78#include <linux/sonet.h>
  79
  80#undef USE_SCATTERGATHER
  81#undef USE_CHECKSUM_HW			/* still confused about this */
  82/* #undef HE_DEBUG */
  83
  84#include "he.h"
  85#include "suni.h"
  86#include <linux/atm_he.h>
  87
/* hprintk: unconditional error logging; HPRINTK: debug logging,
   compiled out entirely unless HE_DEBUG is defined.  Both expect a
   local `he_dev` to be in scope. */
#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */
  95
/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals -- the bool/short variables back the module parameters
   declared at the bottom of the file */

static struct he_dev *he_devs;		/* list of probed adapters */
static bool disable64;			/* disable 64-bit pci bus transfers */
static short nvpibits = -1;		/* -1: use the driver default */
static short nvcibits = -1;		/* -1: use the driver default */
static short rx_skb_reserve = 16;	/* padding for receive skb */
static bool irq_coalesce = 1;		/* use interrupt coalescing */
static bool sdh = 0;			/* use SDH framing */
 121
/* Bit-bang control sequences written to HOST_CNTL by read_prom_byte()
   to talk to the serial EEPROM. */

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
 163
/* operations exported to the ATM layer for each registered device */
static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};
 175
/* he_writel issues a wmb() after each MMIO write so consecutive writes
   reach the adapter in program order; he_readl is a plain MMIO read */
#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

/*
 * Write one word of connection memory (RCM/TCM/mailbox, selected by
 * flags): stage the value in CON_DAT, start the transfer via CON_CTL,
 * then spin until the adapter clears the busy bit.  Callers in this
 * file hold he_dev->global_lock around these accesses.
 */
static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);		/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);	/* busy-wait, no timeout */
}
 190
/* connection-memory write wrappers: same indirection, different target
 * (receive connection memory, transmit connection memory, mailbox) */

#define he_writel_rcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) 				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
 199
/*
 * he_readl_internal() - read one word of connection memory (target
 * selected by @flags) through the CON_CTL/CON_DAT indirection
 * registers; busy-waits until the access completes.
 */
static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);	/* spin until done */
	return he_readl(he_dev, CON_DAT);
}
 207
/* connection-memory read wrappers, matching the write wrappers above */

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)
 216
 217
/* figure 2.2 connection id */

/* vpi in the high bits, vci in the low (dev)->vcibits bits, masked
 * to the 13-bit connection id space */
#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)

/* 2.5.1 per connection transmit state registers */

/* TSR0..7 live in TCM region TSRA, eight words per cid (cid << 3) */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

/* write only the most significant byte of TSR4: the three byte
 * disables mask bytes 0-2 so an active connection's low bits stay
 * untouched (see the note above) */
#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


/* TSR8..11: region TSRB, four words per cid (cid << 2) */

#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


/* TSR12..13: region TSRC, two words per cid (cid << 1) */

#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


/* TSR14: region TSRD, one word per cid */

#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

/* top-byte-only variant of the TSR14 write (cf. he_writel_tsr4_upper) */
#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

/* RSR0..7 start at RCM offset 0, eight words per cid (cid << 3) */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
 326
 327static __inline__ struct atm_vcc*
 328__find_vcc(struct he_dev *he_dev, unsigned cid)
 329{
 330	struct hlist_head *head;
 331	struct atm_vcc *vcc;
 
 332	struct sock *s;
 333	short vpi;
 334	int vci;
 335
 336	vpi = cid >> he_dev->vcibits;
 337	vci = cid & ((1 << he_dev->vcibits) - 1);
 338	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 339
 340	sk_for_each(s, head) {
 341		vcc = atm_sk(s);
 342		if (vcc->dev == he_dev->atm_dev &&
 343		    vcc->vci == vci && vcc->vpi == vpi &&
 344		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
 345				return vcc;
 346		}
 347	}
 348	return NULL;
 349}
 350
 351static int he_init_one(struct pci_dev *pci_dev,
 352		       const struct pci_device_id *pci_ent)
 353{
 354	struct atm_dev *atm_dev = NULL;
 355	struct he_dev *he_dev = NULL;
 356	int err = 0;
 357
 358	printk(KERN_INFO "ATM he driver\n");
 359
 360	if (pci_enable_device(pci_dev))
 361		return -EIO;
 362	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
 363		printk(KERN_WARNING "he: no suitable dma available\n");
 364		err = -EIO;
 365		goto init_one_failure;
 366	}
 367
 368	atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
 369	if (!atm_dev) {
 370		err = -ENODEV;
 371		goto init_one_failure;
 372	}
 373	pci_set_drvdata(pci_dev, atm_dev);
 374
 375	he_dev = kzalloc(sizeof(struct he_dev),
 376							GFP_KERNEL);
 377	if (!he_dev) {
 378		err = -ENOMEM;
 379		goto init_one_failure;
 380	}
 381	he_dev->pci_dev = pci_dev;
 382	he_dev->atm_dev = atm_dev;
 383	he_dev->atm_dev->dev_data = he_dev;
 384	atm_dev->dev_data = he_dev;
 385	he_dev->number = atm_dev->number;
 386	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
 387	spin_lock_init(&he_dev->global_lock);
 388
 389	if (he_start(atm_dev)) {
 390		he_stop(he_dev);
 391		err = -ENODEV;
 392		goto init_one_failure;
 393	}
 394	he_dev->next = NULL;
 395	if (he_devs)
 396		he_dev->next = he_devs;
 397	he_devs = he_dev;
 398	return 0;
 399
 400init_one_failure:
 401	if (atm_dev)
 402		atm_dev_deregister(atm_dev);
 403	kfree(he_dev);
 404	pci_disable_device(pci_dev);
 405	return err;
 406}
 407
 408static void he_remove_one(struct pci_dev *pci_dev)
 
 409{
 410	struct atm_dev *atm_dev;
 411	struct he_dev *he_dev;
 412
 413	atm_dev = pci_get_drvdata(pci_dev);
 414	he_dev = HE_DEV(atm_dev);
 415
 416	/* need to remove from he_devs */
 417
 418	he_stop(he_dev);
 419	atm_dev_deregister(atm_dev);
 420	kfree(he_dev);
 421
 
 422	pci_disable_device(pci_dev);
 423}
 424
 425
 426static unsigned
 427rate_to_atmf(unsigned rate)		/* cps to atm forum format */
 428{
 429#define NONZERO (1 << 14)
 430
 431	unsigned exp = 0;
 432
 433	if (rate == 0)
 434		return 0;
 435
 436	rate <<= 9;
 437	while (rate > 0x3ff) {
 438		++exp;
 439		rate >>= 1;
 440	}
 441
 442	return (NONZERO | (exp << 9) | (rate & 0x1ff));
 443}
 444
/*
 * he_init_rx_lbfp0() - build the receive group-0 large buffer free
 * pool: for each of r0_numbuffs buffers, write an (address, next
 * descriptor index) pair into RCM local buffer memory, laying the
 * buffers out row by row starting at r0_startrow, then program the
 * pool's head/tail/count registers.  Group 0 uses the even
 * descriptor indices (0, 2, 4, ...); group 1 takes the odd ones.
 */
static void he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		/* divided by 32 -- presumably the hardware addresses
		 * local buffer memory in 32-byte units */
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			/* row full, continue in the next row */
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);	/* index of the last descriptor written */
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
 474
/*
 * he_init_rx_lbfp1() - same as he_init_rx_lbfp0() but for receive
 * group 1: descriptors use the odd indices (1, 3, 5, ...),
 * interleaved with group 0's even ones, and the buffers start at
 * r1_startrow.
 */
static void he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			/* row full, continue in the next row */
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);	/* index of the last descriptor written */
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
 504
/*
 * he_init_tx_lbfp() - build the transmit large buffer free pool.
 * Transmit descriptor indices start right after the rx groups'
 * (r0_numbuffs + r1_numbuffs) and advance by 1; each descriptor
 * occupies two RCM words (lbm_offset += 2) instead of the four
 * used per rx descriptor pair.
 */
static void he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			/* row full, continue in the next row */
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);	/* index of the last descriptor written */
}
 533
/*
 * he_init_tpdrq() - allocate and zero the transmit packet descriptor
 * ready queue (TPDRQ) in dma-coherent memory and program the
 * adapter's base/tail/size registers for it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	/* queue starts out empty: head == tail */
	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
 554
/*
 * he_init_cs_block() - 5.1.7 cell scheduler (CS) block init: clear
 * the scheduler timers, program the first row of rate grid timer
 * reload values from the link rate, then load the fixed threshold,
 * control and weight constants from the manual's tables -- one set
 * for the 622 Mb/s card, another for the 155 Mb/s card.
 */
static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	/* presumably the scheduler clock in Hz for each card type */
	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
	 	 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
 649
 650static int he_init_cs_block_rcm(struct he_dev *he_dev)
 
 651{
 652	unsigned (*rategrid)[16][16];
 653	unsigned rate, delta;
 654	int i, j, reg;
 655
 656	unsigned rate_atmf, exp, man;
 657	unsigned long long rate_cps;
 658	int mult, buf, buf_limit = 4;
 659
 660	rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
 661	if (!rategrid)
 662		return -ENOMEM;
 663
 664	/* initialize rate grid group table */
 665
 666	for (reg = 0x0; reg < 0xff; ++reg)
 667		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
 668
 669	/* initialize rate controller groups */
 670
 671	for (reg = 0x100; reg < 0x1ff; ++reg)
 672		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
 673	
 674	/* initialize tNrm lookup table */
 675
 676	/* the manual makes reference to a routine in a sample driver
 677	   for proper configuration; fortunately, we only need this
 678	   in order to support abr connection */
 679	
 680	/* initialize rate to group table */
 681
 682	rate = he_dev->atm_dev->link_rate;
 683	delta = rate / 32;
 684
 685	/*
 686	 * 2.4 transmit internal functions
 687	 * 
 688	 * we construct a copy of the rate grid used by the scheduler
 689	 * in order to construct the rate to group table below
 690	 */
 691
 692	for (j = 0; j < 16; j++) {
 693		(*rategrid)[0][j] = rate;
 694		rate -= delta;
 695	}
 696
 697	for (i = 1; i < 16; i++)
 698		for (j = 0; j < 16; j++)
 699			if (i > 14)
 700				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
 701			else
 702				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
 703
 704	/*
 705	 * 2.4 transmit internal function
 706	 *
 707	 * this table maps the upper 5 bits of exponent and mantissa
 708	 * of the atm forum representation of the rate into an index
 709	 * on rate grid  
 710	 */
 711
 712	rate_atmf = 0;
 713	while (rate_atmf < 0x400) {
 714		man = (rate_atmf & 0x1f) << 4;
 715		exp = rate_atmf >> 5;
 716
 717		/* 
 718			instead of '/ 512', use '>> 9' to prevent a call
 719			to divdu3 on x86 platforms
 720		*/
 721		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
 722
 723		if (rate_cps < 10)
 724			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
 725
 726		for (i = 255; i > 0; i--)
 727			if ((*rategrid)[i/16][i%16] >= rate_cps)
 728				break;	 /* pick nearest rate instead? */
 729
 730		/*
 731		 * each table entry is 16 bits: (rate grid index (8 bits)
 732		 * and a buffer limit (8 bits)
 733		 * there are two table entries in each 32-bit register
 734		 */
 735
 736#ifdef notdef
 737		buf = rate_cps * he_dev->tx_numbuffs /
 738				(he_dev->atm_dev->link_rate * 2);
 739#else
 740		/* this is pretty, but avoids _divdu3 and is mostly correct */
 741		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
 742		if (rate_cps > (272 * mult))
 743			buf = 4;
 744		else if (rate_cps > (204 * mult))
 745			buf = 3;
 746		else if (rate_cps > (136 * mult))
 747			buf = 2;
 748		else if (rate_cps > (68 * mult))
 749			buf = 1;
 750		else
 751			buf = 0;
 752#endif
 753		if (buf > buf_limit)
 754			buf = buf_limit;
 755		reg = (reg << 16) | ((i << 8) | buf);
 756
 757#define RTGTBL_OFFSET 0x400
 758	  
 759		if (rate_atmf & 0x1)
 760			he_writel_rcm(he_dev, reg,
 761				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
 762
 763		++rate_atmf;
 764	}
 765
 766	kfree(rategrid);
 767	return 0;
 768}
 769
/*
 * he_init_group() - set up the receive machinery for one interrupt
 * group: program the RBPS registers (presumably the small buffer
 * pool, left empty here), allocate the large buffer pool (rbpl)
 * together with its allocation bitmap and index-to-virtual lookup
 * table, prime the pool with CONFIG_RBPL_SIZE dma-pool buffers,
 * then allocate and program the receive (RBRQ) and transmit (TBRQ)
 * buffer ready queues.
 *
 * Returns 0 on success or -ENOMEM; partial allocations are unwound
 * through the goto chain at the bottom.
 */
static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	/* fill the free pool with freshly allocated dma-pool buffers */
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx =  i << RBP_IDX_OFFSET;
		/* hardware dma's straight into the data area, past the header */
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			sizeof(struct he_rbrq), he_dev->rbrq_base,
			he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			sizeof(struct he_rbp), he_dev->rbpl_base,
			he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}
 907
/*
 * he_init_irq() - allocate interrupt queue 0 (plus the tail-offset
 * word the adapter writes just past the end of the queue), mark
 * every slot invalid, program the four IRQ queue register sets
 * (queues 1-3 are programmed to zero) and the group mapping
 * registers, then request the pci interrupt line.
 *
 * Returns 0 on success or a negative errno.
 */
static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		/* NOTE(review): irq_base is not freed on this path;
		   presumably he_stop(), run by the caller on failure,
		   releases it -- verify */
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
 969
 970static int he_start(struct atm_dev *dev)
 
 971{
 972	struct he_dev *he_dev;
 973	struct pci_dev *pci_dev;
 974	unsigned long membase;
 975
 976	u16 command;
 977	u32 gen_cntl_0, host_cntl, lb_swap;
 978	u8 cache_size, timer;
 979	
 980	unsigned err;
 981	unsigned int status, reg;
 982	int i, group;
 983
 984	he_dev = HE_DEV(dev);
 985	pci_dev = he_dev->pci_dev;
 986
 987	membase = pci_resource_start(pci_dev, 0);
 988	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);
 989
 990	/*
 991	 * pci bus controller initialization 
 992	 */
 993
 994	/* 4.3 pci bus controller-specific initialization */
 995	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
 996		hprintk("can't read GEN_CNTL_0\n");
 997		return -EINVAL;
 998	}
 999	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1000	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1001		hprintk("can't write GEN_CNTL_0.\n");
1002		return -EINVAL;
1003	}
1004
1005	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1006		hprintk("can't read PCI_COMMAND.\n");
1007		return -EINVAL;
1008	}
1009
1010	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1011	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1012		hprintk("can't enable memory.\n");
1013		return -EINVAL;
1014	}
1015
1016	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1017		hprintk("can't read cache line size?\n");
1018		return -EINVAL;
1019	}
1020
1021	if (cache_size < 16) {
1022		cache_size = 16;
1023		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1024			hprintk("can't set cache line size to %d\n", cache_size);
1025	}
1026
1027	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1028		hprintk("can't read latency timer?\n");
1029		return -EINVAL;
1030	}
1031
1032	/* from table 3.9
1033	 *
1034	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1035	 * 
1036	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1037	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1038	 *
1039	 */ 
1040#define LAT_TIMER 209
1041	if (timer < LAT_TIMER) {
1042		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1043		timer = LAT_TIMER;
1044		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1045			hprintk("can't set latency timer to %d\n", timer);
1046	}
1047
1048	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1049		hprintk("can't set up page mapping\n");
1050		return -EINVAL;
1051	}
1052
1053	/* 4.4 card reset */
1054	he_writel(he_dev, 0x0, RESET_CNTL);
1055	he_writel(he_dev, 0xff, RESET_CNTL);
1056
1057	msleep(16);	/* 16 ms */
1058	status = he_readl(he_dev, RESET_CNTL);
1059	if ((status & BOARD_RST_STATUS) == 0) {
1060		hprintk("reset failed\n");
1061		return -EINVAL;
1062	}
1063
1064	/* 4.5 set bus width */
1065	host_cntl = he_readl(he_dev, HOST_CNTL);
1066	if (host_cntl & PCI_BUS_SIZE64)
1067		gen_cntl_0 |= ENBL_64;
1068	else
1069		gen_cntl_0 &= ~ENBL_64;
1070
1071	if (disable64 == 1) {
1072		hprintk("disabling 64-bit pci bus transfers\n");
1073		gen_cntl_0 &= ~ENBL_64;
1074	}
1075
1076	if (gen_cntl_0 & ENBL_64)
1077		hprintk("64-bit transfers enabled\n");
1078
1079	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1080
1081	/* 4.7 read prom contents */
1082	for (i = 0; i < PROD_ID_LEN; ++i)
1083		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1084
1085	he_dev->media = read_prom_byte(he_dev, MEDIA);
1086
1087	for (i = 0; i < 6; ++i)
1088		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1089
1090	hprintk("%s%s, %pM\n", he_dev->prod_id,
1091		he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
 
 
 
 
 
 
 
1092	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1093						ATM_OC12_PCR : ATM_OC3_PCR;
1094
1095	/* 4.6 set host endianess */
1096	lb_swap = he_readl(he_dev, LB_SWAP);
1097	if (he_is622(he_dev))
1098		lb_swap &= ~XFER_SIZE;		/* 4 cells */
1099	else
1100		lb_swap |= XFER_SIZE;		/* 8 cells */
1101#ifdef __BIG_ENDIAN
1102	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1103#else
1104	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1105			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1106#endif /* __BIG_ENDIAN */
1107	he_writel(he_dev, lb_swap, LB_SWAP);
1108
1109	/* 4.8 sdram controller initialization */
1110	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1111
1112	/* 4.9 initialize rnum value */
1113	lb_swap |= SWAP_RNUM_MAX(0xf);
1114	he_writel(he_dev, lb_swap, LB_SWAP);
1115
1116	/* 4.10 initialize the interrupt queues */
1117	if ((err = he_init_irq(he_dev)) != 0)
1118		return err;
1119
1120	/* 4.11 enable pci bus controller state machines */
1121	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1122				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1123	he_writel(he_dev, host_cntl, HOST_CNTL);
1124
1125	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1126	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1127
1128	/*
1129	 * atm network controller initialization
1130	 */
1131
1132	/* 5.1.1 generic configuration state */
1133
1134	/*
1135	 *		local (cell) buffer memory map
1136	 *                    
1137	 *             HE155                          HE622
1138	 *                                                      
1139	 *        0 ____________1023 bytes  0 _______________________2047 bytes
1140	 *         |            |            |                   |   |
1141	 *         |  utility   |            |        rx0        |   |
1142	 *        5|____________|         255|___________________| u |
1143	 *        6|            |         256|                   | t |
1144	 *         |            |            |                   | i |
1145	 *         |    rx0     |     row    |        tx         | l |
1146	 *         |            |            |                   | i |
1147	 *         |            |         767|___________________| t |
1148	 *      517|____________|         768|                   | y |
1149	 * row  518|            |            |        rx1        |   |
1150	 *         |            |        1023|___________________|___|
1151	 *         |            |
1152	 *         |    tx      |
1153	 *         |            |
1154	 *         |            |
1155	 *     1535|____________|
1156	 *     1536|            |
1157	 *         |    rx1     |
1158	 *     2047|____________|
1159	 *
1160	 */
1161
1162	/* total 4096 connections */
1163	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1164	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1165
1166	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1167		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1168		return -ENODEV;
1169	}
1170
1171	if (nvpibits != -1) {
1172		he_dev->vpibits = nvpibits;
1173		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1174	}
1175
1176	if (nvcibits != -1) {
1177		he_dev->vcibits = nvcibits;
1178		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1179	}
1180
1181
1182	if (he_is622(he_dev)) {
1183		he_dev->cells_per_row = 40;
1184		he_dev->bytes_per_row = 2048;
1185		he_dev->r0_numrows = 256;
1186		he_dev->tx_numrows = 512;
1187		he_dev->r1_numrows = 256;
1188		he_dev->r0_startrow = 0;
1189		he_dev->tx_startrow = 256;
1190		he_dev->r1_startrow = 768;
1191	} else {
1192		he_dev->cells_per_row = 20;
1193		he_dev->bytes_per_row = 1024;
1194		he_dev->r0_numrows = 512;
1195		he_dev->tx_numrows = 1018;
1196		he_dev->r1_numrows = 512;
1197		he_dev->r0_startrow = 6;
1198		he_dev->tx_startrow = 518;
1199		he_dev->r1_startrow = 1536;
1200	}
1201
1202	he_dev->cells_per_lbuf = 4;
1203	he_dev->buffer_limit = 4;
1204	he_dev->r0_numbuffs = he_dev->r0_numrows *
1205				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1206	if (he_dev->r0_numbuffs > 2560)
1207		he_dev->r0_numbuffs = 2560;
1208
1209	he_dev->r1_numbuffs = he_dev->r1_numrows *
1210				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1211	if (he_dev->r1_numbuffs > 2560)
1212		he_dev->r1_numbuffs = 2560;
1213
1214	he_dev->tx_numbuffs = he_dev->tx_numrows *
1215				he_dev->cells_per_row / he_dev->cells_per_lbuf;
1216	if (he_dev->tx_numbuffs > 5120)
1217		he_dev->tx_numbuffs = 5120;
1218
1219	/* 5.1.2 configure hardware dependent registers */
1220
1221	he_writel(he_dev, 
1222		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1223		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1224		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1225		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1226								LBARB);
1227
1228	he_writel(he_dev, BANK_ON |
1229		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1230								SDRAMCON);
1231
1232	he_writel(he_dev,
1233		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1234						RM_RW_WAIT(1), RCMCONFIG);
1235	he_writel(he_dev,
1236		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1237						TM_RW_WAIT(1), TCMCONFIG);
1238
1239	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1240
1241	he_writel(he_dev, 
1242		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1243		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1244		RX_VALVP(he_dev->vpibits) |
1245		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);
1246
1247	he_writel(he_dev, DRF_THRESH(0x20) |
1248		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1249		TX_VCI_MASK(he_dev->vcibits) |
1250		LBFREE_CNT(he_dev->tx_numbuffs), 		TX_CONFIG);
1251
1252	he_writel(he_dev, 0x0, TXAAL5_PROTO);
1253
1254	he_writel(he_dev, PHY_INT_ENB |
1255		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1256								RH_CONFIG);
1257
1258	/* 5.1.3 initialize connection memory */
1259
1260	for (i = 0; i < TCM_MEM_SIZE; ++i)
1261		he_writel_tcm(he_dev, 0, i);
1262
1263	for (i = 0; i < RCM_MEM_SIZE; ++i)
1264		he_writel_rcm(he_dev, 0, i);
1265
1266	/*
1267	 *	transmit connection memory map
1268	 *
1269	 *                  tx memory
1270	 *          0x0 ___________________
1271	 *             |                   |
1272	 *             |                   |
1273	 *             |       TSRa        |
1274	 *             |                   |
1275	 *             |                   |
1276	 *       0x8000|___________________|
1277	 *             |                   |
1278	 *             |       TSRb        |
1279	 *       0xc000|___________________|
1280	 *             |                   |
1281	 *             |       TSRc        |
1282	 *       0xe000|___________________|
1283	 *             |       TSRd        |
1284	 *       0xf000|___________________|
1285	 *             |       tmABR       |
1286	 *      0x10000|___________________|
1287	 *             |                   |
1288	 *             |       tmTPD       |
1289	 *             |___________________|
1290	 *             |                   |
1291	 *                      ....
1292	 *      0x1ffff|___________________|
1293	 *
1294	 *
1295	 */
1296
1297	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1298	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1299	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1300	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1301	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1302
1303
1304	/*
1305	 *	receive connection memory map
1306	 *
1307	 *          0x0 ___________________
1308	 *             |                   |
1309	 *             |                   |
1310	 *             |       RSRa        |
1311	 *             |                   |
1312	 *             |                   |
1313	 *       0x8000|___________________|
1314	 *             |                   |
1315	 *             |             rx0/1 |
1316	 *             |       LBM         |   link lists of local
1317	 *             |             tx    |   buffer memory 
1318	 *             |                   |
1319	 *       0xd000|___________________|
1320	 *             |                   |
1321	 *             |      rmABR        |
1322	 *       0xe000|___________________|
1323	 *             |                   |
1324	 *             |       RSRb        |
1325	 *             |___________________|
1326	 *             |                   |
1327	 *                      ....
1328	 *       0xffff|___________________|
1329	 */
1330
1331	he_writel(he_dev, 0x08000, RCMLBM_BA);
1332	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1333	he_writel(he_dev, 0x0d800, RCMABR_BA);
1334
1335	/* 5.1.4 initialize local buffer free pools linked lists */
1336
1337	he_init_rx_lbfp0(he_dev);
1338	he_init_rx_lbfp1(he_dev);
1339
1340	he_writel(he_dev, 0x0, RLBC_H);
1341	he_writel(he_dev, 0x0, RLBC_T);
1342	he_writel(he_dev, 0x0, RLBC_H2);
1343
1344	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
1345	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */
1346
1347	he_init_tx_lbfp(he_dev);
1348
1349	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1350
1351	/* 5.1.5 initialize intermediate receive queues */
1352
1353	if (he_is622(he_dev)) {
1354		he_writel(he_dev, 0x000f, G0_INMQ_S);
1355		he_writel(he_dev, 0x200f, G0_INMQ_L);
1356
1357		he_writel(he_dev, 0x001f, G1_INMQ_S);
1358		he_writel(he_dev, 0x201f, G1_INMQ_L);
1359
1360		he_writel(he_dev, 0x002f, G2_INMQ_S);
1361		he_writel(he_dev, 0x202f, G2_INMQ_L);
1362
1363		he_writel(he_dev, 0x003f, G3_INMQ_S);
1364		he_writel(he_dev, 0x203f, G3_INMQ_L);
1365
1366		he_writel(he_dev, 0x004f, G4_INMQ_S);
1367		he_writel(he_dev, 0x204f, G4_INMQ_L);
1368
1369		he_writel(he_dev, 0x005f, G5_INMQ_S);
1370		he_writel(he_dev, 0x205f, G5_INMQ_L);
1371
1372		he_writel(he_dev, 0x006f, G6_INMQ_S);
1373		he_writel(he_dev, 0x206f, G6_INMQ_L);
1374
1375		he_writel(he_dev, 0x007f, G7_INMQ_S);
1376		he_writel(he_dev, 0x207f, G7_INMQ_L);
1377	} else {
1378		he_writel(he_dev, 0x0000, G0_INMQ_S);
1379		he_writel(he_dev, 0x0008, G0_INMQ_L);
1380
1381		he_writel(he_dev, 0x0001, G1_INMQ_S);
1382		he_writel(he_dev, 0x0009, G1_INMQ_L);
1383
1384		he_writel(he_dev, 0x0002, G2_INMQ_S);
1385		he_writel(he_dev, 0x000a, G2_INMQ_L);
1386
1387		he_writel(he_dev, 0x0003, G3_INMQ_S);
1388		he_writel(he_dev, 0x000b, G3_INMQ_L);
1389
1390		he_writel(he_dev, 0x0004, G4_INMQ_S);
1391		he_writel(he_dev, 0x000c, G4_INMQ_L);
1392
1393		he_writel(he_dev, 0x0005, G5_INMQ_S);
1394		he_writel(he_dev, 0x000d, G5_INMQ_L);
1395
1396		he_writel(he_dev, 0x0006, G6_INMQ_S);
1397		he_writel(he_dev, 0x000e, G6_INMQ_L);
1398
1399		he_writel(he_dev, 0x0007, G7_INMQ_S);
1400		he_writel(he_dev, 0x000f, G7_INMQ_L);
1401	}
1402
1403	/* 5.1.6 application tunable parameters */
1404
1405	he_writel(he_dev, 0x0, MCC);
1406	he_writel(he_dev, 0x0, OEC);
1407	he_writel(he_dev, 0x0, DCC);
1408	he_writel(he_dev, 0x0, CEC);
1409	
1410	/* 5.1.7 cs block initialization */
1411
1412	he_init_cs_block(he_dev);
1413
1414	/* 5.1.8 cs block connection memory initialization */
1415	
1416	if (he_init_cs_block_rcm(he_dev) < 0)
1417		return -ENOMEM;
1418
1419	/* 5.1.10 initialize host structures */
1420
1421	he_init_tpdrq(he_dev);
1422
1423	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1424		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1425	if (he_dev->tpd_pool == NULL) {
1426		hprintk("unable to create tpd pci_pool\n");
1427		return -ENOMEM;         
1428	}
1429
1430	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1431
1432	if (he_init_group(he_dev, 0) != 0)
1433		return -ENOMEM;
1434
1435	for (group = 1; group < HE_NUM_GROUPS; ++group) {
1436		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1437		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1438		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1439		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1440						G0_RBPS_BS + (group * 32));
1441
1442		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1443		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1444		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1445						G0_RBPL_QI + (group * 32));
1446		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1447
1448		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1449		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1450		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1451						G0_RBRQ_Q + (group * 16));
1452		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1453
1454		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1455		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1456		he_writel(he_dev, TBRQ_THRESH(0x1),
1457						G0_TBRQ_THRESH + (group * 16));
1458		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1459	}
1460
1461	/* host status page */
1462
1463	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1464				sizeof(struct he_hsp), &he_dev->hsp_phys);
1465	if (he_dev->hsp == NULL) {
1466		hprintk("failed to allocate host status page\n");
1467		return -ENOMEM;
1468	}
1469	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1470	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1471
1472	/* initialize framer */
1473
1474#ifdef CONFIG_ATM_HE_USE_SUNI
1475	if (he_isMM(he_dev))
1476		suni_init(he_dev->atm_dev);
1477	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1478		he_dev->atm_dev->phy->start(he_dev->atm_dev);
1479#endif /* CONFIG_ATM_HE_USE_SUNI */
1480
1481	if (sdh) {
1482		/* this really should be in suni.c but for now... */
1483		int val;
1484
1485		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1486		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1487		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1488		he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1489	}
1490
1491	/* 5.1.12 enable transmit and receive */
1492
1493	reg = he_readl_mbox(he_dev, CS_ERCTL0);
1494	reg |= TX_ENABLE|ER_ENABLE;
1495	he_writel_mbox(he_dev, reg, CS_ERCTL0);
1496
1497	reg = he_readl(he_dev, RC_CONFIG);
1498	reg |= RX_ENABLE;
1499	he_writel(he_dev, reg, RC_CONFIG);
1500
1501	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1502		he_dev->cs_stper[i].inuse = 0;
1503		he_dev->cs_stper[i].pcr = -1;
1504	}
1505	he_dev->total_bw = 0;
1506
1507
1508	/* atm linux initialization */
1509
1510	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1511	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1512
1513	he_dev->irq_peak = 0;
1514	he_dev->rbrq_peak = 0;
1515	he_dev->rbpl_peak = 0;
1516	he_dev->tbrq_peak = 0;
1517
1518	HPRINTK("hell bent for leather!\n");
1519
1520	return 0;
1521}
1522
/*
 * Tear down the adapter: mask interrupts and stop the tasklet, disable
 * tx/rx in the hardware, stop the phy, then release the irq and every
 * coherent/pool allocation made at start time, and finally drop PCI
 * memory/bus-master access and unmap the register window.  Safe to call
 * on a partially initialized he_dev (every resource is tested first).
 */
static void
he_stop(struct he_dev *he_dev)
{
	struct he_buff *heb, *next;
	struct pci_dev *pci_dev;
	u32 gen_cntl_0, reg;
	u16 command;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

		tasklet_disable(&he_dev->tasklet);

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
		/* give back any receive buffers still owned by the driver
		 * before freeing the ring itself */
		list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

	kfree(he_dev->rbpl_virt);
	kfree(he_dev->rbpl_table);

	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	/* NOTE(review): this frees tpdrq_base with the TBRQ size
	 * (CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq)) -- looks like a
	 * copy/paste of the tbrq case above; verify against the size
	 * used when tpdrq_base was allocated */
	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}
	
	if (he_dev->membase)
		iounmap(he_dev->membase);
}
1607
1608static struct he_tpd *
1609__alloc_tpd(struct he_dev *he_dev)
1610{
1611	struct he_tpd *tpd;
1612	dma_addr_t mapping;
1613
1614	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1615	if (tpd == NULL)
1616		return NULL;
1617			
1618	tpd->status = TPD_ADDR(mapping);
1619	tpd->reserved = 0; 
1620	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1621	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1622	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1623
1624	return tpd;
1625}
1626
/* extract the 16-bit big-endian length field of the aal5 trailer
 * (bytes len-6 and len-5 of the reassembled pdu) */
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))
1630
/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

/* read that checksum back out of the last two bytes of the pdu,
 * big-endian */
#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len-1)]))
1640
/*
 * Drain one group's receive buffer return queue: walk entries from the
 * software head up to the hardware tail (published in the host status
 * page), gather each connection's buffers into complete PDUs, build an
 * skb and push it up the ATM stack.  Returns the number of PDUs
 * assembled.  Caller holds he_dev->global_lock; it is dropped around
 * vcc->push() (see below).
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* recover the host-side buffer from the address the
		 * adapter handed back (index encoded above RBP_IDX_OFFSET) */
		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		/* cheap vcc lookup cache: consecutive entries for the same
		 * cid reuse the previous __find_vcc() result */
		cid = RBRQ_CID(he_dev->rbrq_head);
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL  (cid 0x%x)\n", cid);
			/* orphaned buffer: return it to the pool unless the
			 * entry flags a host-buffer error (no buffer then) */
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}
					
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
				atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* buffer length is reported in 32-bit words */
		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		/* keep accumulating buffers until the pdu is complete */
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		/* copy the gathered buffers into the skb back-to-back */
		list_for_each_entry(heb, &he_vcc->buffers, entry)
			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				/* trim to the length in the aal5 trailer */
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		/* push may sleep/recurse; drop the device lock around it */
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		/* buffers were copied into the skb (or the pdu was dropped);
		 * give them all back to the dma pool */
		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		/* advance with wraparound (ring is power-of-two sized) */
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* tell the adapter how far we have consumed */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1800
1801static void
1802he_service_tbrq(struct he_dev *he_dev, int group)
1803{
1804	struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1805				((unsigned long)he_dev->tbrq_base |
1806					he_dev->hsp->group[group].tbrq_tail);
1807	struct he_tpd *tpd;
1808	int slot, updated = 0;
1809	struct he_tpd *__tpd;
1810
1811	/* 2.1.6 transmit buffer return queue */
1812
1813	while (he_dev->tbrq_head != tbrq_tail) {
1814		++updated;
1815
1816		HPRINTK("tbrq%d 0x%x%s%s\n",
1817			group,
1818			TBRQ_TPD(he_dev->tbrq_head), 
1819			TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1820			TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1821		tpd = NULL;
1822		list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1823			if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1824				tpd = __tpd;
1825				list_del(&__tpd->entry);
1826				break;
1827			}
1828		}
1829
1830		if (tpd == NULL) {
1831			hprintk("unable to locate tpd for dma buffer %x\n",
1832						TBRQ_TPD(he_dev->tbrq_head));
1833			goto next_tbrq_entry;
1834		}
1835
1836		if (TBRQ_EOS(he_dev->tbrq_head)) {
1837			HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1838				he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1839			if (tpd->vcc)
1840				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1841
1842			goto next_tbrq_entry;
1843		}
1844
1845		for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1846			if (tpd->iovec[slot].addr)
1847				pci_unmap_single(he_dev->pci_dev,
1848					tpd->iovec[slot].addr,
1849					tpd->iovec[slot].len & TPD_LEN_MASK,
1850							PCI_DMA_TODEVICE);
1851			if (tpd->iovec[slot].len & TPD_LST)
1852				break;
1853				
1854		}
1855
1856		if (tpd->skb) {	/* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1857			if (tpd->vcc && tpd->vcc->pop)
1858				tpd->vcc->pop(tpd->vcc, tpd->skb);
1859			else
1860				dev_kfree_skb_any(tpd->skb);
1861		}
1862
1863next_tbrq_entry:
1864		if (tpd)
1865			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1866		he_dev->tbrq_head = (struct he_tbrq *)
1867				((unsigned long) he_dev->tbrq_base |
1868					TBRQ_MASK(he_dev->tbrq_head + 1));
1869	}
1870
1871	if (updated) {
1872		if (updated > he_dev->tbrq_peak)
1873			he_dev->tbrq_peak = updated;
1874
1875		he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1876						G0_TBRQ_H + (group * 16));
1877	}
1878}
1879
/*
 * Refill the large receive buffer pool (rbpl) ring: allocate fresh
 * buffers from the dma pool and publish them to the adapter until the
 * ring is full, the index table has no free slot, or allocation fails.
 * Each buffer's slot index is tracked in rbpl_table/rbpl_virt so the
 * rbrq service path can map a returned address back to its he_buff.
 *
 * NOTE(review): the group argument is unused -- the head/tail registers
 * accessed are always the group-0 ones (G0_RBPL_S / G0_RBPL_T);
 * presumably fine because only group 0 carries traffic; confirm.
 */
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *new_tail;
	struct he_rbp *rbpl_head;
	struct he_buff *heb;
	dma_addr_t mapping;
	int i;
	int moved = 0;

	/* current hardware head, masked into the ring */
	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (new_tail == rbpl_head)
			break;

		/* find a free index slot, scanning from the last hint and
		 * wrapping to the start once */
		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
		if (i > (RBPL_TABLE_SIZE - 1)) {
			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
			if (i > (RBPL_TABLE_SIZE - 1))
				break;
		}
		he_dev->rbpl_hint = i + 1;

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);
		he_dev->rbpl_virt[i] = heb;
		set_bit(i, he_dev->rbpl_table);
		/* hand the adapter the slot index and the dma address of
		 * the payload area (past the he_buff header) */
		new_tail->idx = i << RBP_IDX_OFFSET;
		new_tail->phys = mapping + offsetof(struct he_buff, data);

		he_dev->rbpl_tail = new_tail;
		++moved;
	} 

	if (moved)
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
1926
/*
 * Bottom half: drain the interrupt event queue written by the adapter
 * and dispatch each event type to its service routine (rbrq, tbrq,
 * rbpl, phy, errors).  Runs under he_dev->global_lock; the lock is
 * dropped only around the phy interrupt callback.  When any events
 * were consumed, the new irq tail is acknowledged via IRQ0_HEAD.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				/* refill the buffer pool only if pdus were
				 * actually assembled */
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* the phy callback may sleep; release the
				 * device lock around it */
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark the slot consumed so a stale entry is detectable */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
2013
/*
 * Hard irq handler: pick up the adapter's current irq queue tail (from
 * the dma'd tailoffset, falling back to a register read per the 8.1.2
 * controller errata), and if new events are pending, acknowledge the
 * interrupt and defer the real work to the tasklet.  Returns IRQ_HANDLED
 * only when events were found, so shared-line interrupts are reported
 * correctly.
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* tailoffset is a queue index; << 2 scales it to bytes */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* dma'd tail looks stale; read it from the register instead */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2051
/*
 * Queue a transmit packet descriptor on the tpd ready queue (tpdrq) for
 * connection cid and kick the adapter.  If the ring is full (confirmed
 * by re-reading the hardware head) the pdu is dropped: buffers are
 * unmapped, the skb returned, tx_err counted and the tpd freed.
 * Caller is expected to hold he_dev->global_lock (this touches the
 * tpdrq ring and outstanding_tpds list unsynchronized itself).
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
			}
			/* NOTE(review): unlike he_service_tbrq, tpd->vcc is
			 * dereferenced here without a NULL test -- presumably
			 * every tpd carrying an skb has a vcc; confirm */
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	/* descriptor contents must be visible before the tail moves */
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}
2116
2117static int
2118he_open(struct atm_vcc *vcc)
2119{
2120	unsigned long flags;
2121	struct he_dev *he_dev = HE_DEV(vcc->dev);
2122	struct he_vcc *he_vcc;
2123	int err = 0;
2124	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2125	short vpi = vcc->vpi;
2126	int vci = vcc->vci;
2127
2128	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2129		return 0;
2130
2131	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2132
2133	set_bit(ATM_VF_ADDR, &vcc->flags);
2134
2135	cid = he_mkcid(he_dev, vpi, vci);
2136
2137	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2138	if (he_vcc == NULL) {
2139		hprintk("unable to allocate he_vcc during open\n");
2140		return -ENOMEM;
2141	}
2142
2143	INIT_LIST_HEAD(&he_vcc->buffers);
2144	he_vcc->pdu_len = 0;
2145	he_vcc->rc_index = -1;
2146
2147	init_waitqueue_head(&he_vcc->rx_waitq);
2148	init_waitqueue_head(&he_vcc->tx_waitq);
2149
2150	vcc->dev_data = he_vcc;
2151
2152	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2153		int pcr_goal;
2154
2155		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2156		if (pcr_goal == 0)
2157			pcr_goal = he_dev->atm_dev->link_rate;
2158		if (pcr_goal < 0)	/* means round down, technically */
2159			pcr_goal = -pcr_goal;
2160
2161		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2162
2163		switch (vcc->qos.aal) {
2164			case ATM_AAL5:
2165				tsr0_aal = TSR0_AAL5;
2166				tsr4 = TSR4_AAL5;
2167				break;
2168			case ATM_AAL0:
2169				tsr0_aal = TSR0_AAL0_SDU;
2170				tsr4 = TSR4_AAL0_SDU;
2171				break;
2172			default:
2173				err = -EINVAL;
2174				goto open_failed;
2175		}
2176
2177		spin_lock_irqsave(&he_dev->global_lock, flags);
2178		tsr0 = he_readl_tsr0(he_dev, cid);
2179		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2180
2181		if (TSR0_CONN_STATE(tsr0) != 0) {
2182			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2183			err = -EBUSY;
2184			goto open_failed;
2185		}
2186
2187		switch (vcc->qos.txtp.traffic_class) {
2188			case ATM_UBR:
2189				/* 2.3.3.1 open connection ubr */
2190
2191				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2192					TSR0_USE_WMIN | TSR0_UPDATE_GER;
2193				break;
2194
2195			case ATM_CBR:
2196				/* 2.3.3.2 open connection cbr */
2197
2198				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2199				if ((he_dev->total_bw + pcr_goal)
2200					> (he_dev->atm_dev->link_rate * 9 / 10))
2201				{
2202					err = -EBUSY;
2203					goto open_failed;
2204				}
2205
2206				spin_lock_irqsave(&he_dev->global_lock, flags);			/* also protects he_dev->cs_stper[] */
2207
2208				/* find an unused cs_stper register */
2209				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2210					if (he_dev->cs_stper[reg].inuse == 0 || 
2211					    he_dev->cs_stper[reg].pcr == pcr_goal)
2212							break;
2213
2214				if (reg == HE_NUM_CS_STPER) {
2215					err = -EBUSY;
2216					spin_unlock_irqrestore(&he_dev->global_lock, flags);
2217					goto open_failed;
2218				}
2219
2220				he_dev->total_bw += pcr_goal;
2221
2222				he_vcc->rc_index = reg;
2223				++he_dev->cs_stper[reg].inuse;
2224				he_dev->cs_stper[reg].pcr = pcr_goal;
2225
2226				clock = he_is622(he_dev) ? 66667000 : 50000000;
2227				period = clock / pcr_goal;
2228				
2229				HPRINTK("rc_index = %d period = %d\n",
2230								reg, period);
2231
2232				he_writel_mbox(he_dev, rate_to_atmf(period/2),
2233							CS_STPER0 + reg);
2234				spin_unlock_irqrestore(&he_dev->global_lock, flags);
2235
2236				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2237							TSR0_RC_INDEX(reg);
2238
2239				break;
2240			default:
2241				err = -EINVAL;
2242				goto open_failed;
2243		}
2244
2245		spin_lock_irqsave(&he_dev->global_lock, flags);
2246
2247		he_writel_tsr0(he_dev, tsr0, cid);
2248		he_writel_tsr4(he_dev, tsr4 | 1, cid);
2249		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2250					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2251		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2252		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2253
2254		he_writel_tsr3(he_dev, 0x0, cid);
2255		he_writel_tsr5(he_dev, 0x0, cid);
2256		he_writel_tsr6(he_dev, 0x0, cid);
2257		he_writel_tsr7(he_dev, 0x0, cid);
2258		he_writel_tsr8(he_dev, 0x0, cid);
2259		he_writel_tsr10(he_dev, 0x0, cid);
2260		he_writel_tsr11(he_dev, 0x0, cid);
2261		he_writel_tsr12(he_dev, 0x0, cid);
2262		he_writel_tsr13(he_dev, 0x0, cid);
2263		he_writel_tsr14(he_dev, 0x0, cid);
2264		(void) he_readl_tsr0(he_dev, cid);		/* flush posted writes */
2265		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2266	}
2267
2268	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2269		unsigned aal;
2270
2271		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2272		 				&HE_VCC(vcc)->rx_waitq);
2273
2274		switch (vcc->qos.aal) {
2275			case ATM_AAL5:
2276				aal = RSR0_AAL5;
2277				break;
2278			case ATM_AAL0:
2279				aal = RSR0_RAWCELL;
2280				break;
2281			default:
2282				err = -EINVAL;
2283				goto open_failed;
2284		}
2285
2286		spin_lock_irqsave(&he_dev->global_lock, flags);
2287
2288		rsr0 = he_readl_rsr0(he_dev, cid);
2289		if (rsr0 & RSR0_OPEN_CONN) {
2290			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2291
2292			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2293			err = -EBUSY;
2294			goto open_failed;
2295		}
2296
2297		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
2298		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
2299		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
2300				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2301
2302#ifdef USE_CHECKSUM_HW
2303		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2304			rsr0 |= RSR0_TCP_CKSUM;
2305#endif
2306
2307		he_writel_rsr4(he_dev, rsr4, cid);
2308		he_writel_rsr1(he_dev, rsr1, cid);
2309		/* 5.1.11 last parameter initialized should be
2310			  the open/closed indication in rsr0 */
2311		he_writel_rsr0(he_dev,
2312			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2313		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
2314
2315		spin_unlock_irqrestore(&he_dev->global_lock, flags);
2316	}
2317
2318open_failed:
2319
2320	if (err) {
2321		kfree(he_vcc);
2322		clear_bit(ATM_VF_ADDR, &vcc->flags);
2323	}
2324	else
2325		set_bit(ATM_VF_READY, &vcc->flags);
2326
2327	return err;
2328}
2329
/*
 * he_close() - atmdev_ops .close hook: tear down an open connection.
 *
 * Closes the receive half and/or the transmit half depending on which
 * traffic classes were configured at open time, then frees the per-vcc
 * state.  Sleeps (schedule_timeout(), msleep()), so it must run in
 * process context.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
/* bound on the tx-drain backoff loop below */
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		/* note: this poll busy-waits with the global lock held */
		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/*
		 * register on the rx waitqueue *before* issuing the close
		 * command so the completion wakeup cannot be missed
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		/* wait up to 30s for the adapter to acknowledge the close */
		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);
		
		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/*
		 * wait for the socket's tx write-buffer accounting to drain,
		 * backing off exponentially (1ms .. 250ms), at most MAX_RETRY
		 * sleeps.  The comparison against 1 (not 0) presumably
		 * reflects a baseline charge in sk_wmem_alloc -- TODO confirm.
		 */
		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev, 
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		/*
		 * queue an empty end-of-session TPD (TPD_EOS | TPD_INT) so
		 * the adapter raises an interrupt once the last packet on
		 * this connection has completed
		 */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		/* as on the rx side: register for the wakeup before enqueueing */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		/* busy-poll (lock held) until the adapter reports the session ended */
		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		/* ... and until the connection state machine returns to idle */
		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/*
		 * CBR: drop this vcc's reference on its rate-controller
		 * register and give its reserved bandwidth back (reverses
		 * the cs_stper bookkeeping done in the open path)
		 */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2487
/*
 * he_send() - atmdev_ops .send hook: transmit one skb on a vcc.
 *
 * Validates the packet size, builds one (or, with USE_SCATTERGATHER,
 * a chain of) transmit packet descriptors and enqueues them to the
 * TPDRQ under the global lock.  Returns 0 on success or a negative
 * errno; on error the skb is consumed and tx_err is incremented.
 */
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

/* maximum payload addressable by one TPD buffer length field */
#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	/* AAL0 must be exactly one raw-cell SDU; anything else is rejected */
	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	/* without S/G support we can only DMA from the linear skb area */
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		/* AAL0: PTI and CLP live in byte 3 of the 4-byte cell header */
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		/* strip the cell header; the adapter builds its own from the TPD */
		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	/* slot 0: the linear part of the skb */
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb_headlen(skb), PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				/*
				 * NOTE(review): TPDs already enqueued for
				 * this skb are not unwound here, yet the skb
				 * is freed -- looks like a latent
				 * use-after-free on this failure path;
				 * confirm before relying on it
				 */
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;

	}

	/* TPD_LST marks the final buffer of the packet */
	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	/* interrupt on completion so the skb can be freed */
	tpd->status |= TPD_INT;

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();		/* descriptor fully written before hardware sees it */
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
2610
2611static int
2612he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2613{
2614	unsigned long flags;
2615	struct he_dev *he_dev = HE_DEV(atm_dev);
2616	struct he_ioctl_reg reg;
2617	int err = 0;
2618
2619	switch (cmd) {
2620		case HE_GET_REG:
2621			if (!capable(CAP_NET_ADMIN))
2622				return -EPERM;
2623
2624			if (copy_from_user(&reg, arg,
2625					   sizeof(struct he_ioctl_reg)))
2626				return -EFAULT;
2627
2628			spin_lock_irqsave(&he_dev->global_lock, flags);
2629			switch (reg.type) {
2630				case HE_REGTYPE_PCI:
2631					if (reg.addr >= HE_REGMAP_SIZE) {
2632						err = -EINVAL;
2633						break;
2634					}
2635
2636					reg.val = he_readl(he_dev, reg.addr);
2637					break;
2638				case HE_REGTYPE_RCM:
2639					reg.val =
2640						he_readl_rcm(he_dev, reg.addr);
2641					break;
2642				case HE_REGTYPE_TCM:
2643					reg.val =
2644						he_readl_tcm(he_dev, reg.addr);
2645					break;
2646				case HE_REGTYPE_MBOX:
2647					reg.val =
2648						he_readl_mbox(he_dev, reg.addr);
2649					break;
2650				default:
2651					err = -EINVAL;
2652					break;
2653			}
2654			spin_unlock_irqrestore(&he_dev->global_lock, flags);
2655			if (err == 0)
2656				if (copy_to_user(arg, &reg,
2657							sizeof(struct he_ioctl_reg)))
2658					return -EFAULT;
2659			break;
2660		default:
2661#ifdef CONFIG_ATM_HE_USE_SUNI
2662			if (atm_dev->phy && atm_dev->phy->ioctl)
2663				err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2664#else /* CONFIG_ATM_HE_USE_SUNI */
2665			err = -EINVAL;
2666#endif /* CONFIG_ATM_HE_USE_SUNI */
2667			break;
2668	}
2669
2670	return err;
2671}
2672
2673static void
2674he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2675{
2676	unsigned long flags;
2677	struct he_dev *he_dev = HE_DEV(atm_dev);
2678
2679	HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2680
2681	spin_lock_irqsave(&he_dev->global_lock, flags);
2682	he_writel(he_dev, val, FRAMER + (addr*4));
2683	(void) he_readl(he_dev, FRAMER + (addr*4));		/* flush posted writes */
2684	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2685}
2686 
2687	
2688static unsigned char
2689he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2690{ 
2691	unsigned long flags;
2692	struct he_dev *he_dev = HE_DEV(atm_dev);
2693	unsigned reg;
2694
2695	spin_lock_irqsave(&he_dev->global_lock, flags);
2696	reg = he_readl(he_dev, FRAMER + (addr*4));
2697	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2698
2699	HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2700	return reg;
2701}
2702
2703static int
2704he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2705{
2706	unsigned long flags;
2707	struct he_dev *he_dev = HE_DEV(dev);
2708	int left, i;
2709#ifdef notdef
2710	struct he_rbrq *rbrq_tail;
2711	struct he_tpdrq *tpdrq_head;
2712	int rbpl_head, rbpl_tail;
2713#endif
2714	static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2715
2716
2717	left = *pos;
2718	if (!left--)
2719		return sprintf(page, "ATM he driver\n");
2720
2721	if (!left--)
2722		return sprintf(page, "%s%s\n\n",
2723			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2724
2725	if (!left--)
2726		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2727
2728	spin_lock_irqsave(&he_dev->global_lock, flags);
2729	mcc += he_readl(he_dev, MCC);
2730	oec += he_readl(he_dev, OEC);
2731	dcc += he_readl(he_dev, DCC);
2732	cec += he_readl(he_dev, CEC);
2733	spin_unlock_irqrestore(&he_dev->global_lock, flags);
2734
2735	if (!left--)
2736		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
2737							mcc, oec, dcc, cec);
2738
2739	if (!left--)
2740		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2741				CONFIG_IRQ_SIZE, he_dev->irq_peak);
2742
2743	if (!left--)
2744		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2745						CONFIG_TPDRQ_SIZE);
2746
2747	if (!left--)
2748		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2749				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2750
2751	if (!left--)
2752		return sprintf(page, "tbrq_size = %d  peak = %d\n",
2753					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2754
2755
2756#ifdef notdef
2757	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2758	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2759
2760	inuse = rbpl_head - rbpl_tail;
2761	if (inuse < 0)
2762		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2763	inuse /= sizeof(struct he_rbp);
2764
2765	if (!left--)
2766		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2767						CONFIG_RBPL_SIZE, inuse);
2768#endif
2769
2770	if (!left--)
2771		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2772
2773	for (i = 0; i < HE_NUM_CS_STPER; ++i)
2774		if (!left--)
2775			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2776						he_dev->cs_stper[i].pcr,
2777						he_dev->cs_stper[i].inuse);
2778
2779	if (!left--)
2780		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2781			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2782
2783	return 0;
2784}
2785
2786/* eeprom routines  -- see 4.7 */
2787
/*
 * read_prom_byte() - bit-bang one byte out of the serial EEPROM at the
 * given address (see section 4.7 of the manual).
 *
 * Drives the EEPROM control bits in HOST_CNTL: sends the READ opcode
 * from readtab[], clocks out the 8 address bits MSB first, then clocks
 * in the 8 data bits, sampling ID_DOUT on each cycle.
 */
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	/*
	 * snapshot HOST_CNTL and clear the EEPROM control field
	 * (NOTE(review): uses raw readl() here but he_readl() below --
	 * presumably equivalent; confirm against the accessor macros)
	 */
	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;
       
	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);
       
	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	/* Next, we need to send the byte address to read from */
	/* each address bit is presented twice, once per clock phase */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	/* restart the clock pattern for the data phase */
	j = 0;

	val &= 0xFFFFF7FF;      /* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);
       
	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		/* sample the data-out pin and shift it into place, MSB first */
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	/* deselect the EEPROM */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
2836
/* module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");

/* module load-time tunables (all read-only once loaded: perm 0) */
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

/* PCI IDs this driver binds to: the FORE HE adapter */
static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};

/* registers he_driver on load and unregisters it on unload */
module_pci_driver(he_driver);