Linux Audio

Check our new training course

Real-Time Linux with PREEMPT_RT training

Feb 18-20, 2025
Register
Loading...
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3  A FORE Systems 200E-series driver for ATM on Linux.
   4  Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
   5
   6  Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
   7
   8  This driver simultaneously supports PCA-200E and SBA-200E adapters
   9  on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
  10
  11*/
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/slab.h>
  16#include <linux/init.h>
  17#include <linux/capability.h>
  18#include <linux/interrupt.h>
  19#include <linux/bitops.h>
  20#include <linux/pci.h>
  21#include <linux/module.h>
  22#include <linux/atmdev.h>
  23#include <linux/sonet.h>
 
  24#include <linux/dma-mapping.h>
  25#include <linux/delay.h>
  26#include <linux/firmware.h>
  27#include <linux/pgtable.h>
  28#include <asm/io.h>
  29#include <asm/string.h>
  30#include <asm/page.h>
  31#include <asm/irq.h>
  32#include <asm/dma.h>
  33#include <asm/byteorder.h>
  34#include <linux/uaccess.h>
  35#include <linux/atomic.h>
  36
  37#ifdef CONFIG_SBUS
  38#include <linux/of.h>
  39#include <linux/platform_device.h>
  40#include <asm/idprom.h>
  41#include <asm/openprom.h>
  42#include <asm/oplib.h>
 
  43#endif
  44
  45#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
  46#define FORE200E_USE_TASKLET
  47#endif
  48
  49#if 0 /* enable the debugging code of the buffer supply queues */
  50#define FORE200E_BSQ_DEBUG
  51#endif
  52
  53#if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
  54#define FORE200E_52BYTE_AAL0_SDU
  55#endif
  56
  57#include "fore200e.h"
  58#include "suni.h"
  59
  60#define FORE200E_VERSION "0.3e"
  61
  62#define FORE200E         "fore200e: "
  63
  64#if 0 /* override .config */
  65#define CONFIG_ATM_FORE200E_DEBUG 1
  66#endif
  67#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
  68#define DPRINTK(level, format, args...)  do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
  69                                                  printk(FORE200E format, ##args); } while (0)
  70#else
  71#define DPRINTK(level, format, args...)  do {} while (0)
  72#endif
  73
  74
  75#define FORE200E_ALIGN(addr, alignment) \
  76        ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
  77
  78#define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))
  79
  80#define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
  81
  82#define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
  83
  84#if 1
  85#define ASSERT(expr)     if (!(expr)) { \
  86			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
  87				    __func__, __LINE__, #expr); \
  88			     panic(FORE200E "%s", __func__); \
  89			 }
  90#else
  91#define ASSERT(expr)     do {} while (0)
  92#endif
  93
  94
  95static const struct atmdev_ops   fore200e_ops;
  96
 
 
 
  97MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
  98MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
 
 
  99
 100static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
 101    { BUFFER_S1_NBR, BUFFER_L1_NBR },
 102    { BUFFER_S2_NBR, BUFFER_L2_NBR }
 103};
 104
 105static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
 106    { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
 107    { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
 108};
 109
 110
 111#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
 112static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
 113#endif
 114
 115
 116#if 0 /* currently unused */
 117static int 
 118fore200e_fore2atm_aal(enum fore200e_aal aal)
 119{
 120    switch(aal) {
 121    case FORE200E_AAL0:  return ATM_AAL0;
 122    case FORE200E_AAL34: return ATM_AAL34;
 123    case FORE200E_AAL5:  return ATM_AAL5;
 124    }
 125
 126    return -EINVAL;
 127}
 128#endif
 129
 130
 131static enum fore200e_aal
 132fore200e_atm2fore_aal(int aal)
 133{
 134    switch(aal) {
 135    case ATM_AAL0:  return FORE200E_AAL0;
 136    case ATM_AAL34: return FORE200E_AAL34;
 137    case ATM_AAL1:
 138    case ATM_AAL2:
 139    case ATM_AAL5:  return FORE200E_AAL5;
 140    }
 141
 142    return -EINVAL;
 143}
 144
 145
 146static char*
 147fore200e_irq_itoa(int irq)
 148{
 149    static char str[8];
 150    sprintf(str, "%d", irq);
 151    return str;
 152}
 153
 154
 155/* allocate and align a chunk of memory intended to hold the data behing exchanged
 156   between the driver and the adapter (using streaming DVMA) */
 157
 158static int
 159fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
 160{
 161    unsigned long offset = 0;
 162
 163    if (alignment <= sizeof(int))
 164	alignment = 0;
 165
 166    chunk->alloc_size = size + alignment;
 167    chunk->direction  = direction;
 168
 169    chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
 170    if (chunk->alloc_addr == NULL)
 171	return -ENOMEM;
 172
 173    if (alignment > 0)
 174	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment); 
 175    
 176    chunk->align_addr = chunk->alloc_addr + offset;
 177
 178    chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
 179				     size, direction);
 180    if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
 181	kfree(chunk->alloc_addr);
 182	return -ENOMEM;
 183    }
 184    return 0;
 185}
 186
 187
 188/* free a chunk of memory */
 189
 190static void
 191fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
 192{
 193    dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
 194		     chunk->direction);
 195    kfree(chunk->alloc_addr);
 196}
 197
 198/*
 199 * Allocate a DMA consistent chunk of memory intended to act as a communication
 200 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
 201 * and the adapter.
 202 */
 203static int
 204fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
 205		int size, int nbr, int alignment)
 206{
 207	/* returned chunks are page-aligned */
 208	chunk->alloc_size = size * nbr;
 209	chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
 210					       &chunk->dma_addr, GFP_KERNEL);
 211	if (!chunk->alloc_addr)
 212		return -ENOMEM;
 213	chunk->align_addr = chunk->alloc_addr;
 214	return 0;
 215}
 216
 217/*
 218 * Free a DMA consistent chunk of memory.
 219 */
 220static void
 221fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
 222{
 223	dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
 224			  chunk->dma_addr);
 225}
 226
 227static void
 228fore200e_spin(int msecs)
 229{
 230    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
 231    while (time_before(jiffies, timeout));
 232}
 233
 234
 235static int
 236fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
 237{
 238    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
 239    int           ok;
 240
 241    mb();
 242    do {
 243	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
 244	    break;
 245
 246    } while (time_before(jiffies, timeout));
 247
 248#if 1
 249    if (!ok) {
 250	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
 251	       *addr, val);
 252    }
 253#endif
 254
 255    return ok;
 256}
 257
 258
 259static int
 260fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
 261{
 262    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
 263    int           ok;
 264
 265    do {
 266	if ((ok = (fore200e->bus->read(addr) == val)))
 267	    break;
 268
 269    } while (time_before(jiffies, timeout));
 270
 271#if 1
 272    if (!ok) {
 273	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
 274	       fore200e->bus->read(addr), val);
 275    }
 276#endif
 277
 278    return ok;
 279}
 280
 281
 282static void
 283fore200e_free_rx_buf(struct fore200e* fore200e)
 284{
 285    int scheme, magn, nbr;
 286    struct buffer* buffer;
 287
 288    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
 289	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
 290
 291	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
 292
 293		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
 294
 295		    struct chunk* data = &buffer[ nbr ].data;
 296
 297		    if (data->alloc_addr != NULL)
 298			fore200e_chunk_free(fore200e, data);
 299		}
 300	    }
 301	}
 302    }
 303}
 304
 305
 306static void
 307fore200e_uninit_bs_queue(struct fore200e* fore200e)
 308{
 309    int scheme, magn;
 310    
 311    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
 312	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
 313
 314	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
 315	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
 316	    
 317	    if (status->alloc_addr)
 318		fore200e_dma_chunk_free(fore200e, status);
 319	    
 320	    if (rbd_block->alloc_addr)
 321		fore200e_dma_chunk_free(fore200e, rbd_block);
 322	}
 323    }
 324}
 325
 326
 327static int
 328fore200e_reset(struct fore200e* fore200e, int diag)
 329{
 330    int ok;
 331
 332    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
 333    
 334    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
 335
 336    fore200e->bus->reset(fore200e);
 337
 338    if (diag) {
 339	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
 340	if (ok == 0) {
 341	    
 342	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
 343	    return -ENODEV;
 344	}
 345
 346	printk(FORE200E "device %s self-test passed\n", fore200e->name);
 347	
 348	fore200e->state = FORE200E_STATE_RESET;
 349    }
 350
 351    return 0;
 352}
 353
 354
 355static void
 356fore200e_shutdown(struct fore200e* fore200e)
 357{
 358    printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
 359	   fore200e->name, fore200e->phys_base, 
 360	   fore200e_irq_itoa(fore200e->irq));
 361    
 362    if (fore200e->state > FORE200E_STATE_RESET) {
 363	/* first, reset the board to prevent further interrupts or data transfers */
 364	fore200e_reset(fore200e, 0);
 365    }
 366    
 367    /* then, release all allocated resources */
 368    switch(fore200e->state) {
 369
 370    case FORE200E_STATE_COMPLETE:
 371	kfree(fore200e->stats);
 372
 373	fallthrough;
 374    case FORE200E_STATE_IRQ:
 375	free_irq(fore200e->irq, fore200e->atm_dev);
 376
 377	fallthrough;
 378    case FORE200E_STATE_ALLOC_BUF:
 379	fore200e_free_rx_buf(fore200e);
 380
 381	fallthrough;
 382    case FORE200E_STATE_INIT_BSQ:
 383	fore200e_uninit_bs_queue(fore200e);
 384
 385	fallthrough;
 386    case FORE200E_STATE_INIT_RXQ:
 387	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
 388	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
 389
 390	fallthrough;
 391    case FORE200E_STATE_INIT_TXQ:
 392	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
 393	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
 394
 395	fallthrough;
 396    case FORE200E_STATE_INIT_CMDQ:
 397	fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
 398
 399	fallthrough;
 400    case FORE200E_STATE_INITIALIZE:
 401	/* nothing to do for that state */
 402
 403    case FORE200E_STATE_START_FW:
 404	/* nothing to do for that state */
 405
 406    case FORE200E_STATE_RESET:
 407	/* nothing to do for that state */
 408
 409    case FORE200E_STATE_MAP:
 410	fore200e->bus->unmap(fore200e);
 411
 412	fallthrough;
 413    case FORE200E_STATE_CONFIGURE:
 414	/* nothing to do for that state */
 415
 416    case FORE200E_STATE_REGISTER:
 417	/* XXX shouldn't we *start* by deregistering the device? */
 418	atm_dev_deregister(fore200e->atm_dev);
 419
 420	fallthrough;
 421    case FORE200E_STATE_BLANK:
 422	/* nothing to do for that state */
 423	break;
 424    }
 425}
 426
 427
 428#ifdef CONFIG_PCI
 429
 430static u32 fore200e_pca_read(volatile u32 __iomem *addr)
 431{
 432    /* on big-endian hosts, the board is configured to convert
 433       the endianess of slave RAM accesses  */
 434    return le32_to_cpu(readl(addr));
 435}
 436
 437
 438static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
 439{
 440    /* on big-endian hosts, the board is configured to convert
 441       the endianess of slave RAM accesses  */
 442    writel(cpu_to_le32(val), addr);
 443}
 444
 445static int
 446fore200e_pca_irq_check(struct fore200e* fore200e)
 447{
 448    /* this is a 1 bit register */
 449    int irq_posted = readl(fore200e->regs.pca.psr);
 450
 451#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
 452    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
 453	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
 454    }
 455#endif
 456
 457    return irq_posted;
 458}
 459
 460
 461static void
 462fore200e_pca_irq_ack(struct fore200e* fore200e)
 463{
 464    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
 465}
 466
 467
 468static void
 469fore200e_pca_reset(struct fore200e* fore200e)
 470{
 471    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
 472    fore200e_spin(10);
 473    writel(0, fore200e->regs.pca.hcr);
 474}
 475
 476
 477static int fore200e_pca_map(struct fore200e* fore200e)
 478{
 479    DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
 480
 481    fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
 482    
 483    if (fore200e->virt_base == NULL) {
 484	printk(FORE200E "can't map device %s\n", fore200e->name);
 485	return -EFAULT;
 486    }
 487
 488    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
 489
 490    /* gain access to the PCA specific registers  */
 491    fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
 492    fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
 493    fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
 494
 495    fore200e->state = FORE200E_STATE_MAP;
 496    return 0;
 497}
 498
 499
 500static void
 501fore200e_pca_unmap(struct fore200e* fore200e)
 502{
 503    DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
 504
 505    if (fore200e->virt_base != NULL)
 506	iounmap(fore200e->virt_base);
 507}
 508
 509
 510static int fore200e_pca_configure(struct fore200e *fore200e)
 511{
 512    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
 513    u8              master_ctrl, latency;
 514
 515    DPRINTK(2, "device %s being configured\n", fore200e->name);
 516
 517    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
 518	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
 519	return -EIO;
 520    }
 521
 522    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
 523
 524    master_ctrl = master_ctrl
 525#if defined(__BIG_ENDIAN)
 526	/* request the PCA board to convert the endianess of slave RAM accesses */
 527	| PCA200E_CTRL_CONVERT_ENDIAN
 528#endif
 529#if 0
 530        | PCA200E_CTRL_DIS_CACHE_RD
 531        | PCA200E_CTRL_DIS_WRT_INVAL
 532        | PCA200E_CTRL_ENA_CONT_REQ_MODE
 533        | PCA200E_CTRL_2_CACHE_WRT_INVAL
 534#endif
 535	| PCA200E_CTRL_LARGE_PCI_BURSTS;
 536    
 537    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
 538
 539    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
 540       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
 541       this may impact the performances of other PCI devices on the same bus, though */
 542    latency = 192;
 543    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
 544
 545    fore200e->state = FORE200E_STATE_CONFIGURE;
 546    return 0;
 547}
 548
 549
 550static int __init
 551fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
 552{
 553    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
 554    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
 555    struct prom_opcode      opcode;
 556    int                     ok;
 557    u32                     prom_dma;
 558
 559    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
 560
 561    opcode.opcode = OPCODE_GET_PROM;
 562    opcode.pad    = 0;
 563
 564    prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
 565			      DMA_FROM_DEVICE);
 566    if (dma_mapping_error(fore200e->dev, prom_dma))
 567	return -ENOMEM;
 568
 569    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
 570    
 571    *entry->status = STATUS_PENDING;
 572
 573    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
 574
 575    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
 576
 577    *entry->status = STATUS_FREE;
 578
 579    dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
 580
 581    if (ok == 0) {
 582	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
 583	return -EIO;
 584    }
 585
 586#if defined(__BIG_ENDIAN)
 587    
 588#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
 589
 590    /* MAC address is stored as little-endian */
 591    swap_here(&prom->mac_addr[0]);
 592    swap_here(&prom->mac_addr[4]);
 593#endif
 594    
 595    return 0;
 596}
 597
 598
 599static int
 600fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
 601{
 602    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
 603
 604    return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
 605		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
 606}
 607
 608static const struct fore200e_bus fore200e_pci_ops = {
 609	.model_name		= "PCA-200E",
 610	.proc_name		= "pca200e",
 611	.descr_alignment	= 32,
 612	.buffer_alignment	= 4,
 613	.status_alignment	= 32,
 614	.read			= fore200e_pca_read,
 615	.write			= fore200e_pca_write,
 616	.configure		= fore200e_pca_configure,
 617	.map			= fore200e_pca_map,
 618	.reset			= fore200e_pca_reset,
 619	.prom_read		= fore200e_pca_prom_read,
 620	.unmap			= fore200e_pca_unmap,
 621	.irq_check		= fore200e_pca_irq_check,
 622	.irq_ack		= fore200e_pca_irq_ack,
 623	.proc_read		= fore200e_pca_proc_read,
 624};
 625#endif /* CONFIG_PCI */
 626
 627#ifdef CONFIG_SBUS
 628
 629static u32 fore200e_sba_read(volatile u32 __iomem *addr)
 630{
 631    return sbus_readl(addr);
 632}
 633
 634static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
 635{
 636    sbus_writel(val, addr);
 637}
 638
 639static void fore200e_sba_irq_enable(struct fore200e *fore200e)
 640{
 641	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
 642	fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
 643}
 644
 645static int fore200e_sba_irq_check(struct fore200e *fore200e)
 646{
 647	return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
 648}
 649
 650static void fore200e_sba_irq_ack(struct fore200e *fore200e)
 651{
 652	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
 653	fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
 654}
 655
 656static void fore200e_sba_reset(struct fore200e *fore200e)
 657{
 658	fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
 659	fore200e_spin(10);
 660	fore200e->bus->write(0, fore200e->regs.sba.hcr);
 661}
 662
 663static int __init fore200e_sba_map(struct fore200e *fore200e)
 664{
 665	struct platform_device *op = to_platform_device(fore200e->dev);
 666	unsigned int bursts;
 667
 668	/* gain access to the SBA specific registers  */
 669	fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
 670	fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
 671	fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
 672	fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
 673
 674	if (!fore200e->virt_base) {
 675		printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
 676		return -EFAULT;
 677	}
 678
 679	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
 680    
 681	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
 682
 683	/* get the supported DVMA burst sizes */
 684	bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
 685
 686	if (sbus_can_dma_64bit())
 687		sbus_set_sbus64(&op->dev, bursts);
 688
 689	fore200e->state = FORE200E_STATE_MAP;
 690	return 0;
 691}
 692
 693static void fore200e_sba_unmap(struct fore200e *fore200e)
 694{
 695	struct platform_device *op = to_platform_device(fore200e->dev);
 696
 697	of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
 698	of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
 699	of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
 700	of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
 701}
 702
 703static int __init fore200e_sba_configure(struct fore200e *fore200e)
 704{
 705	fore200e->state = FORE200E_STATE_CONFIGURE;
 706	return 0;
 707}
 708
 709static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
 710{
 711	struct platform_device *op = to_platform_device(fore200e->dev);
 712	const u8 *prop;
 713	int len;
 714
 715	prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
 716	if (!prop)
 717		return -ENODEV;
 718	memcpy(&prom->mac_addr[4], prop, 4);
 719
 720	prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
 721	if (!prop)
 722		return -ENODEV;
 723	memcpy(&prom->mac_addr[2], prop, 4);
 724
 725	prom->serial_number = of_getintprop_default(op->dev.of_node,
 726						    "serialnumber", 0);
 727	prom->hw_revision = of_getintprop_default(op->dev.of_node,
 728						  "promversion", 0);
 729    
 730	return 0;
 731}
 732
 733static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
 734{
 735	struct platform_device *op = to_platform_device(fore200e->dev);
 736	const struct linux_prom_registers *regs;
 737
 738	regs = of_get_property(op->dev.of_node, "reg", NULL);
 739
 740	return sprintf(page, "   SBUS slot/device:\t\t%d/'%pOFn'\n",
 741		       (regs ? regs->which_io : 0), op->dev.of_node);
 742}
 743
 744static const struct fore200e_bus fore200e_sbus_ops = {
 745	.model_name		= "SBA-200E",
 746	.proc_name		= "sba200e",
 747	.descr_alignment	= 32,
 748	.buffer_alignment	= 64,
 749	.status_alignment	= 32,
 750	.read			= fore200e_sba_read,
 751	.write			= fore200e_sba_write,
 752	.configure		= fore200e_sba_configure,
 753	.map			= fore200e_sba_map,
 754	.reset			= fore200e_sba_reset,
 755	.prom_read		= fore200e_sba_prom_read,
 756	.unmap			= fore200e_sba_unmap,
 757	.irq_enable		= fore200e_sba_irq_enable,
 758	.irq_check		= fore200e_sba_irq_check,
 759	.irq_ack		= fore200e_sba_irq_ack,
 760	.proc_read		= fore200e_sba_proc_read,
 761};
 762#endif /* CONFIG_SBUS */
 763
 764static void
 765fore200e_tx_irq(struct fore200e* fore200e)
 766{
 767    struct host_txq*        txq = &fore200e->host_txq;
 768    struct host_txq_entry*  entry;
 769    struct atm_vcc*         vcc;
 770    struct fore200e_vc_map* vc_map;
 771
 772    if (fore200e->host_txq.txing == 0)
 773	return;
 774
 775    for (;;) {
 776	
 777	entry = &txq->host_entry[ txq->tail ];
 778
 779        if ((*entry->status & STATUS_COMPLETE) == 0) {
 780	    break;
 781	}
 782
 783	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n", 
 784		entry, txq->tail, entry->vc_map, entry->skb);
 785
 786	/* free copy of misaligned data */
 787	kfree(entry->data);
 788	
 789	/* remove DMA mapping */
 790	dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
 791				 DMA_TO_DEVICE);
 792
 793	vc_map = entry->vc_map;
 794
 795	/* vcc closed since the time the entry was submitted for tx? */
 796	if ((vc_map->vcc == NULL) ||
 797	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
 798
 799	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
 800		    fore200e->atm_dev->number);
 801
 802	    dev_kfree_skb_any(entry->skb);
 803	}
 804	else {
 805	    ASSERT(vc_map->vcc);
 806
 807	    /* vcc closed then immediately re-opened? */
 808	    if (vc_map->incarn != entry->incarn) {
 809
 810		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
 811		   if the same vcc is immediately re-opened, those pending PDUs must
 812		   not be popped after the completion of their emission, as they refer
 813		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
 814		   would be decremented by the size of the (unrelated) skb, possibly
 815		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
 816		   we thus bind the tx entry to the current incarnation of the vcc
 817		   when the entry is submitted for tx. When the tx later completes,
 818		   if the incarnation number of the tx entry does not match the one
 819		   of the vcc, then this implies that the vcc has been closed then re-opened.
 820		   we thus just drop the skb here. */
 821
 822		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
 823			fore200e->atm_dev->number);
 824
 825		dev_kfree_skb_any(entry->skb);
 826	    }
 827	    else {
 828		vcc = vc_map->vcc;
 829		ASSERT(vcc);
 830
 831		/* notify tx completion */
 832		if (vcc->pop) {
 833		    vcc->pop(vcc, entry->skb);
 834		}
 835		else {
 836		    dev_kfree_skb_any(entry->skb);
 837		}
 838
 839		/* check error condition */
 840		if (*entry->status & STATUS_ERROR)
 841		    atomic_inc(&vcc->stats->tx_err);
 842		else
 843		    atomic_inc(&vcc->stats->tx);
 844	    }
 845	}
 846
 847	*entry->status = STATUS_FREE;
 848
 849	fore200e->host_txq.txing--;
 850
 851	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
 852    }
 853}
 854
 855
 856#ifdef FORE200E_BSQ_DEBUG
 857int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
 858{
 859    struct buffer* buffer;
 860    int count = 0;
 861
 862    buffer = bsq->freebuf;
 863    while (buffer) {
 864
 865	if (buffer->supplied) {
 866	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
 867		   where, scheme, magn, buffer->index);
 868	}
 869
 870	if (buffer->magn != magn) {
 871	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
 872		   where, scheme, magn, buffer->index, buffer->magn);
 873	}
 874
 875	if (buffer->scheme != scheme) {
 876	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
 877		   where, scheme, magn, buffer->index, buffer->scheme);
 878	}
 879
 880	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
 881	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
 882		   where, scheme, magn, buffer->index);
 883	}
 884
 885	count++;
 886	buffer = buffer->next;
 887    }
 888
 889    if (count != bsq->freebuf_count) {
 890	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
 891	       where, scheme, magn, count, bsq->freebuf_count);
 892    }
 893    return 0;
 894}
 895#endif
 896
 897
 898static void
 899fore200e_supply(struct fore200e* fore200e)
 900{
 901    int  scheme, magn, i;
 902
 903    struct host_bsq*       bsq;
 904    struct host_bsq_entry* entry;
 905    struct buffer*         buffer;
 906
 907    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
 908	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
 909
 910	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
 911
 912#ifdef FORE200E_BSQ_DEBUG
 913	    bsq_audit(1, bsq, scheme, magn);
 914#endif
 915	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {
 916
 917		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
 918			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
 919
 920		entry = &bsq->host_entry[ bsq->head ];
 921
 922		for (i = 0; i < RBD_BLK_SIZE; i++) {
 923
 924		    /* take the first buffer in the free buffer list */
 925		    buffer = bsq->freebuf;
 926		    if (!buffer) {
 927			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
 928			       scheme, magn, bsq->freebuf_count);
 929			return;
 930		    }
 931		    bsq->freebuf = buffer->next;
 932		    
 933#ifdef FORE200E_BSQ_DEBUG
 934		    if (buffer->supplied)
 935			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
 936			       scheme, magn, buffer->index);
 937		    buffer->supplied = 1;
 938#endif
 939		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
 940		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
 941		}
 942
 943		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
 944
 945 		/* decrease accordingly the number of free rx buffers */
 946		bsq->freebuf_count -= RBD_BLK_SIZE;
 947
 948		*entry->status = STATUS_PENDING;
 949		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
 950	    }
 951	}
 952    }
 953}
 954
 955
 956static int
 957fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
 958{
 959    struct sk_buff*      skb;
 960    struct buffer*       buffer;
 961    struct fore200e_vcc* fore200e_vcc;
 962    int                  i, pdu_len = 0;
 963#ifdef FORE200E_52BYTE_AAL0_SDU
 964    u32                  cell_header = 0;
 965#endif
 966
 967    ASSERT(vcc);
 968    
 969    fore200e_vcc = FORE200E_VCC(vcc);
 970    ASSERT(fore200e_vcc);
 971
 972#ifdef FORE200E_52BYTE_AAL0_SDU
 973    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
 974
 975	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
 976	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
 977                      (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
 978                      (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) | 
 979                       rpd->atm_header.clp;
 980	pdu_len = 4;
 981    }
 982#endif
 983    
 984    /* compute total PDU length */
 985    for (i = 0; i < rpd->nseg; i++)
 986	pdu_len += rpd->rsd[ i ].length;
 987    
 988    skb = alloc_skb(pdu_len, GFP_ATOMIC);
 989    if (skb == NULL) {
 990	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
 991
 992	atomic_inc(&vcc->stats->rx_drop);
 993	return -ENOMEM;
 994    } 
 995
 996    __net_timestamp(skb);
 997    
 998#ifdef FORE200E_52BYTE_AAL0_SDU
 999    if (cell_header) {
1000	*((u32*)skb_put(skb, 4)) = cell_header;
1001    }
1002#endif
1003
1004    /* reassemble segments */
1005    for (i = 0; i < rpd->nseg; i++) {
1006	
1007	/* rebuild rx buffer address from rsd handle */
1008	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1009	
1010	/* Make device DMA transfer visible to CPU.  */
1011	dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
1012				rpd->rsd[i].length, DMA_FROM_DEVICE);
1013	
1014	skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
1015
1016	/* Now let the device get at it again.  */
1017	dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
1018				   rpd->rsd[i].length, DMA_FROM_DEVICE);
1019    }
1020
1021    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1022    
1023    if (pdu_len < fore200e_vcc->rx_min_pdu)
1024	fore200e_vcc->rx_min_pdu = pdu_len;
1025    if (pdu_len > fore200e_vcc->rx_max_pdu)
1026	fore200e_vcc->rx_max_pdu = pdu_len;
1027    fore200e_vcc->rx_pdu++;
1028
1029    /* push PDU */
1030    if (atm_charge(vcc, skb->truesize) == 0) {
1031
1032	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1033		vcc->itf, vcc->vpi, vcc->vci);
1034
1035	dev_kfree_skb_any(skb);
1036
1037	atomic_inc(&vcc->stats->rx_drop);
1038	return -ENOMEM;
1039    }
1040
1041    vcc->push(vcc, skb);
1042    atomic_inc(&vcc->stats->rx);
1043
1044    return 0;
1045}
1046
1047
1048static void
1049fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1050{
1051    struct host_bsq* bsq;
1052    struct buffer*   buffer;
1053    int              i;
1054    
1055    for (i = 0; i < rpd->nseg; i++) {
1056
1057	/* rebuild rx buffer address from rsd handle */
1058	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1059
1060	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1061
1062#ifdef FORE200E_BSQ_DEBUG
1063	bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1064
1065	if (buffer->supplied == 0)
1066	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1067		   buffer->scheme, buffer->magn, buffer->index);
1068	buffer->supplied = 0;
1069#endif
1070
1071	/* re-insert the buffer into the free buffer list */
1072	buffer->next = bsq->freebuf;
1073	bsq->freebuf = buffer;
1074
1075	/* then increment the number of free rx buffers */
1076	bsq->freebuf_count++;
1077    }
1078}
1079
1080
1081static void
1082fore200e_rx_irq(struct fore200e* fore200e)
1083{
1084    struct host_rxq*        rxq = &fore200e->host_rxq;
1085    struct host_rxq_entry*  entry;
1086    struct atm_vcc*         vcc;
1087    struct fore200e_vc_map* vc_map;
1088
1089    for (;;) {
1090	
1091	entry = &rxq->host_entry[ rxq->head ];
1092
1093	/* no more received PDUs */
1094	if ((*entry->status & STATUS_COMPLETE) == 0)
1095	    break;
1096
1097	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1098
1099	if ((vc_map->vcc == NULL) ||
1100	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1101
1102	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1103		    fore200e->atm_dev->number,
1104		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1105	}
1106	else {
1107	    vcc = vc_map->vcc;
1108	    ASSERT(vcc);
1109
1110	    if ((*entry->status & STATUS_ERROR) == 0) {
1111
1112		fore200e_push_rpd(fore200e, vcc, entry->rpd);
1113	    }
1114	    else {
1115		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1116			fore200e->atm_dev->number,
1117			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1118		atomic_inc(&vcc->stats->rx_err);
1119	    }
1120	}
1121
1122	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1123
1124	fore200e_collect_rpd(fore200e, entry->rpd);
1125
1126	/* rewrite the rpd address to ack the received PDU */
1127	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1128	*entry->status = STATUS_FREE;
1129
1130	fore200e_supply(fore200e);
1131    }
1132}
1133
1134
1135#ifndef FORE200E_USE_TASKLET
1136static void
1137fore200e_irq(struct fore200e* fore200e)
1138{
1139    unsigned long flags;
1140
1141    spin_lock_irqsave(&fore200e->q_lock, flags);
1142    fore200e_rx_irq(fore200e);
1143    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1144
1145    spin_lock_irqsave(&fore200e->q_lock, flags);
1146    fore200e_tx_irq(fore200e);
1147    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1148}
1149#endif
1150
1151
1152static irqreturn_t
1153fore200e_interrupt(int irq, void* dev)
1154{
1155    struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1156
1157    if (fore200e->bus->irq_check(fore200e) == 0) {
1158	
1159	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1160	return IRQ_NONE;
1161    }
1162    DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1163
1164#ifdef FORE200E_USE_TASKLET
1165    tasklet_schedule(&fore200e->tx_tasklet);
1166    tasklet_schedule(&fore200e->rx_tasklet);
1167#else
1168    fore200e_irq(fore200e);
1169#endif
1170    
1171    fore200e->bus->irq_ack(fore200e);
1172    return IRQ_HANDLED;
1173}
1174
1175
1176#ifdef FORE200E_USE_TASKLET
1177static void
1178fore200e_tx_tasklet(unsigned long data)
1179{
1180    struct fore200e* fore200e = (struct fore200e*) data;
1181    unsigned long flags;
1182
1183    DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1184
1185    spin_lock_irqsave(&fore200e->q_lock, flags);
1186    fore200e_tx_irq(fore200e);
1187    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1188}
1189
1190
1191static void
1192fore200e_rx_tasklet(unsigned long data)
1193{
1194    struct fore200e* fore200e = (struct fore200e*) data;
1195    unsigned long    flags;
1196
1197    DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1198
1199    spin_lock_irqsave(&fore200e->q_lock, flags);
1200    fore200e_rx_irq((struct fore200e*) data);
1201    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1202}
1203#endif
1204
1205
1206static int
1207fore200e_select_scheme(struct atm_vcc* vcc)
1208{
1209    /* fairly balance the VCs over (identical) buffer schemes */
1210    int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1211
1212    DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1213	    vcc->itf, vcc->vpi, vcc->vci, scheme);
1214
1215    return scheme;
1216}
1217
1218
1219static int 
1220fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1221{
1222    struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
1223    struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
1224    struct activate_opcode   activ_opcode;
1225    struct deactivate_opcode deactiv_opcode;
1226    struct vpvc              vpvc;
1227    int                      ok;
1228    enum fore200e_aal        aal = fore200e_atm2fore_aal(vcc->qos.aal);
1229
1230    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1231    
1232    if (activate) {
1233	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1234	
1235	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1236	activ_opcode.aal    = aal;
1237	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1238	activ_opcode.pad    = 0;
1239    }
1240    else {
1241	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1242	deactiv_opcode.pad    = 0;
1243    }
1244
1245    vpvc.vci = vcc->vci;
1246    vpvc.vpi = vcc->vpi;
1247
1248    *entry->status = STATUS_PENDING;
1249
1250    if (activate) {
1251
1252#ifdef FORE200E_52BYTE_AAL0_SDU
1253	mtu = 48;
1254#endif
1255	/* the MTU is not used by the cp, except in the case of AAL0 */
1256	fore200e->bus->write(mtu,                        &entry->cp_entry->cmd.activate_block.mtu);
1257	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1258	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1259    }
1260    else {
1261	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1262	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1263    }
1264
1265    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1266
1267    *entry->status = STATUS_FREE;
1268
1269    if (ok == 0) {
1270	printk(FORE200E "unable to %s VC %d.%d.%d\n",
1271	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1272	return -EIO;
1273    }
1274
1275    DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 
1276	    activate ? "open" : "clos");
1277
1278    return 0;
1279}
1280
1281
1282#define FORE200E_MAX_BACK2BACK_CELLS 255    /* XXX depends on CDVT */
1283
1284static void
1285fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1286{
1287    if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1288    
1289	/* compute the data cells to idle cells ratio from the tx PCR */
1290	rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1291	rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1292    }
1293    else {
1294	/* disable rate control */
1295	rate->data_cells = rate->idle_cells = 0;
1296    }
1297}
1298
1299
1300static int
1301fore200e_open(struct atm_vcc *vcc)
1302{
1303    struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1304    struct fore200e_vcc*    fore200e_vcc;
1305    struct fore200e_vc_map* vc_map;
1306    unsigned long	    flags;
1307    int			    vci = vcc->vci;
1308    short		    vpi = vcc->vpi;
1309
1310    ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1311    ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1312
1313    spin_lock_irqsave(&fore200e->q_lock, flags);
1314
1315    vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1316    if (vc_map->vcc) {
1317
1318	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1319
1320	printk(FORE200E "VC %d.%d.%d already in use\n",
1321	       fore200e->atm_dev->number, vpi, vci);
1322
1323	return -EINVAL;
1324    }
1325
1326    vc_map->vcc = vcc;
1327
1328    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1329
1330    fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1331    if (fore200e_vcc == NULL) {
1332	vc_map->vcc = NULL;
1333	return -ENOMEM;
1334    }
1335
1336    DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1337	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1338	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1339	    fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1340	    vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1341	    fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1342	    vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1343    
1344    /* pseudo-CBR bandwidth requested? */
1345    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1346	
1347	mutex_lock(&fore200e->rate_mtx);
1348	if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1349	    mutex_unlock(&fore200e->rate_mtx);
1350
1351	    kfree(fore200e_vcc);
1352	    vc_map->vcc = NULL;
1353	    return -EAGAIN;
1354	}
1355
1356	/* reserve bandwidth */
1357	fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1358	mutex_unlock(&fore200e->rate_mtx);
1359    }
1360    
1361    vcc->itf = vcc->dev->number;
1362
1363    set_bit(ATM_VF_PARTIAL,&vcc->flags);
1364    set_bit(ATM_VF_ADDR, &vcc->flags);
1365
1366    vcc->dev_data = fore200e_vcc;
1367    
1368    if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1369
1370	vc_map->vcc = NULL;
1371
1372	clear_bit(ATM_VF_ADDR, &vcc->flags);
1373	clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1374
1375	vcc->dev_data = NULL;
1376
1377	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1378
1379	kfree(fore200e_vcc);
1380	return -EINVAL;
1381    }
1382    
1383    /* compute rate control parameters */
1384    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1385	
1386	fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1387	set_bit(ATM_VF_HASQOS, &vcc->flags);
1388
1389	DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1390		vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1391		vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, 
1392		fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1393    }
1394    
1395    fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1396    fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1397    fore200e_vcc->tx_pdu     = fore200e_vcc->rx_pdu     = 0;
1398
1399    /* new incarnation of the vcc */
1400    vc_map->incarn = ++fore200e->incarn_count;
1401
1402    /* VC unusable before this flag is set */
1403    set_bit(ATM_VF_READY, &vcc->flags);
1404
1405    return 0;
1406}
1407
1408
1409static void
1410fore200e_close(struct atm_vcc* vcc)
1411{
 
1412    struct fore200e_vcc*    fore200e_vcc;
1413    struct fore200e*        fore200e;
1414    struct fore200e_vc_map* vc_map;
1415    unsigned long           flags;
1416
1417    ASSERT(vcc);
1418    fore200e = FORE200E_DEV(vcc->dev);
1419
1420    ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1421    ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1422
1423    DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1424
1425    clear_bit(ATM_VF_READY, &vcc->flags);
1426
1427    fore200e_activate_vcin(fore200e, 0, vcc, 0);
1428
1429    spin_lock_irqsave(&fore200e->q_lock, flags);
1430
1431    vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1432
1433    /* the vc is no longer considered as "in use" by fore200e_open() */
1434    vc_map->vcc = NULL;
1435
1436    vcc->itf = vcc->vci = vcc->vpi = 0;
1437
1438    fore200e_vcc = FORE200E_VCC(vcc);
1439    vcc->dev_data = NULL;
1440
1441    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1442
1443    /* release reserved bandwidth, if any */
1444    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1445
1446	mutex_lock(&fore200e->rate_mtx);
1447	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1448	mutex_unlock(&fore200e->rate_mtx);
1449
1450	clear_bit(ATM_VF_HASQOS, &vcc->flags);
1451    }
1452
1453    clear_bit(ATM_VF_ADDR, &vcc->flags);
1454    clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1455
1456    ASSERT(fore200e_vcc);
1457    kfree(fore200e_vcc);
1458}
1459
1460
1461static int
1462fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1463{
1464    struct fore200e*        fore200e;
1465    struct fore200e_vcc*    fore200e_vcc;
1466    struct fore200e_vc_map* vc_map;
1467    struct host_txq*        txq;
1468    struct host_txq_entry*  entry;
1469    struct tpd*             tpd;
1470    struct tpd_haddr        tpd_haddr;
1471    int                     retry        = CONFIG_ATM_FORE200E_TX_RETRY;
1472    int                     tx_copy      = 0;
1473    int                     tx_len       = skb->len;
1474    u32*                    cell_header  = NULL;
1475    unsigned char*          skb_data;
1476    int                     skb_len;
1477    unsigned char*          data;
1478    unsigned long           flags;
1479
1480    if (!vcc)
1481        return -EINVAL;
1482
1483    fore200e = FORE200E_DEV(vcc->dev);
1484    fore200e_vcc = FORE200E_VCC(vcc);
1485
1486    if (!fore200e)
1487        return -EINVAL;
1488
1489    txq = &fore200e->host_txq;
1490    if (!fore200e_vcc)
1491        return -EINVAL;
1492
1493    if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1494	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1495	dev_kfree_skb_any(skb);
1496	return -EINVAL;
1497    }
1498
1499#ifdef FORE200E_52BYTE_AAL0_SDU
1500    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1501	cell_header = (u32*) skb->data;
1502	skb_data    = skb->data + 4;    /* skip 4-byte cell header */
1503	skb_len     = tx_len = skb->len  - 4;
1504
1505	DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1506    }
1507    else 
1508#endif
1509    {
1510	skb_data = skb->data;
1511	skb_len  = skb->len;
1512    }
1513    
1514    if (((unsigned long)skb_data) & 0x3) {
1515
1516	DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1517	tx_copy = 1;
1518	tx_len  = skb_len;
1519    }
1520
1521    if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1522
1523        /* this simply NUKES the PCA board */
1524	DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1525	tx_copy = 1;
1526	tx_len  = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1527    }
1528    
1529    if (tx_copy) {
1530	data = kmalloc(tx_len, GFP_ATOMIC);
1531	if (data == NULL) {
1532	    if (vcc->pop) {
1533		vcc->pop(vcc, skb);
1534	    }
1535	    else {
1536		dev_kfree_skb_any(skb);
1537	    }
1538	    return -ENOMEM;
1539	}
1540
1541	memcpy(data, skb_data, skb_len);
1542	if (skb_len < tx_len)
1543	    memset(data + skb_len, 0x00, tx_len - skb_len);
1544    }
1545    else {
1546	data = skb_data;
1547    }
1548
1549    vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1550    ASSERT(vc_map->vcc == vcc);
1551
1552  retry_here:
1553
1554    spin_lock_irqsave(&fore200e->q_lock, flags);
1555
1556    entry = &txq->host_entry[ txq->head ];
1557
1558    if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1559
1560	/* try to free completed tx queue entries */
1561	fore200e_tx_irq(fore200e);
1562
1563	if (*entry->status != STATUS_FREE) {
1564
1565	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1566
1567	    /* retry once again? */
1568	    if (--retry > 0) {
1569		udelay(50);
1570		goto retry_here;
1571	    }
1572
1573	    atomic_inc(&vcc->stats->tx_err);
1574
1575	    fore200e->tx_sat++;
1576	    DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1577		    fore200e->name, fore200e->cp_queues->heartbeat);
1578	    if (vcc->pop) {
1579		vcc->pop(vcc, skb);
1580	    }
1581	    else {
1582		dev_kfree_skb_any(skb);
1583	    }
1584
1585	    if (tx_copy)
1586		kfree(data);
1587
1588	    return -ENOBUFS;
1589	}
1590    }
1591
1592    entry->incarn = vc_map->incarn;
1593    entry->vc_map = vc_map;
1594    entry->skb    = skb;
1595    entry->data   = tx_copy ? data : NULL;
1596
1597    tpd = entry->tpd;
1598    tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
1599					  DMA_TO_DEVICE);
1600    if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
1601	if (tx_copy)
1602	    kfree(data);
1603	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1604	return -ENOMEM;
1605    }
1606    tpd->tsd[ 0 ].length = tx_len;
1607
1608    FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1609    txq->txing++;
1610
1611    /* The dma_map call above implies a dma_sync so the device can use it,
1612     * thus no explicit dma_sync call is necessary here.
1613     */
1614    
1615    DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 
1616	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1617	    tpd->tsd[0].length, skb_len);
1618
1619    if (skb_len < fore200e_vcc->tx_min_pdu)
1620	fore200e_vcc->tx_min_pdu = skb_len;
1621    if (skb_len > fore200e_vcc->tx_max_pdu)
1622	fore200e_vcc->tx_max_pdu = skb_len;
1623    fore200e_vcc->tx_pdu++;
1624
1625    /* set tx rate control information */
1626    tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1627    tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1628
1629    if (cell_header) {
1630	tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1631	tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1632	tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1633	tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1634	tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1635    }
1636    else {
1637	/* set the ATM header, common to all cells conveying the PDU */
1638	tpd->atm_header.clp = 0;
1639	tpd->atm_header.plt = 0;
1640	tpd->atm_header.vci = vcc->vci;
1641	tpd->atm_header.vpi = vcc->vpi;
1642	tpd->atm_header.gfc = 0;
1643    }
1644
1645    tpd->spec.length = tx_len;
1646    tpd->spec.nseg   = 1;
1647    tpd->spec.aal    = fore200e_atm2fore_aal(vcc->qos.aal);
1648    tpd->spec.intr   = 1;
1649
1650    tpd_haddr.size  = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);  /* size is expressed in 32 byte blocks */
1651    tpd_haddr.pad   = 0;
1652    tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;          /* shift the address, as we are in a bitfield */
1653
1654    *entry->status = STATUS_PENDING;
1655    fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1656
1657    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1658
1659    return 0;
1660}
1661
1662
1663static int
1664fore200e_getstats(struct fore200e* fore200e)
1665{
1666    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1667    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1668    struct stats_opcode     opcode;
1669    int                     ok;
1670    u32                     stats_dma_addr;
1671
1672    if (fore200e->stats == NULL) {
1673	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
1674	if (fore200e->stats == NULL)
1675	    return -ENOMEM;
1676    }
1677    
1678    stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
1679				    sizeof(struct stats), DMA_FROM_DEVICE);
1680    if (dma_mapping_error(fore200e->dev, stats_dma_addr))
1681    	return -ENOMEM;
1682    
1683    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1684
1685    opcode.opcode = OPCODE_GET_STATS;
1686    opcode.pad    = 0;
1687
1688    fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1689    
1690    *entry->status = STATUS_PENDING;
1691
1692    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1693
1694    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1695
1696    *entry->status = STATUS_FREE;
1697
1698    dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1699    
1700    if (ok == 0) {
1701	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1702	return -EIO;
1703    }
1704
1705    return 0;
1706}
1707
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1708#if 0 /* currently unused */
1709static int
1710fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1711{
1712    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1713    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1714    struct oc3_opcode       opcode;
1715    int                     ok;
1716    u32                     oc3_regs_dma_addr;
1717
1718    oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1719
1720    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1721
1722    opcode.opcode = OPCODE_GET_OC3;
1723    opcode.reg    = 0;
1724    opcode.value  = 0;
1725    opcode.mask   = 0;
1726
1727    fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1728    
1729    *entry->status = STATUS_PENDING;
1730
1731    fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1732
1733    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1734
1735    *entry->status = STATUS_FREE;
1736
1737    fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1738    
1739    if (ok == 0) {
1740	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1741	return -EIO;
1742    }
1743
1744    return 0;
1745}
1746#endif
1747
1748
1749static int
1750fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1751{
1752    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1753    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1754    struct oc3_opcode       opcode;
1755    int                     ok;
1756
1757    DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1758
1759    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1760
1761    opcode.opcode = OPCODE_SET_OC3;
1762    opcode.reg    = reg;
1763    opcode.value  = value;
1764    opcode.mask   = mask;
1765
1766    fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1767    
1768    *entry->status = STATUS_PENDING;
1769
1770    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1771
1772    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1773
1774    *entry->status = STATUS_FREE;
1775
1776    if (ok == 0) {
1777	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1778	return -EIO;
1779    }
1780
1781    return 0;
1782}
1783
1784
1785static int
1786fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1787{
1788    u32 mct_value, mct_mask;
1789    int error;
1790
1791    if (!capable(CAP_NET_ADMIN))
1792	return -EPERM;
1793    
1794    switch (loop_mode) {
1795
1796    case ATM_LM_NONE:
1797	mct_value = 0; 
1798	mct_mask  = SUNI_MCT_DLE | SUNI_MCT_LLE;
1799	break;
1800	
1801    case ATM_LM_LOC_PHY:
1802	mct_value = mct_mask = SUNI_MCT_DLE;
1803	break;
1804
1805    case ATM_LM_RMT_PHY:
1806	mct_value = mct_mask = SUNI_MCT_LLE;
1807	break;
1808
1809    default:
1810	return -EINVAL;
1811    }
1812
1813    error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1814    if (error == 0)
1815	fore200e->loop_mode = loop_mode;
1816
1817    return error;
1818}
1819
1820
1821static int
1822fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1823{
1824    struct sonet_stats tmp;
1825
1826    if (fore200e_getstats(fore200e) < 0)
1827	return -EIO;
1828
1829    tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1830    tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1831    tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1832    tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1833    tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1834    tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1835    tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1836    tmp.tx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
1837	              be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1838	              be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1839    tmp.rx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_received)     +
1840	              be32_to_cpu(fore200e->stats->aal34.cells_received)    +
1841	              be32_to_cpu(fore200e->stats->aal5.cells_received);
1842
1843    if (arg)
1844	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;	
1845    
1846    return 0;
1847}
1848
1849
1850static int
1851fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1852{
1853    struct fore200e* fore200e = FORE200E_DEV(dev);
1854    
1855    DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1856
1857    switch (cmd) {
1858
1859    case SONET_GETSTAT:
1860	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1861
1862    case SONET_GETDIAG:
1863	return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1864
1865    case ATM_SETLOOP:
1866	return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1867
1868    case ATM_GETLOOP:
1869	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1870
1871    case ATM_QUERYLOOP:
1872	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1873    }
1874
1875    return -ENOSYS; /* not implemented */
1876}
1877
1878
1879static int
1880fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1881{
1882    struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1883    struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);
1884
1885    if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1886	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
1887	return -EINVAL;
1888    }
1889
1890    DPRINTK(2, "change_qos %d.%d.%d, "
1891	    "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1892	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1893	    "available_cell_rate = %u",
1894	    vcc->itf, vcc->vpi, vcc->vci,
1895	    fore200e_traffic_class[ qos->txtp.traffic_class ],
1896	    qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1897	    fore200e_traffic_class[ qos->rxtp.traffic_class ],
1898	    qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
1899	    flags, fore200e->available_cell_rate);
1900
1901    if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
1902
1903	mutex_lock(&fore200e->rate_mtx);
1904	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
1905	    mutex_unlock(&fore200e->rate_mtx);
1906	    return -EAGAIN;
1907	}
1908
1909	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1910	fore200e->available_cell_rate -= qos->txtp.max_pcr;
1911
1912	mutex_unlock(&fore200e->rate_mtx);
1913	
1914	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
1915	
1916	/* update rate control parameters */
1917	fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
1918
1919	set_bit(ATM_VF_HASQOS, &vcc->flags);
1920
1921	return 0;
1922    }
1923    
1924    return -EINVAL;
1925}
1926    
1927
1928static int fore200e_irq_request(struct fore200e *fore200e)
1929{
1930    if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
1931
1932	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
1933	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
1934	return -EBUSY;
1935    }
1936
1937    printk(FORE200E "IRQ %s reserved for device %s\n",
1938	   fore200e_irq_itoa(fore200e->irq), fore200e->name);
1939
1940#ifdef FORE200E_USE_TASKLET
1941    tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
1942    tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
1943#endif
1944
1945    fore200e->state = FORE200E_STATE_IRQ;
1946    return 0;
1947}
1948
1949
1950static int fore200e_get_esi(struct fore200e *fore200e)
1951{
1952    struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
1953    int ok, i;
1954
1955    if (!prom)
1956	return -ENOMEM;
1957
1958    ok = fore200e->bus->prom_read(fore200e, prom);
1959    if (ok < 0) {
1960	kfree(prom);
1961	return -EBUSY;
1962    }
1963	
1964    printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
1965	   fore200e->name, 
1966	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
1967	   prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
1968	
1969    for (i = 0; i < ESI_LEN; i++) {
1970	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
1971    }
1972    
1973    kfree(prom);
1974
1975    return 0;
1976}
1977
1978
1979static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
1980{
1981    int scheme, magn, nbr, size, i;
1982
1983    struct host_bsq* bsq;
1984    struct buffer*   buffer;
1985
1986    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1987	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1988
1989	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
1990
1991	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
1992	    size = fore200e_rx_buf_size[ scheme ][ magn ];
1993
1994	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
1995
1996	    /* allocate the array of receive buffers */
1997	    buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
1998                                           GFP_KERNEL);
1999
2000	    if (buffer == NULL)
2001		return -ENOMEM;
2002
2003	    bsq->freebuf = NULL;
2004
2005	    for (i = 0; i < nbr; i++) {
2006
2007		buffer[ i ].scheme = scheme;
2008		buffer[ i ].magn   = magn;
2009#ifdef FORE200E_BSQ_DEBUG
2010		buffer[ i ].index  = i;
2011		buffer[ i ].supplied = 0;
2012#endif
2013
2014		/* allocate the receive buffer body */
2015		if (fore200e_chunk_alloc(fore200e,
2016					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2017					 DMA_FROM_DEVICE) < 0) {
2018		    
2019		    while (i > 0)
2020			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2021		    kfree(buffer);
2022		    
2023		    return -ENOMEM;
2024		}
2025
2026		/* insert the buffer into the free buffer list */
2027		buffer[ i ].next = bsq->freebuf;
2028		bsq->freebuf = &buffer[ i ];
2029	    }
2030	    /* all the buffers are free, initially */
2031	    bsq->freebuf_count = nbr;
2032
2033#ifdef FORE200E_BSQ_DEBUG
2034	    bsq_audit(3, bsq, scheme, magn);
2035#endif
2036	}
2037    }
2038
2039    fore200e->state = FORE200E_STATE_ALLOC_BUF;
2040    return 0;
2041}
2042
2043
2044static int fore200e_init_bs_queue(struct fore200e *fore200e)
2045{
2046    int scheme, magn, i;
2047
2048    struct host_bsq*     bsq;
2049    struct cp_bsq_entry __iomem * cp_entry;
2050
2051    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2052	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2053
2054	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2055
2056	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2057
2058	    /* allocate and align the array of status words */
2059	    if (fore200e_dma_chunk_alloc(fore200e,
2060					       &bsq->status,
2061					       sizeof(enum status), 
2062					       QUEUE_SIZE_BS,
2063					       fore200e->bus->status_alignment) < 0) {
2064		return -ENOMEM;
2065	    }
2066
2067	    /* allocate and align the array of receive buffer descriptors */
2068	    if (fore200e_dma_chunk_alloc(fore200e,
2069					       &bsq->rbd_block,
2070					       sizeof(struct rbd_block),
2071					       QUEUE_SIZE_BS,
2072					       fore200e->bus->descr_alignment) < 0) {
2073		
2074		fore200e_dma_chunk_free(fore200e, &bsq->status);
2075		return -ENOMEM;
2076	    }
2077	    
2078	    /* get the base address of the cp resident buffer supply queue entries */
2079	    cp_entry = fore200e->virt_base + 
2080		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2081	    
2082	    /* fill the host resident and cp resident buffer supply queue entries */
2083	    for (i = 0; i < QUEUE_SIZE_BS; i++) {
2084		
2085		bsq->host_entry[ i ].status = 
2086		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2087	        bsq->host_entry[ i ].rbd_block =
2088		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2089		bsq->host_entry[ i ].rbd_block_dma =
2090		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2091		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2092		
2093		*bsq->host_entry[ i ].status = STATUS_FREE;
2094		
2095		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i), 
2096				     &cp_entry[ i ].status_haddr);
2097	    }
2098	}
2099    }
2100
2101    fore200e->state = FORE200E_STATE_INIT_BSQ;
2102    return 0;
2103}
2104
2105
2106static int fore200e_init_rx_queue(struct fore200e *fore200e)
2107{
2108    struct host_rxq*     rxq =  &fore200e->host_rxq;
2109    struct cp_rxq_entry __iomem * cp_entry;
2110    int i;
2111
2112    DPRINTK(2, "receive queue is being initialized\n");
2113
2114    /* allocate and align the array of status words */
2115    if (fore200e_dma_chunk_alloc(fore200e,
2116				       &rxq->status,
2117				       sizeof(enum status), 
2118				       QUEUE_SIZE_RX,
2119				       fore200e->bus->status_alignment) < 0) {
2120	return -ENOMEM;
2121    }
2122
2123    /* allocate and align the array of receive PDU descriptors */
2124    if (fore200e_dma_chunk_alloc(fore200e,
2125				       &rxq->rpd,
2126				       sizeof(struct rpd), 
2127				       QUEUE_SIZE_RX,
2128				       fore200e->bus->descr_alignment) < 0) {
2129	
2130	fore200e_dma_chunk_free(fore200e, &rxq->status);
2131	return -ENOMEM;
2132    }
2133
2134    /* get the base address of the cp resident rx queue entries */
2135    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2136
2137    /* fill the host resident and cp resident rx entries */
2138    for (i=0; i < QUEUE_SIZE_RX; i++) {
2139	
2140	rxq->host_entry[ i ].status = 
2141	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2142	rxq->host_entry[ i ].rpd = 
2143	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2144	rxq->host_entry[ i ].rpd_dma = 
2145	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2146	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2147
2148	*rxq->host_entry[ i ].status = STATUS_FREE;
2149
2150	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i), 
2151			     &cp_entry[ i ].status_haddr);
2152
2153	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2154			     &cp_entry[ i ].rpd_haddr);
2155    }
2156
2157    /* set the head entry of the queue */
2158    rxq->head = 0;
2159
2160    fore200e->state = FORE200E_STATE_INIT_RXQ;
2161    return 0;
2162}
2163
2164
2165static int fore200e_init_tx_queue(struct fore200e *fore200e)
2166{
2167    struct host_txq*     txq =  &fore200e->host_txq;
2168    struct cp_txq_entry __iomem * cp_entry;
2169    int i;
2170
2171    DPRINTK(2, "transmit queue is being initialized\n");
2172
2173    /* allocate and align the array of status words */
2174    if (fore200e_dma_chunk_alloc(fore200e,
2175				       &txq->status,
2176				       sizeof(enum status), 
2177				       QUEUE_SIZE_TX,
2178				       fore200e->bus->status_alignment) < 0) {
2179	return -ENOMEM;
2180    }
2181
2182    /* allocate and align the array of transmit PDU descriptors */
2183    if (fore200e_dma_chunk_alloc(fore200e,
2184				       &txq->tpd,
2185				       sizeof(struct tpd), 
2186				       QUEUE_SIZE_TX,
2187				       fore200e->bus->descr_alignment) < 0) {
2188	
2189	fore200e_dma_chunk_free(fore200e, &txq->status);
2190	return -ENOMEM;
2191    }
2192
2193    /* get the base address of the cp resident tx queue entries */
2194    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2195
2196    /* fill the host resident and cp resident tx entries */
2197    for (i=0; i < QUEUE_SIZE_TX; i++) {
2198	
2199	txq->host_entry[ i ].status = 
2200	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
2201	txq->host_entry[ i ].tpd = 
2202	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2203	txq->host_entry[ i ].tpd_dma  = 
2204                             FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2205	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2206
2207	*txq->host_entry[ i ].status = STATUS_FREE;
2208	
2209	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i), 
2210			     &cp_entry[ i ].status_haddr);
2211	
2212        /* although there is a one-to-one mapping of tx queue entries and tpds,
2213	   we do not write here the DMA (physical) base address of each tpd into
2214	   the related cp resident entry, because the cp relies on this write
2215	   operation to detect that a new pdu has been submitted for tx */
2216    }
2217
2218    /* set the head and tail entries of the queue */
2219    txq->head = 0;
2220    txq->tail = 0;
2221
2222    fore200e->state = FORE200E_STATE_INIT_TXQ;
2223    return 0;
2224}
2225
2226
2227static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2228{
2229    struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
2230    struct cp_cmdq_entry __iomem * cp_entry;
2231    int i;
2232
2233    DPRINTK(2, "command queue is being initialized\n");
2234
2235    /* allocate and align the array of status words */
2236    if (fore200e_dma_chunk_alloc(fore200e,
2237				       &cmdq->status,
2238				       sizeof(enum status), 
2239				       QUEUE_SIZE_CMD,
2240				       fore200e->bus->status_alignment) < 0) {
2241	return -ENOMEM;
2242    }
2243    
2244    /* get the base address of the cp resident cmd queue entries */
2245    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2246
2247    /* fill the host resident and cp resident cmd entries */
2248    for (i=0; i < QUEUE_SIZE_CMD; i++) {
2249	
2250	cmdq->host_entry[ i ].status   = 
2251                              FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2252	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2253
2254	*cmdq->host_entry[ i ].status = STATUS_FREE;
2255
2256	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i), 
2257                             &cp_entry[ i ].status_haddr);
2258    }
2259
2260    /* set the head entry of the queue */
2261    cmdq->head = 0;
2262
2263    fore200e->state = FORE200E_STATE_INIT_CMDQ;
2264    return 0;
2265}
2266
2267
2268static void fore200e_param_bs_queue(struct fore200e *fore200e,
2269				    enum buffer_scheme scheme,
2270				    enum buffer_magn magn, int queue_length,
2271				    int pool_size, int supply_blksize)
2272{
2273    struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2274
2275    fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
2276    fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2277    fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
2278    fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
2279}
2280
2281
2282static int fore200e_initialize(struct fore200e *fore200e)
2283{
2284    struct cp_queues __iomem * cpq;
2285    int               ok, scheme, magn;
2286
2287    DPRINTK(2, "device %s being initialized\n", fore200e->name);
2288
2289    mutex_init(&fore200e->rate_mtx);
2290    spin_lock_init(&fore200e->q_lock);
2291
2292    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2293
2294    /* enable cp to host interrupts */
2295    fore200e->bus->write(1, &cpq->imask);
2296
2297    if (fore200e->bus->irq_enable)
2298	fore200e->bus->irq_enable(fore200e);
2299    
2300    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2301
2302    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2303    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
2304    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);
2305
2306    fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
2307    fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);
2308
2309    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2310	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2311	    fore200e_param_bs_queue(fore200e, scheme, magn,
2312				    QUEUE_SIZE_BS, 
2313				    fore200e_rx_buf_nbr[ scheme ][ magn ],
2314				    RBD_BLK_SIZE);
2315
2316    /* issue the initialize command */
2317    fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
2318    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2319
2320    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2321    if (ok == 0) {
2322	printk(FORE200E "device %s initialization failed\n", fore200e->name);
2323	return -ENODEV;
2324    }
2325
2326    printk(FORE200E "device %s initialized\n", fore200e->name);
2327
2328    fore200e->state = FORE200E_STATE_INITIALIZE;
2329    return 0;
2330}
2331
2332
2333static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2334{
2335    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2336
2337#if 0
2338    printk("%c", c);
2339#endif
2340    fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2341}
2342
2343
2344static int fore200e_monitor_getc(struct fore200e *fore200e)
2345{
2346    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2347    unsigned long      timeout = jiffies + msecs_to_jiffies(50);
2348    int                c;
2349
2350    while (time_before(jiffies, timeout)) {
2351
2352	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2353
2354	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2355
2356	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2357#if 0
2358	    printk("%c", c & 0xFF);
2359#endif
2360	    return c & 0xFF;
2361	}
2362    }
2363
2364    return -1;
2365}
2366
2367
2368static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
2369{
2370    while (*str) {
2371
2372	/* the i960 monitor doesn't accept any new character if it has something to say */
2373	while (fore200e_monitor_getc(fore200e) >= 0);
2374	
2375	fore200e_monitor_putc(fore200e, *str++);
2376    }
2377
2378    while (fore200e_monitor_getc(fore200e) >= 0);
2379}
2380
2381#ifdef __LITTLE_ENDIAN
2382#define FW_EXT ".bin"
2383#else
2384#define FW_EXT "_ecd.bin2"
2385#endif
2386
2387static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2388{
2389    const struct firmware *firmware;
2390    const struct fw_header *fw_header;
2391    const __le32 *fw_data;
2392    u32 fw_size;
2393    u32 __iomem *load_addr;
2394    char buf[48];
2395    int err;
2396
2397    sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2398    if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
2399	printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2400	return err;
2401    }
2402
2403    fw_data = (const __le32 *)firmware->data;
2404    fw_size = firmware->size / sizeof(u32);
2405    fw_header = (const struct fw_header *)firmware->data;
2406    load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2407
2408    DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2409	    fore200e->name, load_addr, fw_size);
2410
2411    if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2412	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2413	goto release;
2414    }
2415
2416    for (; fw_size--; fw_data++, load_addr++)
2417	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2418
2419    DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2420
2421#if defined(__sparc_v9__)
2422    /* reported to be required by SBA cards on some sparc64 hosts */
2423    fore200e_spin(100);
2424#endif
2425
2426    sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2427    fore200e_monitor_puts(fore200e, buf);
2428
2429    if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2430	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2431	goto release;
2432    }
2433
2434    printk(FORE200E "device %s firmware started\n", fore200e->name);
2435
2436    fore200e->state = FORE200E_STATE_START_FW;
2437    err = 0;
2438
2439release:
2440    release_firmware(firmware);
2441    return err;
2442}
2443
2444
2445static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2446{
2447    struct atm_dev* atm_dev;
2448
2449    DPRINTK(2, "device %s being registered\n", fore200e->name);
2450
2451    atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2452                               -1, NULL);
2453    if (atm_dev == NULL) {
2454	printk(FORE200E "unable to register device %s\n", fore200e->name);
2455	return -ENODEV;
2456    }
2457
2458    atm_dev->dev_data = fore200e;
2459    fore200e->atm_dev = atm_dev;
2460
2461    atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2462    atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2463
2464    fore200e->available_cell_rate = ATM_OC3_PCR;
2465
2466    fore200e->state = FORE200E_STATE_REGISTER;
2467    return 0;
2468}
2469
2470
2471static int fore200e_init(struct fore200e *fore200e, struct device *parent)
2472{
2473    if (fore200e_register(fore200e, parent) < 0)
2474	return -ENODEV;
2475    
2476    if (fore200e->bus->configure(fore200e) < 0)
2477	return -ENODEV;
2478
2479    if (fore200e->bus->map(fore200e) < 0)
2480	return -ENODEV;
2481
2482    if (fore200e_reset(fore200e, 1) < 0)
2483	return -ENODEV;
2484
2485    if (fore200e_load_and_start_fw(fore200e) < 0)
2486	return -ENODEV;
2487
2488    if (fore200e_initialize(fore200e) < 0)
2489	return -ENODEV;
2490
2491    if (fore200e_init_cmd_queue(fore200e) < 0)
2492	return -ENOMEM;
2493
2494    if (fore200e_init_tx_queue(fore200e) < 0)
2495	return -ENOMEM;
2496
2497    if (fore200e_init_rx_queue(fore200e) < 0)
2498	return -ENOMEM;
2499
2500    if (fore200e_init_bs_queue(fore200e) < 0)
2501	return -ENOMEM;
2502
2503    if (fore200e_alloc_rx_buf(fore200e) < 0)
2504	return -ENOMEM;
2505
2506    if (fore200e_get_esi(fore200e) < 0)
2507	return -EIO;
2508
2509    if (fore200e_irq_request(fore200e) < 0)
2510	return -EBUSY;
2511
2512    fore200e_supply(fore200e);
2513
2514    /* all done, board initialization is now complete */
2515    fore200e->state = FORE200E_STATE_COMPLETE;
2516    return 0;
2517}
2518
2519#ifdef CONFIG_SBUS
 
2520static int fore200e_sba_probe(struct platform_device *op)
2521{
 
2522	struct fore200e *fore200e;
2523	static int index = 0;
2524	int err;
2525
 
 
 
 
2526	fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2527	if (!fore200e)
2528		return -ENOMEM;
2529
2530	fore200e->bus = &fore200e_sbus_ops;
2531	fore200e->dev = &op->dev;
2532	fore200e->irq = op->archdata.irqs[0];
2533	fore200e->phys_base = op->resource[0].start;
2534
2535	sprintf(fore200e->name, "SBA-200E-%d", index);
2536
2537	err = fore200e_init(fore200e, &op->dev);
2538	if (err < 0) {
2539		fore200e_shutdown(fore200e);
2540		kfree(fore200e);
2541		return err;
2542	}
2543
2544	index++;
2545	dev_set_drvdata(&op->dev, fore200e);
2546
2547	return 0;
2548}
2549
2550static void fore200e_sba_remove(struct platform_device *op)
2551{
2552	struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2553
2554	fore200e_shutdown(fore200e);
2555	kfree(fore200e);
 
 
2556}
2557
2558static const struct of_device_id fore200e_sba_match[] = {
2559	{
2560		.name = SBA200E_PROM_NAME,
2561	},
2562	{},
2563};
2564MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2565
2566static struct platform_driver fore200e_sba_driver = {
2567	.driver = {
2568		.name = "fore_200e",
2569		.of_match_table = fore200e_sba_match,
2570	},
2571	.probe		= fore200e_sba_probe,
2572	.remove		= fore200e_sba_remove,
2573};
2574#endif
2575
2576#ifdef CONFIG_PCI
2577static int fore200e_pca_detect(struct pci_dev *pci_dev,
2578			       const struct pci_device_id *pci_ent)
2579{
2580    struct fore200e* fore200e;
2581    int err = 0;
2582    static int index = 0;
2583
2584    if (pci_enable_device(pci_dev)) {
2585	err = -EINVAL;
2586	goto out;
2587    }
2588
2589    if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2590	err = -EINVAL;
2591	goto out;
2592    }
2593    
2594    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2595    if (fore200e == NULL) {
2596	err = -ENOMEM;
2597	goto out_disable;
2598    }
2599
2600    fore200e->bus       = &fore200e_pci_ops;
2601    fore200e->dev	= &pci_dev->dev;
2602    fore200e->irq       = pci_dev->irq;
2603    fore200e->phys_base = pci_resource_start(pci_dev, 0);
2604
2605    sprintf(fore200e->name, "PCA-200E-%d", index - 1);
2606
2607    pci_set_master(pci_dev);
2608
2609    printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
2610	   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2611
2612    sprintf(fore200e->name, "PCA-200E-%d", index);
2613
2614    err = fore200e_init(fore200e, &pci_dev->dev);
2615    if (err < 0) {
2616	fore200e_shutdown(fore200e);
2617	goto out_free;
2618    }
2619
2620    ++index;
2621    pci_set_drvdata(pci_dev, fore200e);
2622
2623out:
2624    return err;
2625
2626out_free:
2627    kfree(fore200e);
2628out_disable:
2629    pci_disable_device(pci_dev);
2630    goto out;
2631}
2632
2633
2634static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
2635{
2636    struct fore200e *fore200e;
2637
2638    fore200e = pci_get_drvdata(pci_dev);
2639
2640    fore200e_shutdown(fore200e);
2641    kfree(fore200e);
2642    pci_disable_device(pci_dev);
2643}
2644
2645
2646static const struct pci_device_id fore200e_pca_tbl[] = {
2647    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
2648    { 0, }
2649};
2650
2651MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2652
2653static struct pci_driver fore200e_pca_driver = {
2654    .name =     "fore_200e",
2655    .probe =    fore200e_pca_detect,
2656    .remove =   fore200e_pca_remove_one,
2657    .id_table = fore200e_pca_tbl,
2658};
2659#endif
2660
2661static int __init fore200e_module_init(void)
2662{
2663	int err = 0;
2664
2665	printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2666
2667#ifdef CONFIG_SBUS
2668	err = platform_driver_register(&fore200e_sba_driver);
2669	if (err)
2670		return err;
2671#endif
2672
2673#ifdef CONFIG_PCI
2674	err = pci_register_driver(&fore200e_pca_driver);
2675#endif
2676
2677#ifdef CONFIG_SBUS
2678	if (err)
2679		platform_driver_unregister(&fore200e_sba_driver);
2680#endif
2681
2682	return err;
2683}
2684
2685static void __exit fore200e_module_cleanup(void)
2686{
2687#ifdef CONFIG_PCI
2688	pci_unregister_driver(&fore200e_pca_driver);
2689#endif
2690#ifdef CONFIG_SBUS
2691	platform_driver_unregister(&fore200e_sba_driver);
2692#endif
2693}
2694
2695static int
2696fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2697{
2698    struct fore200e*     fore200e  = FORE200E_DEV(dev);
2699    struct fore200e_vcc* fore200e_vcc;
2700    struct atm_vcc*      vcc;
2701    int                  i, len, left = *pos;
2702    unsigned long        flags;
2703
2704    if (!left--) {
2705
2706	if (fore200e_getstats(fore200e) < 0)
2707	    return -EIO;
2708
2709	len = sprintf(page,"\n"
2710		       " device:\n"
2711		       "   internal name:\t\t%s\n", fore200e->name);
2712
2713	/* print bus-specific information */
2714	if (fore200e->bus->proc_read)
2715	    len += fore200e->bus->proc_read(fore200e, page + len);
2716	
2717	len += sprintf(page + len,
2718		"   interrupt line:\t\t%s\n"
2719		"   physical base address:\t0x%p\n"
2720		"   virtual base address:\t0x%p\n"
2721		"   factory address (ESI):\t%pM\n"
2722		"   board serial number:\t\t%d\n\n",
2723		fore200e_irq_itoa(fore200e->irq),
2724		(void*)fore200e->phys_base,
2725		fore200e->virt_base,
2726		fore200e->esi,
2727		fore200e->esi[4] * 256 + fore200e->esi[5]);
2728
2729	return len;
2730    }
2731
2732    if (!left--)
2733	return sprintf(page,
2734		       "   free small bufs, scheme 1:\t%d\n"
2735		       "   free large bufs, scheme 1:\t%d\n"
2736		       "   free small bufs, scheme 2:\t%d\n"
2737		       "   free large bufs, scheme 2:\t%d\n",
2738		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2739		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2740		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2741		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2742
2743    if (!left--) {
2744	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2745
2746	len = sprintf(page,"\n\n"
2747		      " cell processor:\n"
2748		      "   heartbeat state:\t\t");
2749	
2750	if (hb >> 16 != 0xDEAD)
2751	    len += sprintf(page + len, "0x%08x\n", hb);
2752	else
2753	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2754
2755	return len;
2756    }
2757
2758    if (!left--) {
2759	static const char* media_name[] = {
2760	    "unshielded twisted pair",
2761	    "multimode optical fiber ST",
2762	    "multimode optical fiber SC",
2763	    "single-mode optical fiber ST",
2764	    "single-mode optical fiber SC",
2765	    "unknown"
2766	};
2767
2768	static const char* oc3_mode[] = {
2769	    "normal operation",
2770	    "diagnostic loopback",
2771	    "line loopback",
2772	    "unknown"
2773	};
2774
2775	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2776	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2777	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2778	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2779	u32 oc3_index;
2780
2781	if (media_index > 4)
2782		media_index = 5;
2783	
2784	switch (fore200e->loop_mode) {
2785	    case ATM_LM_NONE:    oc3_index = 0;
2786		                 break;
2787	    case ATM_LM_LOC_PHY: oc3_index = 1;
2788		                 break;
2789	    case ATM_LM_RMT_PHY: oc3_index = 2;
2790		                 break;
2791	    default:             oc3_index = 3;
2792	}
2793
2794	return sprintf(page,
2795		       "   firmware release:\t\t%d.%d.%d\n"
2796		       "   monitor release:\t\t%d.%d\n"
2797		       "   media type:\t\t\t%s\n"
2798		       "   OC-3 revision:\t\t0x%x\n"
2799                       "   OC-3 mode:\t\t\t%s",
2800		       fw_release >> 16, fw_release << 16 >> 24,  fw_release << 24 >> 24,
2801		       mon960_release >> 16, mon960_release << 16 >> 16,
2802		       media_name[ media_index ],
2803		       oc3_revision,
2804		       oc3_mode[ oc3_index ]);
2805    }
2806
2807    if (!left--) {
2808	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2809
2810	return sprintf(page,
2811		       "\n\n"
2812		       " monitor:\n"
2813		       "   version number:\t\t%d\n"
2814		       "   boot status word:\t\t0x%08x\n",
2815		       fore200e->bus->read(&cp_monitor->mon_version),
2816		       fore200e->bus->read(&cp_monitor->bstat));
2817    }
2818
2819    if (!left--)
2820	return sprintf(page,
2821		       "\n"
2822		       " device statistics:\n"
2823		       "  4b5b:\n"
2824		       "     crc_header_errors:\t\t%10u\n"
2825		       "     framing_errors:\t\t%10u\n",
2826		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2827		       be32_to_cpu(fore200e->stats->phy.framing_errors));
2828    
2829    if (!left--)
2830	return sprintf(page, "\n"
2831		       "  OC-3:\n"
2832		       "     section_bip8_errors:\t%10u\n"
2833		       "     path_bip8_errors:\t\t%10u\n"
2834		       "     line_bip24_errors:\t\t%10u\n"
2835		       "     line_febe_errors:\t\t%10u\n"
2836		       "     path_febe_errors:\t\t%10u\n"
2837		       "     corr_hcs_errors:\t\t%10u\n"
2838		       "     ucorr_hcs_errors:\t\t%10u\n",
2839		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2840		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2841		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2842		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2843		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2844		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2845		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2846
2847    if (!left--)
2848	return sprintf(page,"\n"
2849		       "   ATM:\t\t\t\t     cells\n"
2850		       "     TX:\t\t\t%10u\n"
2851		       "     RX:\t\t\t%10u\n"
2852		       "     vpi out of range:\t\t%10u\n"
2853		       "     vpi no conn:\t\t%10u\n"
2854		       "     vci out of range:\t\t%10u\n"
2855		       "     vci no conn:\t\t%10u\n",
2856		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2857		       be32_to_cpu(fore200e->stats->atm.cells_received),
2858		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2859		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2860		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2861		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2862    
2863    if (!left--)
2864	return sprintf(page,"\n"
2865		       "   AAL0:\t\t\t     cells\n"
2866		       "     TX:\t\t\t%10u\n"
2867		       "     RX:\t\t\t%10u\n"
2868		       "     dropped:\t\t\t%10u\n",
2869		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2870		       be32_to_cpu(fore200e->stats->aal0.cells_received),
2871		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));
2872    
2873    if (!left--)
2874	return sprintf(page,"\n"
2875		       "   AAL3/4:\n"
2876		       "     SAR sublayer:\t\t     cells\n"
2877		       "       TX:\t\t\t%10u\n"
2878		       "       RX:\t\t\t%10u\n"
2879		       "       dropped:\t\t\t%10u\n"
2880		       "       CRC errors:\t\t%10u\n"
2881		       "       protocol errors:\t\t%10u\n\n"
2882		       "     CS  sublayer:\t\t      PDUs\n"
2883		       "       TX:\t\t\t%10u\n"
2884		       "       RX:\t\t\t%10u\n"
2885		       "       dropped:\t\t\t%10u\n"
2886		       "       protocol errors:\t\t%10u\n",
2887		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
2888		       be32_to_cpu(fore200e->stats->aal34.cells_received),
2889		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
2890		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
2891		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
2892		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
2893		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
2894		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
2895		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
2896    
2897    if (!left--)
2898	return sprintf(page,"\n"
2899		       "   AAL5:\n"
2900		       "     SAR sublayer:\t\t     cells\n"
2901		       "       TX:\t\t\t%10u\n"
2902		       "       RX:\t\t\t%10u\n"
2903		       "       dropped:\t\t\t%10u\n"
2904		       "       congestions:\t\t%10u\n\n"
2905		       "     CS  sublayer:\t\t      PDUs\n"
2906		       "       TX:\t\t\t%10u\n"
2907		       "       RX:\t\t\t%10u\n"
2908		       "       dropped:\t\t\t%10u\n"
2909		       "       CRC errors:\t\t%10u\n"
2910		       "       protocol errors:\t\t%10u\n",
2911		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
2912		       be32_to_cpu(fore200e->stats->aal5.cells_received),
2913		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
2914		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
2915		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
2916		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
2917		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
2918		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
2919		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
2920    
2921    if (!left--)
2922	return sprintf(page,"\n"
2923		       "   AUX:\t\t       allocation failures\n"
2924		       "     small b1:\t\t\t%10u\n"
2925		       "     large b1:\t\t\t%10u\n"
2926		       "     small b2:\t\t\t%10u\n"
2927		       "     large b2:\t\t\t%10u\n"
2928		       "     RX PDUs:\t\t\t%10u\n"
2929		       "     TX PDUs:\t\t\t%10lu\n",
2930		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
2931		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
2932		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
2933		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
2934		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
2935		       fore200e->tx_sat);
2936    
2937    if (!left--)
2938	return sprintf(page,"\n"
2939		       " receive carrier:\t\t\t%s\n",
2940		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
2941    
2942    if (!left--) {
2943        return sprintf(page,"\n"
2944		       " VCCs:\n  address   VPI VCI   AAL "
2945		       "TX PDUs   TX min/max size  RX PDUs   RX min/max size\n");
2946    }
2947
2948    for (i = 0; i < NBR_CONNECT; i++) {
2949
2950	vcc = fore200e->vc_map[i].vcc;
2951
2952	if (vcc == NULL)
2953	    continue;
2954
2955	spin_lock_irqsave(&fore200e->q_lock, flags);
2956
2957	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
2958
2959	    fore200e_vcc = FORE200E_VCC(vcc);
2960	    ASSERT(fore200e_vcc);
2961
2962	    len = sprintf(page,
2963			  "  %pK  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
2964			  vcc,
2965			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
2966			  fore200e_vcc->tx_pdu,
2967			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
2968			  fore200e_vcc->tx_max_pdu,
2969			  fore200e_vcc->rx_pdu,
2970			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
2971			  fore200e_vcc->rx_max_pdu);
2972
2973	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
2974	    return len;
2975	}
2976
2977	spin_unlock_irqrestore(&fore200e->q_lock, flags);
2978    }
2979    
2980    return 0;
2981}
2982
2983module_init(fore200e_module_init);
2984module_exit(fore200e_module_cleanup);
2985
2986
2987static const struct atmdev_ops fore200e_ops = {
2988	.open       = fore200e_open,
2989	.close      = fore200e_close,
2990	.ioctl      = fore200e_ioctl,
 
 
2991	.send       = fore200e_send,
2992	.change_qos = fore200e_change_qos,
2993	.proc_read  = fore200e_proc_read,
2994	.owner      = THIS_MODULE
2995};
2996
2997MODULE_LICENSE("GPL");
2998#ifdef CONFIG_PCI
2999#ifdef __LITTLE_ENDIAN__
3000MODULE_FIRMWARE("pca200e.bin");
3001#else
3002MODULE_FIRMWARE("pca200e_ecd.bin2");
3003#endif
3004#endif /* CONFIG_PCI */
3005#ifdef CONFIG_SBUS
3006MODULE_FIRMWARE("sba200e_ecd.bin2");
3007#endif
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3  A FORE Systems 200E-series driver for ATM on Linux.
   4  Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
   5
   6  Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
   7
   8  This driver simultaneously supports PCA-200E and SBA-200E adapters
   9  on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
  10
  11*/
  12
  13
  14#include <linux/kernel.h>
  15#include <linux/slab.h>
  16#include <linux/init.h>
  17#include <linux/capability.h>
  18#include <linux/interrupt.h>
  19#include <linux/bitops.h>
  20#include <linux/pci.h>
  21#include <linux/module.h>
  22#include <linux/atmdev.h>
  23#include <linux/sonet.h>
  24#include <linux/atm_suni.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/delay.h>
  27#include <linux/firmware.h>
 
  28#include <asm/io.h>
  29#include <asm/string.h>
  30#include <asm/page.h>
  31#include <asm/irq.h>
  32#include <asm/dma.h>
  33#include <asm/byteorder.h>
  34#include <linux/uaccess.h>
  35#include <linux/atomic.h>
  36
  37#ifdef CONFIG_SBUS
  38#include <linux/of.h>
  39#include <linux/of_device.h>
  40#include <asm/idprom.h>
  41#include <asm/openprom.h>
  42#include <asm/oplib.h>
  43#include <asm/pgtable.h>
  44#endif
  45
  46#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
  47#define FORE200E_USE_TASKLET
  48#endif
  49
  50#if 0 /* enable the debugging code of the buffer supply queues */
  51#define FORE200E_BSQ_DEBUG
  52#endif
  53
  54#if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
  55#define FORE200E_52BYTE_AAL0_SDU
  56#endif
  57
  58#include "fore200e.h"
  59#include "suni.h"
  60
  61#define FORE200E_VERSION "0.3e"
  62
  63#define FORE200E         "fore200e: "
  64
  65#if 0 /* override .config */
  66#define CONFIG_ATM_FORE200E_DEBUG 1
  67#endif
  68#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
  69#define DPRINTK(level, format, args...)  do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
  70                                                  printk(FORE200E format, ##args); } while (0)
  71#else
  72#define DPRINTK(level, format, args...)  do {} while (0)
  73#endif
  74
  75
  76#define FORE200E_ALIGN(addr, alignment) \
  77        ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
  78
  79#define FORE200E_DMA_INDEX(dma_addr, type, index)  ((dma_addr) + (index) * sizeof(type))
  80
  81#define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
  82
  83#define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
  84
  85#if 1
  86#define ASSERT(expr)     if (!(expr)) { \
  87			     printk(FORE200E "assertion failed! %s[%d]: %s\n", \
  88				    __func__, __LINE__, #expr); \
  89			     panic(FORE200E "%s", __func__); \
  90			 }
  91#else
  92#define ASSERT(expr)     do {} while (0)
  93#endif
  94
  95
  96static const struct atmdev_ops   fore200e_ops;
  97
  98static LIST_HEAD(fore200e_boards);
  99
 100
 101MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
 102MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
 103MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
 104
 105
 106static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
 107    { BUFFER_S1_NBR, BUFFER_L1_NBR },
 108    { BUFFER_S2_NBR, BUFFER_L2_NBR }
 109};
 110
 111static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
 112    { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
 113    { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
 114};
 115
 116
 117#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
 118static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
 119#endif
 120
 121
 122#if 0 /* currently unused */
 123static int 
 124fore200e_fore2atm_aal(enum fore200e_aal aal)
 125{
 126    switch(aal) {
 127    case FORE200E_AAL0:  return ATM_AAL0;
 128    case FORE200E_AAL34: return ATM_AAL34;
 129    case FORE200E_AAL5:  return ATM_AAL5;
 130    }
 131
 132    return -EINVAL;
 133}
 134#endif
 135
 136
 137static enum fore200e_aal
 138fore200e_atm2fore_aal(int aal)
 139{
 140    switch(aal) {
 141    case ATM_AAL0:  return FORE200E_AAL0;
 142    case ATM_AAL34: return FORE200E_AAL34;
 143    case ATM_AAL1:
 144    case ATM_AAL2:
 145    case ATM_AAL5:  return FORE200E_AAL5;
 146    }
 147
 148    return -EINVAL;
 149}
 150
 151
 152static char*
 153fore200e_irq_itoa(int irq)
 154{
 155    static char str[8];
 156    sprintf(str, "%d", irq);
 157    return str;
 158}
 159
 160
 161/* allocate and align a chunk of memory intended to hold the data behing exchanged
 162   between the driver and the adapter (using streaming DVMA) */
 163
 164static int
 165fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
 166{
 167    unsigned long offset = 0;
 168
 169    if (alignment <= sizeof(int))
 170	alignment = 0;
 171
 172    chunk->alloc_size = size + alignment;
 173    chunk->direction  = direction;
 174
 175    chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
 176    if (chunk->alloc_addr == NULL)
 177	return -ENOMEM;
 178
 179    if (alignment > 0)
 180	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment); 
 181    
 182    chunk->align_addr = chunk->alloc_addr + offset;
 183
 184    chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
 185				     size, direction);
 186    if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
 187	kfree(chunk->alloc_addr);
 188	return -ENOMEM;
 189    }
 190    return 0;
 191}
 192
 193
 194/* free a chunk of memory */
 195
 196static void
 197fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
 198{
 199    dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
 200		     chunk->direction);
 201    kfree(chunk->alloc_addr);
 202}
 203
 204/*
 205 * Allocate a DMA consistent chunk of memory intended to act as a communication
 206 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
 207 * and the adapter.
 208 */
 209static int
 210fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
 211		int size, int nbr, int alignment)
 212{
 213	/* returned chunks are page-aligned */
 214	chunk->alloc_size = size * nbr;
 215	chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
 216					       &chunk->dma_addr, GFP_KERNEL);
 217	if (!chunk->alloc_addr)
 218		return -ENOMEM;
 219	chunk->align_addr = chunk->alloc_addr;
 220	return 0;
 221}
 222
 223/*
 224 * Free a DMA consistent chunk of memory.
 225 */
 226static void
 227fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
 228{
 229	dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
 230			  chunk->dma_addr);
 231}
 232
 233static void
 234fore200e_spin(int msecs)
 235{
 236    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
 237    while (time_before(jiffies, timeout));
 238}
 239
 240
 241static int
 242fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
 243{
 244    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
 245    int           ok;
 246
 247    mb();
 248    do {
 249	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
 250	    break;
 251
 252    } while (time_before(jiffies, timeout));
 253
 254#if 1
 255    if (!ok) {
 256	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
 257	       *addr, val);
 258    }
 259#endif
 260
 261    return ok;
 262}
 263
 264
 265static int
 266fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
 267{
 268    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
 269    int           ok;
 270
 271    do {
 272	if ((ok = (fore200e->bus->read(addr) == val)))
 273	    break;
 274
 275    } while (time_before(jiffies, timeout));
 276
 277#if 1
 278    if (!ok) {
 279	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
 280	       fore200e->bus->read(addr), val);
 281    }
 282#endif
 283
 284    return ok;
 285}
 286
 287
 288static void
 289fore200e_free_rx_buf(struct fore200e* fore200e)
 290{
 291    int scheme, magn, nbr;
 292    struct buffer* buffer;
 293
 294    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
 295	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
 296
 297	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
 298
 299		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
 300
 301		    struct chunk* data = &buffer[ nbr ].data;
 302
 303		    if (data->alloc_addr != NULL)
 304			fore200e_chunk_free(fore200e, data);
 305		}
 306	    }
 307	}
 308    }
 309}
 310
 311
 312static void
 313fore200e_uninit_bs_queue(struct fore200e* fore200e)
 314{
 315    int scheme, magn;
 316    
 317    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
 318	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
 319
 320	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
 321	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
 322	    
 323	    if (status->alloc_addr)
 324		fore200e_dma_chunk_free(fore200e, status);
 325	    
 326	    if (rbd_block->alloc_addr)
 327		fore200e_dma_chunk_free(fore200e, rbd_block);
 328	}
 329    }
 330}
 331
 332
 333static int
 334fore200e_reset(struct fore200e* fore200e, int diag)
 335{
 336    int ok;
 337
 338    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
 339    
 340    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
 341
 342    fore200e->bus->reset(fore200e);
 343
 344    if (diag) {
 345	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
 346	if (ok == 0) {
 347	    
 348	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
 349	    return -ENODEV;
 350	}
 351
 352	printk(FORE200E "device %s self-test passed\n", fore200e->name);
 353	
 354	fore200e->state = FORE200E_STATE_RESET;
 355    }
 356
 357    return 0;
 358}
 359
 360
 361static void
 362fore200e_shutdown(struct fore200e* fore200e)
 363{
 364    printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
 365	   fore200e->name, fore200e->phys_base, 
 366	   fore200e_irq_itoa(fore200e->irq));
 367    
 368    if (fore200e->state > FORE200E_STATE_RESET) {
 369	/* first, reset the board to prevent further interrupts or data transfers */
 370	fore200e_reset(fore200e, 0);
 371    }
 372    
 373    /* then, release all allocated resources */
 374    switch(fore200e->state) {
 375
 376    case FORE200E_STATE_COMPLETE:
 377	kfree(fore200e->stats);
 378
 379	/* fall through */
 380    case FORE200E_STATE_IRQ:
 381	free_irq(fore200e->irq, fore200e->atm_dev);
 382
 383	/* fall through */
 384    case FORE200E_STATE_ALLOC_BUF:
 385	fore200e_free_rx_buf(fore200e);
 386
 387	/* fall through */
 388    case FORE200E_STATE_INIT_BSQ:
 389	fore200e_uninit_bs_queue(fore200e);
 390
 391	/* fall through */
 392    case FORE200E_STATE_INIT_RXQ:
 393	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
 394	fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
 395
 396	/* fall through */
 397    case FORE200E_STATE_INIT_TXQ:
 398	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
 399	fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
 400
 401	/* fall through */
 402    case FORE200E_STATE_INIT_CMDQ:
 403	fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
 404
 405	/* fall through */
 406    case FORE200E_STATE_INITIALIZE:
 407	/* nothing to do for that state */
 408
 409    case FORE200E_STATE_START_FW:
 410	/* nothing to do for that state */
 411
 412    case FORE200E_STATE_RESET:
 413	/* nothing to do for that state */
 414
 415    case FORE200E_STATE_MAP:
 416	fore200e->bus->unmap(fore200e);
 417
 418	/* fall through */
 419    case FORE200E_STATE_CONFIGURE:
 420	/* nothing to do for that state */
 421
 422    case FORE200E_STATE_REGISTER:
 423	/* XXX shouldn't we *start* by deregistering the device? */
 424	atm_dev_deregister(fore200e->atm_dev);
 425
 
 426    case FORE200E_STATE_BLANK:
 427	/* nothing to do for that state */
 428	break;
 429    }
 430}
 431
 432
 433#ifdef CONFIG_PCI
 434
 435static u32 fore200e_pca_read(volatile u32 __iomem *addr)
 436{
 437    /* on big-endian hosts, the board is configured to convert
 438       the endianess of slave RAM accesses  */
 439    return le32_to_cpu(readl(addr));
 440}
 441
 442
 443static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
 444{
 445    /* on big-endian hosts, the board is configured to convert
 446       the endianess of slave RAM accesses  */
 447    writel(cpu_to_le32(val), addr);
 448}
 449
 450static int
 451fore200e_pca_irq_check(struct fore200e* fore200e)
 452{
 453    /* this is a 1 bit register */
 454    int irq_posted = readl(fore200e->regs.pca.psr);
 455
 456#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
 457    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
 458	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
 459    }
 460#endif
 461
 462    return irq_posted;
 463}
 464
 465
 466static void
 467fore200e_pca_irq_ack(struct fore200e* fore200e)
 468{
 469    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
 470}
 471
 472
 473static void
 474fore200e_pca_reset(struct fore200e* fore200e)
 475{
 476    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
 477    fore200e_spin(10);
 478    writel(0, fore200e->regs.pca.hcr);
 479}
 480
 481
 482static int fore200e_pca_map(struct fore200e* fore200e)
 483{
 484    DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
 485
 486    fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
 487    
 488    if (fore200e->virt_base == NULL) {
 489	printk(FORE200E "can't map device %s\n", fore200e->name);
 490	return -EFAULT;
 491    }
 492
 493    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
 494
 495    /* gain access to the PCA specific registers  */
 496    fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
 497    fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
 498    fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
 499
 500    fore200e->state = FORE200E_STATE_MAP;
 501    return 0;
 502}
 503
 504
 505static void
 506fore200e_pca_unmap(struct fore200e* fore200e)
 507{
 508    DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
 509
 510    if (fore200e->virt_base != NULL)
 511	iounmap(fore200e->virt_base);
 512}
 513
 514
 515static int fore200e_pca_configure(struct fore200e *fore200e)
 516{
 517    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
 518    u8              master_ctrl, latency;
 519
 520    DPRINTK(2, "device %s being configured\n", fore200e->name);
 521
 522    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
 523	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
 524	return -EIO;
 525    }
 526
 527    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
 528
 529    master_ctrl = master_ctrl
 530#if defined(__BIG_ENDIAN)
 531	/* request the PCA board to convert the endianess of slave RAM accesses */
 532	| PCA200E_CTRL_CONVERT_ENDIAN
 533#endif
 534#if 0
 535        | PCA200E_CTRL_DIS_CACHE_RD
 536        | PCA200E_CTRL_DIS_WRT_INVAL
 537        | PCA200E_CTRL_ENA_CONT_REQ_MODE
 538        | PCA200E_CTRL_2_CACHE_WRT_INVAL
 539#endif
 540	| PCA200E_CTRL_LARGE_PCI_BURSTS;
 541    
 542    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
 543
 544    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
 545       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
 546       this may impact the performances of other PCI devices on the same bus, though */
 547    latency = 192;
 548    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
 549
 550    fore200e->state = FORE200E_STATE_CONFIGURE;
 551    return 0;
 552}
 553
 554
 555static int __init
 556fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
 557{
 558    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
 559    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
 560    struct prom_opcode      opcode;
 561    int                     ok;
 562    u32                     prom_dma;
 563
 564    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
 565
 566    opcode.opcode = OPCODE_GET_PROM;
 567    opcode.pad    = 0;
 568
 569    prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
 570			      DMA_FROM_DEVICE);
 571    if (dma_mapping_error(fore200e->dev, prom_dma))
 572	return -ENOMEM;
 573
 574    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
 575    
 576    *entry->status = STATUS_PENDING;
 577
 578    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
 579
 580    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
 581
 582    *entry->status = STATUS_FREE;
 583
 584    dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
 585
 586    if (ok == 0) {
 587	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
 588	return -EIO;
 589    }
 590
 591#if defined(__BIG_ENDIAN)
 592    
 593#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
 594
 595    /* MAC address is stored as little-endian */
 596    swap_here(&prom->mac_addr[0]);
 597    swap_here(&prom->mac_addr[4]);
 598#endif
 599    
 600    return 0;
 601}
 602
 603
 604static int
 605fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
 606{
 607    struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
 608
 609    return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
 610		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
 611}
 612
 613static const struct fore200e_bus fore200e_pci_ops = {
 614	.model_name		= "PCA-200E",
 615	.proc_name		= "pca200e",
 616	.descr_alignment	= 32,
 617	.buffer_alignment	= 4,
 618	.status_alignment	= 32,
 619	.read			= fore200e_pca_read,
 620	.write			= fore200e_pca_write,
 621	.configure		= fore200e_pca_configure,
 622	.map			= fore200e_pca_map,
 623	.reset			= fore200e_pca_reset,
 624	.prom_read		= fore200e_pca_prom_read,
 625	.unmap			= fore200e_pca_unmap,
 626	.irq_check		= fore200e_pca_irq_check,
 627	.irq_ack		= fore200e_pca_irq_ack,
 628	.proc_read		= fore200e_pca_proc_read,
 629};
 630#endif /* CONFIG_PCI */
 631
 632#ifdef CONFIG_SBUS
 633
 634static u32 fore200e_sba_read(volatile u32 __iomem *addr)
 635{
 636    return sbus_readl(addr);
 637}
 638
 639static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
 640{
 641    sbus_writel(val, addr);
 642}
 643
 644static void fore200e_sba_irq_enable(struct fore200e *fore200e)
 645{
 646	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
 647	fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
 648}
 649
 650static int fore200e_sba_irq_check(struct fore200e *fore200e)
 651{
 652	return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
 653}
 654
 655static void fore200e_sba_irq_ack(struct fore200e *fore200e)
 656{
 657	u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
 658	fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
 659}
 660
 661static void fore200e_sba_reset(struct fore200e *fore200e)
 662{
 663	fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
 664	fore200e_spin(10);
 665	fore200e->bus->write(0, fore200e->regs.sba.hcr);
 666}
 667
 668static int __init fore200e_sba_map(struct fore200e *fore200e)
 669{
 670	struct platform_device *op = to_platform_device(fore200e->dev);
 671	unsigned int bursts;
 672
 673	/* gain access to the SBA specific registers  */
 674	fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
 675	fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
 676	fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
 677	fore200e->virt_base    = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
 678
 679	if (!fore200e->virt_base) {
 680		printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
 681		return -EFAULT;
 682	}
 683
 684	DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
 685    
 686	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
 687
 688	/* get the supported DVMA burst sizes */
 689	bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
 690
 691	if (sbus_can_dma_64bit())
 692		sbus_set_sbus64(&op->dev, bursts);
 693
 694	fore200e->state = FORE200E_STATE_MAP;
 695	return 0;
 696}
 697
 698static void fore200e_sba_unmap(struct fore200e *fore200e)
 699{
 700	struct platform_device *op = to_platform_device(fore200e->dev);
 701
 702	of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
 703	of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
 704	of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
 705	of_iounmap(&op->resource[3], fore200e->virt_base,    SBA200E_RAM_LENGTH);
 706}
 707
 708static int __init fore200e_sba_configure(struct fore200e *fore200e)
 709{
 710	fore200e->state = FORE200E_STATE_CONFIGURE;
 711	return 0;
 712}
 713
 714static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
 715{
 716	struct platform_device *op = to_platform_device(fore200e->dev);
 717	const u8 *prop;
 718	int len;
 719
 720	prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
 721	if (!prop)
 722		return -ENODEV;
 723	memcpy(&prom->mac_addr[4], prop, 4);
 724
 725	prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
 726	if (!prop)
 727		return -ENODEV;
 728	memcpy(&prom->mac_addr[2], prop, 4);
 729
 730	prom->serial_number = of_getintprop_default(op->dev.of_node,
 731						    "serialnumber", 0);
 732	prom->hw_revision = of_getintprop_default(op->dev.of_node,
 733						  "promversion", 0);
 734    
 735	return 0;
 736}
 737
 738static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
 739{
 740	struct platform_device *op = to_platform_device(fore200e->dev);
 741	const struct linux_prom_registers *regs;
 742
 743	regs = of_get_property(op->dev.of_node, "reg", NULL);
 744
 745	return sprintf(page, "   SBUS slot/device:\t\t%d/'%pOFn'\n",
 746		       (regs ? regs->which_io : 0), op->dev.of_node);
 747}
 748
 749static const struct fore200e_bus fore200e_sbus_ops = {
 750	.model_name		= "SBA-200E",
 751	.proc_name		= "sba200e",
 752	.descr_alignment	= 32,
 753	.buffer_alignment	= 64,
 754	.status_alignment	= 32,
 755	.read			= fore200e_sba_read,
 756	.write			= fore200e_sba_write,
 757	.configure		= fore200e_sba_configure,
 758	.map			= fore200e_sba_map,
 759	.reset			= fore200e_sba_reset,
 760	.prom_read		= fore200e_sba_prom_read,
 761	.unmap			= fore200e_sba_unmap,
 762	.irq_enable		= fore200e_sba_irq_enable,
 763	.irq_check		= fore200e_sba_irq_check,
 764	.irq_ack		= fore200e_sba_irq_ack,
 765	.proc_read		= fore200e_sba_proc_read,
 766};
 767#endif /* CONFIG_SBUS */
 768
 769static void
 770fore200e_tx_irq(struct fore200e* fore200e)
 771{
 772    struct host_txq*        txq = &fore200e->host_txq;
 773    struct host_txq_entry*  entry;
 774    struct atm_vcc*         vcc;
 775    struct fore200e_vc_map* vc_map;
 776
 777    if (fore200e->host_txq.txing == 0)
 778	return;
 779
 780    for (;;) {
 781	
 782	entry = &txq->host_entry[ txq->tail ];
 783
 784        if ((*entry->status & STATUS_COMPLETE) == 0) {
 785	    break;
 786	}
 787
 788	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n", 
 789		entry, txq->tail, entry->vc_map, entry->skb);
 790
 791	/* free copy of misaligned data */
 792	kfree(entry->data);
 793	
 794	/* remove DMA mapping */
 795	dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
 796				 DMA_TO_DEVICE);
 797
 798	vc_map = entry->vc_map;
 799
 800	/* vcc closed since the time the entry was submitted for tx? */
 801	if ((vc_map->vcc == NULL) ||
 802	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
 803
 804	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
 805		    fore200e->atm_dev->number);
 806
 807	    dev_kfree_skb_any(entry->skb);
 808	}
 809	else {
 810	    ASSERT(vc_map->vcc);
 811
 812	    /* vcc closed then immediately re-opened? */
 813	    if (vc_map->incarn != entry->incarn) {
 814
 815		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
 816		   if the same vcc is immediately re-opened, those pending PDUs must
 817		   not be popped after the completion of their emission, as they refer
 818		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
 819		   would be decremented by the size of the (unrelated) skb, possibly
 820		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
 821		   we thus bind the tx entry to the current incarnation of the vcc
 822		   when the entry is submitted for tx. When the tx later completes,
 823		   if the incarnation number of the tx entry does not match the one
 824		   of the vcc, then this implies that the vcc has been closed then re-opened.
 825		   we thus just drop the skb here. */
 826
 827		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
 828			fore200e->atm_dev->number);
 829
 830		dev_kfree_skb_any(entry->skb);
 831	    }
 832	    else {
 833		vcc = vc_map->vcc;
 834		ASSERT(vcc);
 835
 836		/* notify tx completion */
 837		if (vcc->pop) {
 838		    vcc->pop(vcc, entry->skb);
 839		}
 840		else {
 841		    dev_kfree_skb_any(entry->skb);
 842		}
 843
 844		/* check error condition */
 845		if (*entry->status & STATUS_ERROR)
 846		    atomic_inc(&vcc->stats->tx_err);
 847		else
 848		    atomic_inc(&vcc->stats->tx);
 849	    }
 850	}
 851
 852	*entry->status = STATUS_FREE;
 853
 854	fore200e->host_txq.txing--;
 855
 856	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
 857    }
 858}
 859
 860
 861#ifdef FORE200E_BSQ_DEBUG
 862int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
 863{
 864    struct buffer* buffer;
 865    int count = 0;
 866
 867    buffer = bsq->freebuf;
 868    while (buffer) {
 869
 870	if (buffer->supplied) {
 871	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
 872		   where, scheme, magn, buffer->index);
 873	}
 874
 875	if (buffer->magn != magn) {
 876	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
 877		   where, scheme, magn, buffer->index, buffer->magn);
 878	}
 879
 880	if (buffer->scheme != scheme) {
 881	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
 882		   where, scheme, magn, buffer->index, buffer->scheme);
 883	}
 884
 885	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
 886	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
 887		   where, scheme, magn, buffer->index);
 888	}
 889
 890	count++;
 891	buffer = buffer->next;
 892    }
 893
 894    if (count != bsq->freebuf_count) {
 895	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
 896	       where, scheme, magn, count, bsq->freebuf_count);
 897    }
 898    return 0;
 899}
 900#endif
 901
 902
 903static void
 904fore200e_supply(struct fore200e* fore200e)
 905{
 906    int  scheme, magn, i;
 907
 908    struct host_bsq*       bsq;
 909    struct host_bsq_entry* entry;
 910    struct buffer*         buffer;
 911
 912    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
 913	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
 914
 915	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
 916
 917#ifdef FORE200E_BSQ_DEBUG
 918	    bsq_audit(1, bsq, scheme, magn);
 919#endif
 920	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {
 921
 922		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
 923			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
 924
 925		entry = &bsq->host_entry[ bsq->head ];
 926
 927		for (i = 0; i < RBD_BLK_SIZE; i++) {
 928
 929		    /* take the first buffer in the free buffer list */
 930		    buffer = bsq->freebuf;
 931		    if (!buffer) {
 932			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
 933			       scheme, magn, bsq->freebuf_count);
 934			return;
 935		    }
 936		    bsq->freebuf = buffer->next;
 937		    
 938#ifdef FORE200E_BSQ_DEBUG
 939		    if (buffer->supplied)
 940			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
 941			       scheme, magn, buffer->index);
 942		    buffer->supplied = 1;
 943#endif
 944		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
 945		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
 946		}
 947
 948		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
 949
 950 		/* decrease accordingly the number of free rx buffers */
 951		bsq->freebuf_count -= RBD_BLK_SIZE;
 952
 953		*entry->status = STATUS_PENDING;
 954		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
 955	    }
 956	}
 957    }
 958}
 959
 960
 961static int
 962fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
 963{
 964    struct sk_buff*      skb;
 965    struct buffer*       buffer;
 966    struct fore200e_vcc* fore200e_vcc;
 967    int                  i, pdu_len = 0;
 968#ifdef FORE200E_52BYTE_AAL0_SDU
 969    u32                  cell_header = 0;
 970#endif
 971
 972    ASSERT(vcc);
 973    
 974    fore200e_vcc = FORE200E_VCC(vcc);
 975    ASSERT(fore200e_vcc);
 976
 977#ifdef FORE200E_52BYTE_AAL0_SDU
 978    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
 979
 980	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
 981	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
 982                      (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
 983                      (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) | 
 984                       rpd->atm_header.clp;
 985	pdu_len = 4;
 986    }
 987#endif
 988    
 989    /* compute total PDU length */
 990    for (i = 0; i < rpd->nseg; i++)
 991	pdu_len += rpd->rsd[ i ].length;
 992    
 993    skb = alloc_skb(pdu_len, GFP_ATOMIC);
 994    if (skb == NULL) {
 995	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
 996
 997	atomic_inc(&vcc->stats->rx_drop);
 998	return -ENOMEM;
 999    } 
1000
1001    __net_timestamp(skb);
1002    
1003#ifdef FORE200E_52BYTE_AAL0_SDU
1004    if (cell_header) {
1005	*((u32*)skb_put(skb, 4)) = cell_header;
1006    }
1007#endif
1008
1009    /* reassemble segments */
1010    for (i = 0; i < rpd->nseg; i++) {
1011	
1012	/* rebuild rx buffer address from rsd handle */
1013	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1014	
1015	/* Make device DMA transfer visible to CPU.  */
1016	dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
1017				rpd->rsd[i].length, DMA_FROM_DEVICE);
1018	
1019	skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
1020
1021	/* Now let the device get at it again.  */
1022	dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
1023				   rpd->rsd[i].length, DMA_FROM_DEVICE);
1024    }
1025
1026    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1027    
1028    if (pdu_len < fore200e_vcc->rx_min_pdu)
1029	fore200e_vcc->rx_min_pdu = pdu_len;
1030    if (pdu_len > fore200e_vcc->rx_max_pdu)
1031	fore200e_vcc->rx_max_pdu = pdu_len;
1032    fore200e_vcc->rx_pdu++;
1033
1034    /* push PDU */
1035    if (atm_charge(vcc, skb->truesize) == 0) {
1036
1037	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1038		vcc->itf, vcc->vpi, vcc->vci);
1039
1040	dev_kfree_skb_any(skb);
1041
1042	atomic_inc(&vcc->stats->rx_drop);
1043	return -ENOMEM;
1044    }
1045
1046    vcc->push(vcc, skb);
1047    atomic_inc(&vcc->stats->rx);
1048
1049    return 0;
1050}
1051
1052
1053static void
1054fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1055{
1056    struct host_bsq* bsq;
1057    struct buffer*   buffer;
1058    int              i;
1059    
1060    for (i = 0; i < rpd->nseg; i++) {
1061
1062	/* rebuild rx buffer address from rsd handle */
1063	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1064
1065	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1066
1067#ifdef FORE200E_BSQ_DEBUG
1068	bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1069
1070	if (buffer->supplied == 0)
1071	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1072		   buffer->scheme, buffer->magn, buffer->index);
1073	buffer->supplied = 0;
1074#endif
1075
1076	/* re-insert the buffer into the free buffer list */
1077	buffer->next = bsq->freebuf;
1078	bsq->freebuf = buffer;
1079
1080	/* then increment the number of free rx buffers */
1081	bsq->freebuf_count++;
1082    }
1083}
1084
1085
1086static void
1087fore200e_rx_irq(struct fore200e* fore200e)
1088{
1089    struct host_rxq*        rxq = &fore200e->host_rxq;
1090    struct host_rxq_entry*  entry;
1091    struct atm_vcc*         vcc;
1092    struct fore200e_vc_map* vc_map;
1093
1094    for (;;) {
1095	
1096	entry = &rxq->host_entry[ rxq->head ];
1097
1098	/* no more received PDUs */
1099	if ((*entry->status & STATUS_COMPLETE) == 0)
1100	    break;
1101
1102	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1103
1104	if ((vc_map->vcc == NULL) ||
1105	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1106
1107	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1108		    fore200e->atm_dev->number,
1109		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1110	}
1111	else {
1112	    vcc = vc_map->vcc;
1113	    ASSERT(vcc);
1114
1115	    if ((*entry->status & STATUS_ERROR) == 0) {
1116
1117		fore200e_push_rpd(fore200e, vcc, entry->rpd);
1118	    }
1119	    else {
1120		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1121			fore200e->atm_dev->number,
1122			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1123		atomic_inc(&vcc->stats->rx_err);
1124	    }
1125	}
1126
1127	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1128
1129	fore200e_collect_rpd(fore200e, entry->rpd);
1130
1131	/* rewrite the rpd address to ack the received PDU */
1132	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1133	*entry->status = STATUS_FREE;
1134
1135	fore200e_supply(fore200e);
1136    }
1137}
1138
1139
1140#ifndef FORE200E_USE_TASKLET
1141static void
1142fore200e_irq(struct fore200e* fore200e)
1143{
1144    unsigned long flags;
1145
1146    spin_lock_irqsave(&fore200e->q_lock, flags);
1147    fore200e_rx_irq(fore200e);
1148    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1149
1150    spin_lock_irqsave(&fore200e->q_lock, flags);
1151    fore200e_tx_irq(fore200e);
1152    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1153}
1154#endif
1155
1156
1157static irqreturn_t
1158fore200e_interrupt(int irq, void* dev)
1159{
1160    struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1161
1162    if (fore200e->bus->irq_check(fore200e) == 0) {
1163	
1164	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1165	return IRQ_NONE;
1166    }
1167    DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1168
1169#ifdef FORE200E_USE_TASKLET
1170    tasklet_schedule(&fore200e->tx_tasklet);
1171    tasklet_schedule(&fore200e->rx_tasklet);
1172#else
1173    fore200e_irq(fore200e);
1174#endif
1175    
1176    fore200e->bus->irq_ack(fore200e);
1177    return IRQ_HANDLED;
1178}
1179
1180
1181#ifdef FORE200E_USE_TASKLET
1182static void
1183fore200e_tx_tasklet(unsigned long data)
1184{
1185    struct fore200e* fore200e = (struct fore200e*) data;
1186    unsigned long flags;
1187
1188    DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1189
1190    spin_lock_irqsave(&fore200e->q_lock, flags);
1191    fore200e_tx_irq(fore200e);
1192    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1193}
1194
1195
1196static void
1197fore200e_rx_tasklet(unsigned long data)
1198{
1199    struct fore200e* fore200e = (struct fore200e*) data;
1200    unsigned long    flags;
1201
1202    DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1203
1204    spin_lock_irqsave(&fore200e->q_lock, flags);
1205    fore200e_rx_irq((struct fore200e*) data);
1206    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1207}
1208#endif
1209
1210
1211static int
1212fore200e_select_scheme(struct atm_vcc* vcc)
1213{
1214    /* fairly balance the VCs over (identical) buffer schemes */
1215    int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1216
1217    DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1218	    vcc->itf, vcc->vpi, vcc->vci, scheme);
1219
1220    return scheme;
1221}
1222
1223
1224static int 
1225fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1226{
1227    struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
1228    struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
1229    struct activate_opcode   activ_opcode;
1230    struct deactivate_opcode deactiv_opcode;
1231    struct vpvc              vpvc;
1232    int                      ok;
1233    enum fore200e_aal        aal = fore200e_atm2fore_aal(vcc->qos.aal);
1234
1235    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1236    
1237    if (activate) {
1238	FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1239	
1240	activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1241	activ_opcode.aal    = aal;
1242	activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1243	activ_opcode.pad    = 0;
1244    }
1245    else {
1246	deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1247	deactiv_opcode.pad    = 0;
1248    }
1249
1250    vpvc.vci = vcc->vci;
1251    vpvc.vpi = vcc->vpi;
1252
1253    *entry->status = STATUS_PENDING;
1254
1255    if (activate) {
1256
1257#ifdef FORE200E_52BYTE_AAL0_SDU
1258	mtu = 48;
1259#endif
1260	/* the MTU is not used by the cp, except in the case of AAL0 */
1261	fore200e->bus->write(mtu,                        &entry->cp_entry->cmd.activate_block.mtu);
1262	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1263	fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1264    }
1265    else {
1266	fore200e->bus->write(*(u32*)&vpvc,         (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1267	fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1268    }
1269
1270    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1271
1272    *entry->status = STATUS_FREE;
1273
1274    if (ok == 0) {
1275	printk(FORE200E "unable to %s VC %d.%d.%d\n",
1276	       activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1277	return -EIO;
1278    }
1279
1280    DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 
1281	    activate ? "open" : "clos");
1282
1283    return 0;
1284}
1285
1286
1287#define FORE200E_MAX_BACK2BACK_CELLS 255    /* XXX depends on CDVT */
1288
1289static void
1290fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1291{
1292    if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1293    
1294	/* compute the data cells to idle cells ratio from the tx PCR */
1295	rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1296	rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1297    }
1298    else {
1299	/* disable rate control */
1300	rate->data_cells = rate->idle_cells = 0;
1301    }
1302}
1303
1304
1305static int
1306fore200e_open(struct atm_vcc *vcc)
1307{
1308    struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1309    struct fore200e_vcc*    fore200e_vcc;
1310    struct fore200e_vc_map* vc_map;
1311    unsigned long	    flags;
1312    int			    vci = vcc->vci;
1313    short		    vpi = vcc->vpi;
1314
1315    ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1316    ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1317
1318    spin_lock_irqsave(&fore200e->q_lock, flags);
1319
1320    vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1321    if (vc_map->vcc) {
1322
1323	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1324
1325	printk(FORE200E "VC %d.%d.%d already in use\n",
1326	       fore200e->atm_dev->number, vpi, vci);
1327
1328	return -EINVAL;
1329    }
1330
1331    vc_map->vcc = vcc;
1332
1333    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1334
1335    fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1336    if (fore200e_vcc == NULL) {
1337	vc_map->vcc = NULL;
1338	return -ENOMEM;
1339    }
1340
1341    DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1342	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1343	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1344	    fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1345	    vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1346	    fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1347	    vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1348    
1349    /* pseudo-CBR bandwidth requested? */
1350    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1351	
1352	mutex_lock(&fore200e->rate_mtx);
1353	if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1354	    mutex_unlock(&fore200e->rate_mtx);
1355
1356	    kfree(fore200e_vcc);
1357	    vc_map->vcc = NULL;
1358	    return -EAGAIN;
1359	}
1360
1361	/* reserve bandwidth */
1362	fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1363	mutex_unlock(&fore200e->rate_mtx);
1364    }
1365    
1366    vcc->itf = vcc->dev->number;
1367
1368    set_bit(ATM_VF_PARTIAL,&vcc->flags);
1369    set_bit(ATM_VF_ADDR, &vcc->flags);
1370
1371    vcc->dev_data = fore200e_vcc;
1372    
1373    if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1374
1375	vc_map->vcc = NULL;
1376
1377	clear_bit(ATM_VF_ADDR, &vcc->flags);
1378	clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1379
1380	vcc->dev_data = NULL;
1381
1382	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1383
1384	kfree(fore200e_vcc);
1385	return -EINVAL;
1386    }
1387    
1388    /* compute rate control parameters */
1389    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1390	
1391	fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1392	set_bit(ATM_VF_HASQOS, &vcc->flags);
1393
1394	DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1395		vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1396		vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, 
1397		fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1398    }
1399    
1400    fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1401    fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1402    fore200e_vcc->tx_pdu     = fore200e_vcc->rx_pdu     = 0;
1403
1404    /* new incarnation of the vcc */
1405    vc_map->incarn = ++fore200e->incarn_count;
1406
1407    /* VC unusable before this flag is set */
1408    set_bit(ATM_VF_READY, &vcc->flags);
1409
1410    return 0;
1411}
1412
1413
1414static void
1415fore200e_close(struct atm_vcc* vcc)
1416{
1417    struct fore200e*        fore200e = FORE200E_DEV(vcc->dev);
1418    struct fore200e_vcc*    fore200e_vcc;
 
1419    struct fore200e_vc_map* vc_map;
1420    unsigned long           flags;
1421
1422    ASSERT(vcc);
 
 
1423    ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1424    ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1425
1426    DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1427
1428    clear_bit(ATM_VF_READY, &vcc->flags);
1429
1430    fore200e_activate_vcin(fore200e, 0, vcc, 0);
1431
1432    spin_lock_irqsave(&fore200e->q_lock, flags);
1433
1434    vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1435
1436    /* the vc is no longer considered as "in use" by fore200e_open() */
1437    vc_map->vcc = NULL;
1438
1439    vcc->itf = vcc->vci = vcc->vpi = 0;
1440
1441    fore200e_vcc = FORE200E_VCC(vcc);
1442    vcc->dev_data = NULL;
1443
1444    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1445
1446    /* release reserved bandwidth, if any */
1447    if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1448
1449	mutex_lock(&fore200e->rate_mtx);
1450	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1451	mutex_unlock(&fore200e->rate_mtx);
1452
1453	clear_bit(ATM_VF_HASQOS, &vcc->flags);
1454    }
1455
1456    clear_bit(ATM_VF_ADDR, &vcc->flags);
1457    clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1458
1459    ASSERT(fore200e_vcc);
1460    kfree(fore200e_vcc);
1461}
1462
1463
1464static int
1465fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1466{
1467    struct fore200e*        fore200e     = FORE200E_DEV(vcc->dev);
1468    struct fore200e_vcc*    fore200e_vcc = FORE200E_VCC(vcc);
1469    struct fore200e_vc_map* vc_map;
1470    struct host_txq*        txq          = &fore200e->host_txq;
1471    struct host_txq_entry*  entry;
1472    struct tpd*             tpd;
1473    struct tpd_haddr        tpd_haddr;
1474    int                     retry        = CONFIG_ATM_FORE200E_TX_RETRY;
1475    int                     tx_copy      = 0;
1476    int                     tx_len       = skb->len;
1477    u32*                    cell_header  = NULL;
1478    unsigned char*          skb_data;
1479    int                     skb_len;
1480    unsigned char*          data;
1481    unsigned long           flags;
1482
1483    ASSERT(vcc);
1484    ASSERT(fore200e);
1485    ASSERT(fore200e_vcc);
 
 
 
 
 
 
 
 
 
1486
1487    if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1488	DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1489	dev_kfree_skb_any(skb);
1490	return -EINVAL;
1491    }
1492
1493#ifdef FORE200E_52BYTE_AAL0_SDU
1494    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1495	cell_header = (u32*) skb->data;
1496	skb_data    = skb->data + 4;    /* skip 4-byte cell header */
1497	skb_len     = tx_len = skb->len  - 4;
1498
1499	DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1500    }
1501    else 
1502#endif
1503    {
1504	skb_data = skb->data;
1505	skb_len  = skb->len;
1506    }
1507    
1508    if (((unsigned long)skb_data) & 0x3) {
1509
1510	DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1511	tx_copy = 1;
1512	tx_len  = skb_len;
1513    }
1514
1515    if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1516
1517        /* this simply NUKES the PCA board */
1518	DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1519	tx_copy = 1;
1520	tx_len  = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1521    }
1522    
1523    if (tx_copy) {
1524	data = kmalloc(tx_len, GFP_ATOMIC);
1525	if (data == NULL) {
1526	    if (vcc->pop) {
1527		vcc->pop(vcc, skb);
1528	    }
1529	    else {
1530		dev_kfree_skb_any(skb);
1531	    }
1532	    return -ENOMEM;
1533	}
1534
1535	memcpy(data, skb_data, skb_len);
1536	if (skb_len < tx_len)
1537	    memset(data + skb_len, 0x00, tx_len - skb_len);
1538    }
1539    else {
1540	data = skb_data;
1541    }
1542
1543    vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1544    ASSERT(vc_map->vcc == vcc);
1545
1546  retry_here:
1547
1548    spin_lock_irqsave(&fore200e->q_lock, flags);
1549
1550    entry = &txq->host_entry[ txq->head ];
1551
1552    if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1553
1554	/* try to free completed tx queue entries */
1555	fore200e_tx_irq(fore200e);
1556
1557	if (*entry->status != STATUS_FREE) {
1558
1559	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1560
1561	    /* retry once again? */
1562	    if (--retry > 0) {
1563		udelay(50);
1564		goto retry_here;
1565	    }
1566
1567	    atomic_inc(&vcc->stats->tx_err);
1568
1569	    fore200e->tx_sat++;
1570	    DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1571		    fore200e->name, fore200e->cp_queues->heartbeat);
1572	    if (vcc->pop) {
1573		vcc->pop(vcc, skb);
1574	    }
1575	    else {
1576		dev_kfree_skb_any(skb);
1577	    }
1578
1579	    if (tx_copy)
1580		kfree(data);
1581
1582	    return -ENOBUFS;
1583	}
1584    }
1585
1586    entry->incarn = vc_map->incarn;
1587    entry->vc_map = vc_map;
1588    entry->skb    = skb;
1589    entry->data   = tx_copy ? data : NULL;
1590
1591    tpd = entry->tpd;
1592    tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
1593					  DMA_TO_DEVICE);
1594    if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
1595	if (tx_copy)
1596	    kfree(data);
1597	spin_unlock_irqrestore(&fore200e->q_lock, flags);
1598	return -ENOMEM;
1599    }
1600    tpd->tsd[ 0 ].length = tx_len;
1601
1602    FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1603    txq->txing++;
1604
1605    /* The dma_map call above implies a dma_sync so the device can use it,
1606     * thus no explicit dma_sync call is necessary here.
1607     */
1608    
1609    DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 
1610	    vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1611	    tpd->tsd[0].length, skb_len);
1612
1613    if (skb_len < fore200e_vcc->tx_min_pdu)
1614	fore200e_vcc->tx_min_pdu = skb_len;
1615    if (skb_len > fore200e_vcc->tx_max_pdu)
1616	fore200e_vcc->tx_max_pdu = skb_len;
1617    fore200e_vcc->tx_pdu++;
1618
1619    /* set tx rate control information */
1620    tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1621    tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1622
1623    if (cell_header) {
1624	tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1625	tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1626	tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1627	tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1628	tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1629    }
1630    else {
1631	/* set the ATM header, common to all cells conveying the PDU */
1632	tpd->atm_header.clp = 0;
1633	tpd->atm_header.plt = 0;
1634	tpd->atm_header.vci = vcc->vci;
1635	tpd->atm_header.vpi = vcc->vpi;
1636	tpd->atm_header.gfc = 0;
1637    }
1638
1639    tpd->spec.length = tx_len;
1640    tpd->spec.nseg   = 1;
1641    tpd->spec.aal    = fore200e_atm2fore_aal(vcc->qos.aal);
1642    tpd->spec.intr   = 1;
1643
1644    tpd_haddr.size  = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT);  /* size is expressed in 32 byte blocks */
1645    tpd_haddr.pad   = 0;
1646    tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT;          /* shift the address, as we are in a bitfield */
1647
1648    *entry->status = STATUS_PENDING;
1649    fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1650
1651    spin_unlock_irqrestore(&fore200e->q_lock, flags);
1652
1653    return 0;
1654}
1655
1656
1657static int
1658fore200e_getstats(struct fore200e* fore200e)
1659{
1660    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1661    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1662    struct stats_opcode     opcode;
1663    int                     ok;
1664    u32                     stats_dma_addr;
1665
1666    if (fore200e->stats == NULL) {
1667	fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
1668	if (fore200e->stats == NULL)
1669	    return -ENOMEM;
1670    }
1671    
1672    stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
1673				    sizeof(struct stats), DMA_FROM_DEVICE);
1674    if (dma_mapping_error(fore200e->dev, stats_dma_addr))
1675    	return -ENOMEM;
1676    
1677    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1678
1679    opcode.opcode = OPCODE_GET_STATS;
1680    opcode.pad    = 0;
1681
1682    fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1683    
1684    *entry->status = STATUS_PENDING;
1685
1686    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1687
1688    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1689
1690    *entry->status = STATUS_FREE;
1691
1692    dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1693    
1694    if (ok == 0) {
1695	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1696	return -EIO;
1697    }
1698
1699    return 0;
1700}
1701
1702
1703static int
1704fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1705{
1706    /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1707
1708    DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1709	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1710
1711    return -EINVAL;
1712}
1713
1714
1715static int
1716fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1717{
1718    /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1719    
1720    DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1721	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1722    
1723    return -EINVAL;
1724}
1725
1726
1727#if 0 /* currently unused */
1728static int
1729fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1730{
1731    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1732    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1733    struct oc3_opcode       opcode;
1734    int                     ok;
1735    u32                     oc3_regs_dma_addr;
1736
1737    oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1738
1739    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1740
1741    opcode.opcode = OPCODE_GET_OC3;
1742    opcode.reg    = 0;
1743    opcode.value  = 0;
1744    opcode.mask   = 0;
1745
1746    fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1747    
1748    *entry->status = STATUS_PENDING;
1749
1750    fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1751
1752    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1753
1754    *entry->status = STATUS_FREE;
1755
1756    fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1757    
1758    if (ok == 0) {
1759	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1760	return -EIO;
1761    }
1762
1763    return 0;
1764}
1765#endif
1766
1767
1768static int
1769fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1770{
1771    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
1772    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1773    struct oc3_opcode       opcode;
1774    int                     ok;
1775
1776    DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1777
1778    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1779
1780    opcode.opcode = OPCODE_SET_OC3;
1781    opcode.reg    = reg;
1782    opcode.value  = value;
1783    opcode.mask   = mask;
1784
1785    fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1786    
1787    *entry->status = STATUS_PENDING;
1788
1789    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1790
1791    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1792
1793    *entry->status = STATUS_FREE;
1794
1795    if (ok == 0) {
1796	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1797	return -EIO;
1798    }
1799
1800    return 0;
1801}
1802
1803
1804static int
1805fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1806{
1807    u32 mct_value, mct_mask;
1808    int error;
1809
1810    if (!capable(CAP_NET_ADMIN))
1811	return -EPERM;
1812    
1813    switch (loop_mode) {
1814
1815    case ATM_LM_NONE:
1816	mct_value = 0; 
1817	mct_mask  = SUNI_MCT_DLE | SUNI_MCT_LLE;
1818	break;
1819	
1820    case ATM_LM_LOC_PHY:
1821	mct_value = mct_mask = SUNI_MCT_DLE;
1822	break;
1823
1824    case ATM_LM_RMT_PHY:
1825	mct_value = mct_mask = SUNI_MCT_LLE;
1826	break;
1827
1828    default:
1829	return -EINVAL;
1830    }
1831
1832    error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1833    if (error == 0)
1834	fore200e->loop_mode = loop_mode;
1835
1836    return error;
1837}
1838
1839
1840static int
1841fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1842{
1843    struct sonet_stats tmp;
1844
1845    if (fore200e_getstats(fore200e) < 0)
1846	return -EIO;
1847
1848    tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1849    tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1850    tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1851    tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1852    tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1853    tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1854    tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1855    tmp.tx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
1856	              be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1857	              be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1858    tmp.rx_cells    = be32_to_cpu(fore200e->stats->aal0.cells_received)     +
1859	              be32_to_cpu(fore200e->stats->aal34.cells_received)    +
1860	              be32_to_cpu(fore200e->stats->aal5.cells_received);
1861
1862    if (arg)
1863	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;	
1864    
1865    return 0;
1866}
1867
1868
1869static int
1870fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1871{
1872    struct fore200e* fore200e = FORE200E_DEV(dev);
1873    
1874    DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1875
1876    switch (cmd) {
1877
1878    case SONET_GETSTAT:
1879	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1880
1881    case SONET_GETDIAG:
1882	return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1883
1884    case ATM_SETLOOP:
1885	return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1886
1887    case ATM_GETLOOP:
1888	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1889
1890    case ATM_QUERYLOOP:
1891	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1892    }
1893
1894    return -ENOSYS; /* not implemented */
1895}
1896
1897
1898static int
1899fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1900{
1901    struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1902    struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);
1903
1904    if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1905	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
1906	return -EINVAL;
1907    }
1908
1909    DPRINTK(2, "change_qos %d.%d.%d, "
1910	    "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1911	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1912	    "available_cell_rate = %u",
1913	    vcc->itf, vcc->vpi, vcc->vci,
1914	    fore200e_traffic_class[ qos->txtp.traffic_class ],
1915	    qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1916	    fore200e_traffic_class[ qos->rxtp.traffic_class ],
1917	    qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
1918	    flags, fore200e->available_cell_rate);
1919
1920    if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
1921
1922	mutex_lock(&fore200e->rate_mtx);
1923	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
1924	    mutex_unlock(&fore200e->rate_mtx);
1925	    return -EAGAIN;
1926	}
1927
1928	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1929	fore200e->available_cell_rate -= qos->txtp.max_pcr;
1930
1931	mutex_unlock(&fore200e->rate_mtx);
1932	
1933	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
1934	
1935	/* update rate control parameters */
1936	fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
1937
1938	set_bit(ATM_VF_HASQOS, &vcc->flags);
1939
1940	return 0;
1941    }
1942    
1943    return -EINVAL;
1944}
1945    
1946
1947static int fore200e_irq_request(struct fore200e *fore200e)
1948{
1949    if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
1950
1951	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
1952	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
1953	return -EBUSY;
1954    }
1955
1956    printk(FORE200E "IRQ %s reserved for device %s\n",
1957	   fore200e_irq_itoa(fore200e->irq), fore200e->name);
1958
1959#ifdef FORE200E_USE_TASKLET
1960    tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
1961    tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
1962#endif
1963
1964    fore200e->state = FORE200E_STATE_IRQ;
1965    return 0;
1966}
1967
1968
1969static int fore200e_get_esi(struct fore200e *fore200e)
1970{
1971    struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
1972    int ok, i;
1973
1974    if (!prom)
1975	return -ENOMEM;
1976
1977    ok = fore200e->bus->prom_read(fore200e, prom);
1978    if (ok < 0) {
1979	kfree(prom);
1980	return -EBUSY;
1981    }
1982	
1983    printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
1984	   fore200e->name, 
1985	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
1986	   prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
1987	
1988    for (i = 0; i < ESI_LEN; i++) {
1989	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
1990    }
1991    
1992    kfree(prom);
1993
1994    return 0;
1995}
1996
1997
1998static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
1999{
2000    int scheme, magn, nbr, size, i;
2001
2002    struct host_bsq* bsq;
2003    struct buffer*   buffer;
2004
2005    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2006	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2007
2008	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2009
2010	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
2011	    size = fore200e_rx_buf_size[ scheme ][ magn ];
2012
2013	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2014
2015	    /* allocate the array of receive buffers */
2016	    buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
2017                                           GFP_KERNEL);
2018
2019	    if (buffer == NULL)
2020		return -ENOMEM;
2021
2022	    bsq->freebuf = NULL;
2023
2024	    for (i = 0; i < nbr; i++) {
2025
2026		buffer[ i ].scheme = scheme;
2027		buffer[ i ].magn   = magn;
2028#ifdef FORE200E_BSQ_DEBUG
2029		buffer[ i ].index  = i;
2030		buffer[ i ].supplied = 0;
2031#endif
2032
2033		/* allocate the receive buffer body */
2034		if (fore200e_chunk_alloc(fore200e,
2035					 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2036					 DMA_FROM_DEVICE) < 0) {
2037		    
2038		    while (i > 0)
2039			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2040		    kfree(buffer);
2041		    
2042		    return -ENOMEM;
2043		}
2044
2045		/* insert the buffer into the free buffer list */
2046		buffer[ i ].next = bsq->freebuf;
2047		bsq->freebuf = &buffer[ i ];
2048	    }
2049	    /* all the buffers are free, initially */
2050	    bsq->freebuf_count = nbr;
2051
2052#ifdef FORE200E_BSQ_DEBUG
2053	    bsq_audit(3, bsq, scheme, magn);
2054#endif
2055	}
2056    }
2057
2058    fore200e->state = FORE200E_STATE_ALLOC_BUF;
2059    return 0;
2060}
2061
2062
2063static int fore200e_init_bs_queue(struct fore200e *fore200e)
2064{
2065    int scheme, magn, i;
2066
2067    struct host_bsq*     bsq;
2068    struct cp_bsq_entry __iomem * cp_entry;
2069
2070    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2071	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2072
2073	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2074
2075	    bsq = &fore200e->host_bsq[ scheme ][ magn ];
2076
2077	    /* allocate and align the array of status words */
2078	    if (fore200e_dma_chunk_alloc(fore200e,
2079					       &bsq->status,
2080					       sizeof(enum status), 
2081					       QUEUE_SIZE_BS,
2082					       fore200e->bus->status_alignment) < 0) {
2083		return -ENOMEM;
2084	    }
2085
2086	    /* allocate and align the array of receive buffer descriptors */
2087	    if (fore200e_dma_chunk_alloc(fore200e,
2088					       &bsq->rbd_block,
2089					       sizeof(struct rbd_block),
2090					       QUEUE_SIZE_BS,
2091					       fore200e->bus->descr_alignment) < 0) {
2092		
2093		fore200e_dma_chunk_free(fore200e, &bsq->status);
2094		return -ENOMEM;
2095	    }
2096	    
2097	    /* get the base address of the cp resident buffer supply queue entries */
2098	    cp_entry = fore200e->virt_base + 
2099		       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2100	    
2101	    /* fill the host resident and cp resident buffer supply queue entries */
2102	    for (i = 0; i < QUEUE_SIZE_BS; i++) {
2103		
2104		bsq->host_entry[ i ].status = 
2105		                     FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2106	        bsq->host_entry[ i ].rbd_block =
2107		                     FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2108		bsq->host_entry[ i ].rbd_block_dma =
2109		                     FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2110		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2111		
2112		*bsq->host_entry[ i ].status = STATUS_FREE;
2113		
2114		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i), 
2115				     &cp_entry[ i ].status_haddr);
2116	    }
2117	}
2118    }
2119
2120    fore200e->state = FORE200E_STATE_INIT_BSQ;
2121    return 0;
2122}
2123
2124
2125static int fore200e_init_rx_queue(struct fore200e *fore200e)
2126{
2127    struct host_rxq*     rxq =  &fore200e->host_rxq;
2128    struct cp_rxq_entry __iomem * cp_entry;
2129    int i;
2130
2131    DPRINTK(2, "receive queue is being initialized\n");
2132
2133    /* allocate and align the array of status words */
2134    if (fore200e_dma_chunk_alloc(fore200e,
2135				       &rxq->status,
2136				       sizeof(enum status), 
2137				       QUEUE_SIZE_RX,
2138				       fore200e->bus->status_alignment) < 0) {
2139	return -ENOMEM;
2140    }
2141
2142    /* allocate and align the array of receive PDU descriptors */
2143    if (fore200e_dma_chunk_alloc(fore200e,
2144				       &rxq->rpd,
2145				       sizeof(struct rpd), 
2146				       QUEUE_SIZE_RX,
2147				       fore200e->bus->descr_alignment) < 0) {
2148	
2149	fore200e_dma_chunk_free(fore200e, &rxq->status);
2150	return -ENOMEM;
2151    }
2152
2153    /* get the base address of the cp resident rx queue entries */
2154    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2155
2156    /* fill the host resident and cp resident rx entries */
2157    for (i=0; i < QUEUE_SIZE_RX; i++) {
2158	
2159	rxq->host_entry[ i ].status = 
2160	                     FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2161	rxq->host_entry[ i ].rpd = 
2162	                     FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2163	rxq->host_entry[ i ].rpd_dma = 
2164	                     FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2165	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2166
2167	*rxq->host_entry[ i ].status = STATUS_FREE;
2168
2169	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i), 
2170			     &cp_entry[ i ].status_haddr);
2171
2172	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2173			     &cp_entry[ i ].rpd_haddr);
2174    }
2175
2176    /* set the head entry of the queue */
2177    rxq->head = 0;
2178
2179    fore200e->state = FORE200E_STATE_INIT_RXQ;
2180    return 0;
2181}
2182
2183
2184static int fore200e_init_tx_queue(struct fore200e *fore200e)
2185{
2186    struct host_txq*     txq =  &fore200e->host_txq;
2187    struct cp_txq_entry __iomem * cp_entry;
2188    int i;
2189
2190    DPRINTK(2, "transmit queue is being initialized\n");
2191
2192    /* allocate and align the array of status words */
2193    if (fore200e_dma_chunk_alloc(fore200e,
2194				       &txq->status,
2195				       sizeof(enum status), 
2196				       QUEUE_SIZE_TX,
2197				       fore200e->bus->status_alignment) < 0) {
2198	return -ENOMEM;
2199    }
2200
2201    /* allocate and align the array of transmit PDU descriptors */
2202    if (fore200e_dma_chunk_alloc(fore200e,
2203				       &txq->tpd,
2204				       sizeof(struct tpd), 
2205				       QUEUE_SIZE_TX,
2206				       fore200e->bus->descr_alignment) < 0) {
2207	
2208	fore200e_dma_chunk_free(fore200e, &txq->status);
2209	return -ENOMEM;
2210    }
2211
2212    /* get the base address of the cp resident tx queue entries */
2213    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2214
2215    /* fill the host resident and cp resident tx entries */
2216    for (i=0; i < QUEUE_SIZE_TX; i++) {
2217	
2218	txq->host_entry[ i ].status = 
2219	                     FORE200E_INDEX(txq->status.align_addr, enum status, i);
2220	txq->host_entry[ i ].tpd = 
2221	                     FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2222	txq->host_entry[ i ].tpd_dma  = 
2223                             FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2224	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2225
2226	*txq->host_entry[ i ].status = STATUS_FREE;
2227	
2228	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i), 
2229			     &cp_entry[ i ].status_haddr);
2230	
2231        /* although there is a one-to-one mapping of tx queue entries and tpds,
2232	   we do not write here the DMA (physical) base address of each tpd into
2233	   the related cp resident entry, because the cp relies on this write
2234	   operation to detect that a new pdu has been submitted for tx */
2235    }
2236
2237    /* set the head and tail entries of the queue */
2238    txq->head = 0;
2239    txq->tail = 0;
2240
2241    fore200e->state = FORE200E_STATE_INIT_TXQ;
2242    return 0;
2243}
2244
2245
2246static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2247{
2248    struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
2249    struct cp_cmdq_entry __iomem * cp_entry;
2250    int i;
2251
2252    DPRINTK(2, "command queue is being initialized\n");
2253
2254    /* allocate and align the array of status words */
2255    if (fore200e_dma_chunk_alloc(fore200e,
2256				       &cmdq->status,
2257				       sizeof(enum status), 
2258				       QUEUE_SIZE_CMD,
2259				       fore200e->bus->status_alignment) < 0) {
2260	return -ENOMEM;
2261    }
2262    
2263    /* get the base address of the cp resident cmd queue entries */
2264    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2265
2266    /* fill the host resident and cp resident cmd entries */
2267    for (i=0; i < QUEUE_SIZE_CMD; i++) {
2268	
2269	cmdq->host_entry[ i ].status   = 
2270                              FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2271	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2272
2273	*cmdq->host_entry[ i ].status = STATUS_FREE;
2274
2275	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i), 
2276                             &cp_entry[ i ].status_haddr);
2277    }
2278
2279    /* set the head entry of the queue */
2280    cmdq->head = 0;
2281
2282    fore200e->state = FORE200E_STATE_INIT_CMDQ;
2283    return 0;
2284}
2285
2286
2287static void fore200e_param_bs_queue(struct fore200e *fore200e,
2288				    enum buffer_scheme scheme,
2289				    enum buffer_magn magn, int queue_length,
2290				    int pool_size, int supply_blksize)
2291{
2292    struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2293
2294    fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
2295    fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2296    fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
2297    fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
2298}
2299
2300
2301static int fore200e_initialize(struct fore200e *fore200e)
2302{
2303    struct cp_queues __iomem * cpq;
2304    int               ok, scheme, magn;
2305
2306    DPRINTK(2, "device %s being initialized\n", fore200e->name);
2307
2308    mutex_init(&fore200e->rate_mtx);
2309    spin_lock_init(&fore200e->q_lock);
2310
2311    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2312
2313    /* enable cp to host interrupts */
2314    fore200e->bus->write(1, &cpq->imask);
2315
2316    if (fore200e->bus->irq_enable)
2317	fore200e->bus->irq_enable(fore200e);
2318    
2319    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2320
2321    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2322    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
2323    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);
2324
2325    fore200e->bus->write(RSD_EXTENSION,  &cpq->init.rsd_extension);
2326    fore200e->bus->write(TSD_EXTENSION,  &cpq->init.tsd_extension);
2327
2328    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2329	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2330	    fore200e_param_bs_queue(fore200e, scheme, magn,
2331				    QUEUE_SIZE_BS, 
2332				    fore200e_rx_buf_nbr[ scheme ][ magn ],
2333				    RBD_BLK_SIZE);
2334
2335    /* issue the initialize command */
2336    fore200e->bus->write(STATUS_PENDING,    &cpq->init.status);
2337    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2338
2339    ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2340    if (ok == 0) {
2341	printk(FORE200E "device %s initialization failed\n", fore200e->name);
2342	return -ENODEV;
2343    }
2344
2345    printk(FORE200E "device %s initialized\n", fore200e->name);
2346
2347    fore200e->state = FORE200E_STATE_INITIALIZE;
2348    return 0;
2349}
2350
2351
2352static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2353{
2354    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2355
2356#if 0
2357    printk("%c", c);
2358#endif
2359    fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2360}
2361
2362
2363static int fore200e_monitor_getc(struct fore200e *fore200e)
2364{
2365    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2366    unsigned long      timeout = jiffies + msecs_to_jiffies(50);
2367    int                c;
2368
2369    while (time_before(jiffies, timeout)) {
2370
2371	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2372
2373	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2374
2375	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2376#if 0
2377	    printk("%c", c & 0xFF);
2378#endif
2379	    return c & 0xFF;
2380	}
2381    }
2382
2383    return -1;
2384}
2385
2386
2387static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
2388{
2389    while (*str) {
2390
2391	/* the i960 monitor doesn't accept any new character if it has something to say */
2392	while (fore200e_monitor_getc(fore200e) >= 0);
2393	
2394	fore200e_monitor_putc(fore200e, *str++);
2395    }
2396
2397    while (fore200e_monitor_getc(fore200e) >= 0);
2398}
2399
2400#ifdef __LITTLE_ENDIAN
2401#define FW_EXT ".bin"
2402#else
2403#define FW_EXT "_ecd.bin2"
2404#endif
2405
2406static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2407{
2408    const struct firmware *firmware;
2409    const struct fw_header *fw_header;
2410    const __le32 *fw_data;
2411    u32 fw_size;
2412    u32 __iomem *load_addr;
2413    char buf[48];
2414    int err;
2415
2416    sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2417    if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
2418	printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2419	return err;
2420    }
2421
2422    fw_data = (const __le32 *)firmware->data;
2423    fw_size = firmware->size / sizeof(u32);
2424    fw_header = (const struct fw_header *)firmware->data;
2425    load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2426
2427    DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2428	    fore200e->name, load_addr, fw_size);
2429
2430    if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2431	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2432	goto release;
2433    }
2434
2435    for (; fw_size--; fw_data++, load_addr++)
2436	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2437
2438    DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2439
2440#if defined(__sparc_v9__)
2441    /* reported to be required by SBA cards on some sparc64 hosts */
2442    fore200e_spin(100);
2443#endif
2444
2445    sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2446    fore200e_monitor_puts(fore200e, buf);
2447
2448    if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2449	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2450	goto release;
2451    }
2452
2453    printk(FORE200E "device %s firmware started\n", fore200e->name);
2454
2455    fore200e->state = FORE200E_STATE_START_FW;
2456    err = 0;
2457
2458release:
2459    release_firmware(firmware);
2460    return err;
2461}
2462
2463
2464static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2465{
2466    struct atm_dev* atm_dev;
2467
2468    DPRINTK(2, "device %s being registered\n", fore200e->name);
2469
2470    atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2471                               -1, NULL);
2472    if (atm_dev == NULL) {
2473	printk(FORE200E "unable to register device %s\n", fore200e->name);
2474	return -ENODEV;
2475    }
2476
2477    atm_dev->dev_data = fore200e;
2478    fore200e->atm_dev = atm_dev;
2479
2480    atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2481    atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2482
2483    fore200e->available_cell_rate = ATM_OC3_PCR;
2484
2485    fore200e->state = FORE200E_STATE_REGISTER;
2486    return 0;
2487}
2488
2489
2490static int fore200e_init(struct fore200e *fore200e, struct device *parent)
2491{
2492    if (fore200e_register(fore200e, parent) < 0)
2493	return -ENODEV;
2494    
2495    if (fore200e->bus->configure(fore200e) < 0)
2496	return -ENODEV;
2497
2498    if (fore200e->bus->map(fore200e) < 0)
2499	return -ENODEV;
2500
2501    if (fore200e_reset(fore200e, 1) < 0)
2502	return -ENODEV;
2503
2504    if (fore200e_load_and_start_fw(fore200e) < 0)
2505	return -ENODEV;
2506
2507    if (fore200e_initialize(fore200e) < 0)
2508	return -ENODEV;
2509
2510    if (fore200e_init_cmd_queue(fore200e) < 0)
2511	return -ENOMEM;
2512
2513    if (fore200e_init_tx_queue(fore200e) < 0)
2514	return -ENOMEM;
2515
2516    if (fore200e_init_rx_queue(fore200e) < 0)
2517	return -ENOMEM;
2518
2519    if (fore200e_init_bs_queue(fore200e) < 0)
2520	return -ENOMEM;
2521
2522    if (fore200e_alloc_rx_buf(fore200e) < 0)
2523	return -ENOMEM;
2524
2525    if (fore200e_get_esi(fore200e) < 0)
2526	return -EIO;
2527
2528    if (fore200e_irq_request(fore200e) < 0)
2529	return -EBUSY;
2530
2531    fore200e_supply(fore200e);
2532
2533    /* all done, board initialization is now complete */
2534    fore200e->state = FORE200E_STATE_COMPLETE;
2535    return 0;
2536}
2537
2538#ifdef CONFIG_SBUS
2539static const struct of_device_id fore200e_sba_match[];
2540static int fore200e_sba_probe(struct platform_device *op)
2541{
2542	const struct of_device_id *match;
2543	struct fore200e *fore200e;
2544	static int index = 0;
2545	int err;
2546
2547	match = of_match_device(fore200e_sba_match, &op->dev);
2548	if (!match)
2549		return -EINVAL;
2550
2551	fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2552	if (!fore200e)
2553		return -ENOMEM;
2554
2555	fore200e->bus = &fore200e_sbus_ops;
2556	fore200e->dev = &op->dev;
2557	fore200e->irq = op->archdata.irqs[0];
2558	fore200e->phys_base = op->resource[0].start;
2559
2560	sprintf(fore200e->name, "SBA-200E-%d", index);
2561
2562	err = fore200e_init(fore200e, &op->dev);
2563	if (err < 0) {
2564		fore200e_shutdown(fore200e);
2565		kfree(fore200e);
2566		return err;
2567	}
2568
2569	index++;
2570	dev_set_drvdata(&op->dev, fore200e);
2571
2572	return 0;
2573}
2574
2575static int fore200e_sba_remove(struct platform_device *op)
2576{
2577	struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2578
2579	fore200e_shutdown(fore200e);
2580	kfree(fore200e);
2581
2582	return 0;
2583}
2584
2585static const struct of_device_id fore200e_sba_match[] = {
2586	{
2587		.name = SBA200E_PROM_NAME,
2588	},
2589	{},
2590};
2591MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2592
2593static struct platform_driver fore200e_sba_driver = {
2594	.driver = {
2595		.name = "fore_200e",
2596		.of_match_table = fore200e_sba_match,
2597	},
2598	.probe		= fore200e_sba_probe,
2599	.remove		= fore200e_sba_remove,
2600};
2601#endif
2602
2603#ifdef CONFIG_PCI
2604static int fore200e_pca_detect(struct pci_dev *pci_dev,
2605			       const struct pci_device_id *pci_ent)
2606{
2607    struct fore200e* fore200e;
2608    int err = 0;
2609    static int index = 0;
2610
2611    if (pci_enable_device(pci_dev)) {
2612	err = -EINVAL;
2613	goto out;
2614    }
2615
2616    if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2617	err = -EINVAL;
2618	goto out;
2619    }
2620    
2621    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2622    if (fore200e == NULL) {
2623	err = -ENOMEM;
2624	goto out_disable;
2625    }
2626
2627    fore200e->bus       = &fore200e_pci_ops;
2628    fore200e->dev	= &pci_dev->dev;
2629    fore200e->irq       = pci_dev->irq;
2630    fore200e->phys_base = pci_resource_start(pci_dev, 0);
2631
2632    sprintf(fore200e->name, "PCA-200E-%d", index - 1);
2633
2634    pci_set_master(pci_dev);
2635
2636    printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
2637	   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2638
2639    sprintf(fore200e->name, "PCA-200E-%d", index);
2640
2641    err = fore200e_init(fore200e, &pci_dev->dev);
2642    if (err < 0) {
2643	fore200e_shutdown(fore200e);
2644	goto out_free;
2645    }
2646
2647    ++index;
2648    pci_set_drvdata(pci_dev, fore200e);
2649
2650out:
2651    return err;
2652
2653out_free:
2654    kfree(fore200e);
2655out_disable:
2656    pci_disable_device(pci_dev);
2657    goto out;
2658}
2659
2660
2661static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
2662{
2663    struct fore200e *fore200e;
2664
2665    fore200e = pci_get_drvdata(pci_dev);
2666
2667    fore200e_shutdown(fore200e);
2668    kfree(fore200e);
2669    pci_disable_device(pci_dev);
2670}
2671
2672
2673static const struct pci_device_id fore200e_pca_tbl[] = {
2674    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
2675    { 0, }
2676};
2677
2678MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2679
2680static struct pci_driver fore200e_pca_driver = {
2681    .name =     "fore_200e",
2682    .probe =    fore200e_pca_detect,
2683    .remove =   fore200e_pca_remove_one,
2684    .id_table = fore200e_pca_tbl,
2685};
2686#endif
2687
2688static int __init fore200e_module_init(void)
2689{
2690	int err = 0;
2691
2692	printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2693
2694#ifdef CONFIG_SBUS
2695	err = platform_driver_register(&fore200e_sba_driver);
2696	if (err)
2697		return err;
2698#endif
2699
2700#ifdef CONFIG_PCI
2701	err = pci_register_driver(&fore200e_pca_driver);
2702#endif
2703
2704#ifdef CONFIG_SBUS
2705	if (err)
2706		platform_driver_unregister(&fore200e_sba_driver);
2707#endif
2708
2709	return err;
2710}
2711
2712static void __exit fore200e_module_cleanup(void)
2713{
2714#ifdef CONFIG_PCI
2715	pci_unregister_driver(&fore200e_pca_driver);
2716#endif
2717#ifdef CONFIG_SBUS
2718	platform_driver_unregister(&fore200e_sba_driver);
2719#endif
2720}
2721
2722static int
2723fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2724{
2725    struct fore200e*     fore200e  = FORE200E_DEV(dev);
2726    struct fore200e_vcc* fore200e_vcc;
2727    struct atm_vcc*      vcc;
2728    int                  i, len, left = *pos;
2729    unsigned long        flags;
2730
2731    if (!left--) {
2732
2733	if (fore200e_getstats(fore200e) < 0)
2734	    return -EIO;
2735
2736	len = sprintf(page,"\n"
2737		       " device:\n"
2738		       "   internal name:\t\t%s\n", fore200e->name);
2739
2740	/* print bus-specific information */
2741	if (fore200e->bus->proc_read)
2742	    len += fore200e->bus->proc_read(fore200e, page + len);
2743	
2744	len += sprintf(page + len,
2745		"   interrupt line:\t\t%s\n"
2746		"   physical base address:\t0x%p\n"
2747		"   virtual base address:\t0x%p\n"
2748		"   factory address (ESI):\t%pM\n"
2749		"   board serial number:\t\t%d\n\n",
2750		fore200e_irq_itoa(fore200e->irq),
2751		(void*)fore200e->phys_base,
2752		fore200e->virt_base,
2753		fore200e->esi,
2754		fore200e->esi[4] * 256 + fore200e->esi[5]);
2755
2756	return len;
2757    }
2758
2759    if (!left--)
2760	return sprintf(page,
2761		       "   free small bufs, scheme 1:\t%d\n"
2762		       "   free large bufs, scheme 1:\t%d\n"
2763		       "   free small bufs, scheme 2:\t%d\n"
2764		       "   free large bufs, scheme 2:\t%d\n",
2765		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2766		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2767		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2768		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2769
2770    if (!left--) {
2771	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2772
2773	len = sprintf(page,"\n\n"
2774		      " cell processor:\n"
2775		      "   heartbeat state:\t\t");
2776	
2777	if (hb >> 16 != 0xDEAD)
2778	    len += sprintf(page + len, "0x%08x\n", hb);
2779	else
2780	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2781
2782	return len;
2783    }
2784
2785    if (!left--) {
2786	static const char* media_name[] = {
2787	    "unshielded twisted pair",
2788	    "multimode optical fiber ST",
2789	    "multimode optical fiber SC",
2790	    "single-mode optical fiber ST",
2791	    "single-mode optical fiber SC",
2792	    "unknown"
2793	};
2794
2795	static const char* oc3_mode[] = {
2796	    "normal operation",
2797	    "diagnostic loopback",
2798	    "line loopback",
2799	    "unknown"
2800	};
2801
2802	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2803	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2804	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2805	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2806	u32 oc3_index;
2807
2808	if (media_index > 4)
2809		media_index = 5;
2810	
2811	switch (fore200e->loop_mode) {
2812	    case ATM_LM_NONE:    oc3_index = 0;
2813		                 break;
2814	    case ATM_LM_LOC_PHY: oc3_index = 1;
2815		                 break;
2816	    case ATM_LM_RMT_PHY: oc3_index = 2;
2817		                 break;
2818	    default:             oc3_index = 3;
2819	}
2820
2821	return sprintf(page,
2822		       "   firmware release:\t\t%d.%d.%d\n"
2823		       "   monitor release:\t\t%d.%d\n"
2824		       "   media type:\t\t\t%s\n"
2825		       "   OC-3 revision:\t\t0x%x\n"
2826                       "   OC-3 mode:\t\t\t%s",
2827		       fw_release >> 16, fw_release << 16 >> 24,  fw_release << 24 >> 24,
2828		       mon960_release >> 16, mon960_release << 16 >> 16,
2829		       media_name[ media_index ],
2830		       oc3_revision,
2831		       oc3_mode[ oc3_index ]);
2832    }
2833
2834    if (!left--) {
2835	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2836
2837	return sprintf(page,
2838		       "\n\n"
2839		       " monitor:\n"
2840		       "   version number:\t\t%d\n"
2841		       "   boot status word:\t\t0x%08x\n",
2842		       fore200e->bus->read(&cp_monitor->mon_version),
2843		       fore200e->bus->read(&cp_monitor->bstat));
2844    }
2845
2846    if (!left--)
2847	return sprintf(page,
2848		       "\n"
2849		       " device statistics:\n"
2850		       "  4b5b:\n"
2851		       "     crc_header_errors:\t\t%10u\n"
2852		       "     framing_errors:\t\t%10u\n",
2853		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2854		       be32_to_cpu(fore200e->stats->phy.framing_errors));
2855    
2856    if (!left--)
2857	return sprintf(page, "\n"
2858		       "  OC-3:\n"
2859		       "     section_bip8_errors:\t%10u\n"
2860		       "     path_bip8_errors:\t\t%10u\n"
2861		       "     line_bip24_errors:\t\t%10u\n"
2862		       "     line_febe_errors:\t\t%10u\n"
2863		       "     path_febe_errors:\t\t%10u\n"
2864		       "     corr_hcs_errors:\t\t%10u\n"
2865		       "     ucorr_hcs_errors:\t\t%10u\n",
2866		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2867		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2868		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2869		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2870		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2871		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2872		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2873
2874    if (!left--)
2875	return sprintf(page,"\n"
2876		       "   ATM:\t\t\t\t     cells\n"
2877		       "     TX:\t\t\t%10u\n"
2878		       "     RX:\t\t\t%10u\n"
2879		       "     vpi out of range:\t\t%10u\n"
2880		       "     vpi no conn:\t\t%10u\n"
2881		       "     vci out of range:\t\t%10u\n"
2882		       "     vci no conn:\t\t%10u\n",
2883		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2884		       be32_to_cpu(fore200e->stats->atm.cells_received),
2885		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2886		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2887		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2888		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2889    
2890    if (!left--)
2891	return sprintf(page,"\n"
2892		       "   AAL0:\t\t\t     cells\n"
2893		       "     TX:\t\t\t%10u\n"
2894		       "     RX:\t\t\t%10u\n"
2895		       "     dropped:\t\t\t%10u\n",
2896		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2897		       be32_to_cpu(fore200e->stats->aal0.cells_received),
2898		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));
2899    
2900    if (!left--)
2901	return sprintf(page,"\n"
2902		       "   AAL3/4:\n"
2903		       "     SAR sublayer:\t\t     cells\n"
2904		       "       TX:\t\t\t%10u\n"
2905		       "       RX:\t\t\t%10u\n"
2906		       "       dropped:\t\t\t%10u\n"
2907		       "       CRC errors:\t\t%10u\n"
2908		       "       protocol errors:\t\t%10u\n\n"
2909		       "     CS  sublayer:\t\t      PDUs\n"
2910		       "       TX:\t\t\t%10u\n"
2911		       "       RX:\t\t\t%10u\n"
2912		       "       dropped:\t\t\t%10u\n"
2913		       "       protocol errors:\t\t%10u\n",
2914		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
2915		       be32_to_cpu(fore200e->stats->aal34.cells_received),
2916		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
2917		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
2918		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
2919		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
2920		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
2921		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
2922		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
2923    
2924    if (!left--)
2925	return sprintf(page,"\n"
2926		       "   AAL5:\n"
2927		       "     SAR sublayer:\t\t     cells\n"
2928		       "       TX:\t\t\t%10u\n"
2929		       "       RX:\t\t\t%10u\n"
2930		       "       dropped:\t\t\t%10u\n"
2931		       "       congestions:\t\t%10u\n\n"
2932		       "     CS  sublayer:\t\t      PDUs\n"
2933		       "       TX:\t\t\t%10u\n"
2934		       "       RX:\t\t\t%10u\n"
2935		       "       dropped:\t\t\t%10u\n"
2936		       "       CRC errors:\t\t%10u\n"
2937		       "       protocol errors:\t\t%10u\n",
2938		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
2939		       be32_to_cpu(fore200e->stats->aal5.cells_received),
2940		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
2941		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
2942		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
2943		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
2944		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
2945		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
2946		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
2947    
2948    if (!left--)
2949	return sprintf(page,"\n"
2950		       "   AUX:\t\t       allocation failures\n"
2951		       "     small b1:\t\t\t%10u\n"
2952		       "     large b1:\t\t\t%10u\n"
2953		       "     small b2:\t\t\t%10u\n"
2954		       "     large b2:\t\t\t%10u\n"
2955		       "     RX PDUs:\t\t\t%10u\n"
2956		       "     TX PDUs:\t\t\t%10lu\n",
2957		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
2958		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
2959		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
2960		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
2961		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
2962		       fore200e->tx_sat);
2963    
2964    if (!left--)
2965	return sprintf(page,"\n"
2966		       " receive carrier:\t\t\t%s\n",
2967		       fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
2968    
2969    if (!left--) {
2970        return sprintf(page,"\n"
2971		       " VCCs:\n  address   VPI VCI   AAL "
2972		       "TX PDUs   TX min/max size  RX PDUs   RX min/max size\n");
2973    }
2974
2975    for (i = 0; i < NBR_CONNECT; i++) {
2976
2977	vcc = fore200e->vc_map[i].vcc;
2978
2979	if (vcc == NULL)
2980	    continue;
2981
2982	spin_lock_irqsave(&fore200e->q_lock, flags);
2983
2984	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
2985
2986	    fore200e_vcc = FORE200E_VCC(vcc);
2987	    ASSERT(fore200e_vcc);
2988
2989	    len = sprintf(page,
2990			  "  %pK  %03d %05d %1d   %09lu %05d/%05d      %09lu %05d/%05d\n",
2991			  vcc,
2992			  vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
2993			  fore200e_vcc->tx_pdu,
2994			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
2995			  fore200e_vcc->tx_max_pdu,
2996			  fore200e_vcc->rx_pdu,
2997			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
2998			  fore200e_vcc->rx_max_pdu);
2999
3000	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
3001	    return len;
3002	}
3003
3004	spin_unlock_irqrestore(&fore200e->q_lock, flags);
3005    }
3006    
3007    return 0;
3008}
3009
3010module_init(fore200e_module_init);
3011module_exit(fore200e_module_cleanup);
3012
3013
3014static const struct atmdev_ops fore200e_ops = {
3015	.open       = fore200e_open,
3016	.close      = fore200e_close,
3017	.ioctl      = fore200e_ioctl,
3018	.getsockopt = fore200e_getsockopt,
3019	.setsockopt = fore200e_setsockopt,
3020	.send       = fore200e_send,
3021	.change_qos = fore200e_change_qos,
3022	.proc_read  = fore200e_proc_read,
3023	.owner      = THIS_MODULE
3024};
3025
3026MODULE_LICENSE("GPL");
3027#ifdef CONFIG_PCI
3028#ifdef __LITTLE_ENDIAN__
3029MODULE_FIRMWARE("pca200e.bin");
3030#else
3031MODULE_FIRMWARE("pca200e_ecd.bin2");
3032#endif
3033#endif /* CONFIG_PCI */
3034#ifdef CONFIG_SBUS
3035MODULE_FIRMWARE("sba200e_ecd.bin2");
3036#endif