1/******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
6 Version: 1.0
7*******************************************************************************
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
13
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
      supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The following are the change log and history:
25
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
          around for the hardware anomalies.
31 Add the CBR support.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
37 Add SMP support.
38
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41*******************************************************************************/
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/mm.h>
46#include <linux/pci.h>
47#include <linux/errno.h>
48#include <linux/atm.h>
49#include <linux/atmdev.h>
50#include <linux/sonet.h>
51#include <linux/skbuff.h>
52#include <linux/time.h>
53#include <linux/delay.h>
54#include <linux/uio.h>
55#include <linux/init.h>
56#include <linux/interrupt.h>
57#include <linux/wait.h>
58#include <linux/slab.h>
59#include <asm/io.h>
60#include <linux/atomic.h>
61#include <linux/uaccess.h>
62#include <asm/string.h>
63#include <asm/byteorder.h>
64#include <linux/vmalloc.h>
65#include <linux/jiffies.h>
66#include <linux/nospec.h>
67#include "iphase.h"
68#include "suni.h"
/* Swap the two bytes of a 16-bit value.  The argument is fully
 * parenthesized so expressions like swap_byte_order(a | b) expand
 * correctly (the old form broke for operators binding looser than &). */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
70
71#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
72
73static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
74static void desc_dbg(IADEV *iadev);
75
76static IADEV *ia_dev[8];
77static struct atm_dev *_ia_dev[8];
78static int iadev_count;
79static void ia_led_timer(struct timer_list *unused);
80static DEFINE_TIMER(ia_timer, ia_led_timer);
81static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
82static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
83static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
84 |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
85
86module_param(IA_TX_BUF, int, 0);
87module_param(IA_TX_BUF_SZ, int, 0);
88module_param(IA_RX_BUF, int, 0);
89module_param(IA_RX_BUF_SZ, int, 0);
90module_param(IADebugFlag, uint, 0644);
91
92MODULE_LICENSE("GPL");
93
94/**************************** IA_LIB **********************************/
95
96static void ia_init_rtn_q (IARTN_Q *que)
97{
98 que->next = NULL;
99 que->tail = NULL;
100}
101
102static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
103{
104 data->next = NULL;
105 if (que->next == NULL)
106 que->next = que->tail = data;
107 else {
108 data->next = que->next;
109 que->next = data;
110 }
111 return;
112}
113
114static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
115 IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
116 if (!entry)
117 return -ENOMEM;
118 entry->data = data;
119 entry->next = NULL;
120 if (que->next == NULL)
121 que->next = que->tail = entry;
122 else {
123 que->tail->next = entry;
124 que->tail = que->tail->next;
125 }
126 return 1;
127}
128
129static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
130 IARTN_Q *tmpdata;
131 if (que->next == NULL)
132 return NULL;
133 tmpdata = que->next;
134 if ( que->next == que->tail)
135 que->next = que->tail = NULL;
136 else
137 que->next = que->next->next;
138 return tmpdata;
139}
140
141static void ia_hack_tcq(IADEV *dev) {
142
143 u_short desc1;
144 u_short tcq_wr;
145 struct ia_vcc *iavcc_r = NULL;
146
147 tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
148 while (dev->host_tcq_wr != tcq_wr) {
149 desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
150 if (!desc1) ;
151 else if (!dev->desc_tbl[desc1 -1].timestamp) {
152 IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
153 *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
154 }
155 else if (dev->desc_tbl[desc1 -1].timestamp) {
156 if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
157 printk("IA: Fatal err in get_desc\n");
158 continue;
159 }
160 iavcc_r->vc_desc_cnt--;
161 dev->desc_tbl[desc1 -1].timestamp = 0;
162 IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
163 dev->desc_tbl[desc1 -1].txskb, desc1);)
164 if (iavcc_r->pcr < dev->rate_limit) {
165 IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
166 if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
167 printk("ia_hack_tcq: No memory available\n");
168 }
169 dev->desc_tbl[desc1 -1].iavcc = NULL;
170 dev->desc_tbl[desc1 -1].txskb = NULL;
171 }
172 dev->host_tcq_wr += 2;
173 if (dev->host_tcq_wr > dev->ffL.tcq_ed)
174 dev->host_tcq_wr = dev->ffL.tcq_st;
175 }
176} /* ia_hack_tcq */
177
/*
 * Allocate the next free transmit descriptor number from the TCQ.
 * Returns the 1-based descriptor number, or 0xFFFF when none is free.
 *
 * Every ~50 jiffies (or when the TCQ appears empty) it first runs a
 * recovery pass: any descriptor whose in-flight time exceeded its VC's
 * ltimeout is forced back onto the TCQ and its table entry cleared.
 * Stamps the chosen descriptor's timestamp with the current jiffies.
 * NOTE(review): `timer` is function-static, so this recovery throttle
 * is shared across all adapters.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short desc_num, i;
  struct sk_buff *skb;
  struct ia_vcc *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;  /* last recovery-pass time (jiffies) */
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
            i++;
            continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* Push the stale descriptor back just before the read ptr. */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd = dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;   /* TCQ empty: no free descriptor */

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* Skip zero slots and descriptors that are still in flight. */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
        dev->ffL.tcq_rd = dev->ffL.tcq_st;   /* wrap */
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
235
/*
 * Workaround for a segmentation-engine lockup on ABR VCs.
 * Every 5th call per VC it samples the VC's hardware state; a VC that
 * still has a last_desc pending while either (a) stuck in ABR_STATE
 * across a 10us delay or (b) showing an unchanged cell slot/fraction
 * pair since the previous sample is declared locked up.  Recovery:
 * halt the segmentation engine, force the VC idle, re-insert its VCI
 * into the first free ABR schedule slot and restart the engine.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char foundLockUp;
  vcstatus_t *vcstatus;
  u_short *shd_tbl;
  u_short tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     if( vcstatus->cnt == 0x05 ) {   /* sample only every 5th call */
        abr_vc += vcc->vci;
        eabr_vc += vcc->vci;
        if( eabr_vc->last_desc ) {
           if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
              /* Still pending and still in ABR_STATE -> locked up. */
              if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
                 foundLockUp = 1;
           }
           else {
              /* Progress check: unchanged slot+fraction means stuck. */
              tempCellSlot = abr_vc->last_cell_slot;
              tempFract = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
                 foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
           }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
        writew(0xFFFD, dev->seg_reg+MODE_REG_0);   /* halt seg engine */
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
        /* Find the first empty ABR schedule slot for this VCI. */
        shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
        for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
        if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        /* Restart the engine and re-arm completion interrupts. */
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
        vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
295
296/*
297** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
298**
299** +----+----+------------------+-------------------------------+
300** | R | NZ | 5-bit exponent | 9-bit mantissa |
301** +----+----+------------------+-------------------------------+
302**
303** R = reserved (written as 0)
304** NZ = 0 if 0 cells/sec; 1 otherwise
305**
306** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
307*/
308static u16
309cellrate_to_float(u32 cr)
310{
311
312#define NZ 0x4000
313#define M_BITS 9 /* Number of bits in mantissa */
314#define E_BITS 5 /* Number of bits in exponent */
315#define M_MASK 0x1ff
316#define E_MASK 0x1f
317 u16 flot;
318 u32 tmp = cr & 0x00ffffff;
319 int i = 0;
320 if (cr == 0)
321 return 0;
322 while (tmp != 1) {
323 tmp >>= 1;
324 i++;
325 }
326 if (i == M_BITS)
327 flot = NZ | (i << M_BITS) | (cr & M_MASK);
328 else if (i < M_BITS)
329 flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
330 else
331 flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
332 return flot;
333}
334
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
/* Inverse of cellrate_to_float(); currently unused (compiled out) and
 * kept only for reference. */
static u32
float_to_cellrate(u16 rate)
{
	u32	exp, mantissa, cps;
	if ((rate & NZ) == 0)
		return 0;
	exp = (rate >> M_BITS) & E_MASK;
	mantissa = rate & M_MASK;
	if (exp == 0)
		return 1;
	/* Restore the implicit leading one above the mantissa. */
	cps = (1 << M_BITS) | mantissa;
	if (exp == M_BITS)
		cps = cps;	/* no shift needed; intentional no-op */
	else if (exp > M_BITS)
		cps <<= (exp - M_BITS);
	else
		cps >>= (M_BITS - exp);
	return cps;
}
#endif 
359
360static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
361 srv_p->class_type = ATM_ABR;
362 srv_p->pcr = dev->LineRate;
363 srv_p->mcr = 0;
364 srv_p->icr = 0x055cb7;
365 srv_p->tbe = 0xffffff;
366 srv_p->frtt = 0x3a;
367 srv_p->rif = 0xf;
368 srv_p->rdf = 0xb;
369 srv_p->nrm = 0x4;
370 srv_p->trm = 0x7;
371 srv_p->cdf = 0x3;
372 srv_p->adtf = 50;
373}
374
/*
 * Program an ABR VC into the adapter's on-board VC tables.
 * flag == 1: initialise the segmentation (FFRED / transmit) side entry,
 *            converting the srv_cls_param_t rate parameters into the
 *            chip's packed register formats.
 * flag == 0: initialise the reassembly (RFRED / receive) side entry.
 * Always returns 0 (the parameter sanity checks are compiled out).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32 icr;
  u8 trm, nrm, crm;
  u16 adtf, air, *ptr16;
  /* Index the main (transmit) VC table by VCI. */
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
                                  /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       /* TRM factor; clamp to at least 1 so the field is never zero. */
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR bounded by TBE/FRTT; the two forms avoid losing precision
        * in the integer division depending on which operand is larger. */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
                                ((srv_p->tbe/srv_p->frtt)*1000000) :
                                (1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       /* Mark the VCI as ABR in the reassembly lookup table, then fill
        * the receive-side ABR VC entry. */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       /* NOTE(review): `pcr << (15 - rif)` looks like it may have been
        * intended as a right shift (AIR = PCR / 2^(15-RIF)); confirm
        * against the (i)Chip data book before changing. */
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return 0;
}
/*
 * Reserve entries in the on-board CBR schedule table for a CBR VC.
 * The VC's max_pcr is rounded to a whole number of table entries (each
 * worth dev->Granularity cells/sec) and those entries are spread as
 * evenly as possible through the table, probing outward from the ideal
 * slot whenever a slot is already occupied.
 * Returns 0 on success, -1 if no PCR was given, -EBUSY if not enough
 * schedule-table bandwidth remains.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16   cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   /* Round up to the next entry when the request is within a quarter
    * step of it (3:1 bias towards rounding up). */
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          /* Probe alternately below (idealSlot - inc) and above
           * (idealSlot + inc) the ideal slot, wrapping at the table
           * edges, until a free (zero) slot is found. */
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       /* First CBR VC: turn the CBR scheduler on. */
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
576static void ia_cbrVc_close (struct atm_vcc *vcc) {
577 IADEV *iadev;
578 u16 *SchedTbl, NullVci = 0;
579 u32 i, NumFound;
580
581 iadev = INPH_IA_DEV(vcc->dev);
582 iadev->NumEnabledCBR--;
583 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
584 if (iadev->NumEnabledCBR == 0) {
585 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
586 IF_CBR (printk("CBR support disabled\n");)
587 }
588 NumFound = 0;
589 for (i=0; i < iadev->CbrTotEntries; i++)
590 {
591 if (*SchedTbl == vcc->vci) {
592 iadev->CbrRemEntries++;
593 *SchedTbl = NullVci;
594 IF_CBR(NumFound++;)
595 }
596 SchedTbl++;
597 }
598 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
599}
600
601static int ia_avail_descs(IADEV *iadev) {
602 int tmp = 0;
603 ia_hack_tcq(iadev);
604 if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
605 tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
606 else
607 tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
608 iadev->ffL.tcq_st) / 2;
609 return tmp;
610}
611
612static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
613
/*
 * Flush the software transmit backlog to the hardware: for each free
 * transmit descriptor, dequeue one skb from tx_backlog and hand it to
 * ia_pkt_tx().  Skbs whose VC has vanished or closed are freed; a
 * failed transmit is requeued at the head for a later retry.
 * Always returns 0.
 */
static int ia_que_tx (IADEV *iadev) {
   struct sk_buff *skb;
   int num_desc;
   struct atm_vcc *vcc;
   /* Number of free hardware TX descriptors available right now. */
   num_desc = ia_avail_descs(iadev);

   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
      if (!(vcc = ATM_SKB(skb)->vcc)) {
         dev_kfree_skb_any(skb);
         printk("ia_que_tx: Null vcc\n");
         break;
      }
      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
         /* VC was closed while this skb sat in the backlog. */
         dev_kfree_skb_any(skb);
         printk("Free the SKB on closed vci %d \n", vcc->vci);
         break;
      }
      if (ia_pkt_tx (vcc, skb)) {
         /* Transmit failed; put it back at the head for retry. */
         skb_queue_head(&iadev->tx_backlog, skb);
      }
      num_desc--;
   }
   return 0;
}
638
/*
 * Reclaim completed transmit skbs.  Drains the tx_return_q populated by
 * ia_hack_tcq(): for each returned descriptor, pops skbs from the VC's
 * txing_skb queue up to and including the completed one, handing each
 * back via vcc->pop() when available, otherwise freeing it.  Finally
 * kicks ia_que_tx() so freshly freed descriptors are refilled from the
 * backlog.
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q * rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       /* Release every earlier skb still queued as in-flight; any skb
        * ahead of the completed one must itself have completed. */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* Completed skb not found on the in-flight queue: requeue
           * the return entry at the head and retry on the next poll. */
          IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
/*
 * Write one 16-bit word to the serial NOVRAM/EEPROM at word address
 * `addr`, bit-banged MSB-first through the NVRAM_* helper macros.
 * Currently unused (compiled out); kept for reference.
 * NOTE(review): `NVRAM_CMD(EXTEND + EWDS)` below has no trailing
 * semicolon — harmless only if the macro supplies its own; verify in
 * iphase.h before re-enabling this code.
 */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
	u32	t;
	int	i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	/* Busy-wait until the device reports ready via the NVDO pin. */
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
736
/*
 * Read one 16-bit word from the serial NOVRAM/EEPROM at word address
 * `addr`, bit-banged MSB-first through the NVRAM_* helper macros.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short	val;
	u32	t;
	int	i;
	/*
	 * Read the first bit that was clocked with the falling edge of the
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);	/* assemble MSB-first into val */
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
760
/*
 * Determine the adapter variant from EEPROM word 25 and size the driver
 * accordingly: scale the default TX/RX buffer counts down for boards
 * with 512K (/2) or 128K (/8) of packet memory (explicit module
 * parameters are honoured as given), then derive the PHY type and the
 * usable line rate (the 26/27*53 factor converts raw bit rate to an
 * ATM cell payload rate in cells/sec).
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* Halve the defaults; keep user-supplied counts unchanged. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      /* Smallest memory option: one eighth of the defaults. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0 /* disabled; note the mismatched braces — fix before re-enabling */
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
       iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
821
822static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
823{
824 return readl(ia->phy + (reg >> 2));
825}
826
827static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
828{
829 writel(val, ia->phy + (reg >> 2));
830}
831
/*
 * PHY front-end interrupt handler: read (and thereby acknowledge) the
 * per-PHY interrupt/status registers and refresh carrier_detect from
 * the loss-of-signal indication appropriate to the PHY variant.
 */
static void ia_frontend_intr(struct iadev_priv *iadev)
{
	u32 status;

	if (iadev->phy_type & FE_25MBIT_PHY) {
		status = ia_phy_read32(iadev, MB25_INTR_STATUS);
		iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
	} else if (iadev->phy_type & FE_DS3_PHY) {
		/* Read clears the framer interrupt latch. */
		ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
		status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
		iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
	} else if (iadev->phy_type & FE_E3_PHY) {
		/* Read clears the framer interrupt latch. */
		ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
		status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
		iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	} else {
		/* OC3 SUNI PHY. */
		status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
		iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
	}

	printk(KERN_INFO "IA: SUNI carrier %s\n",
		iadev->carrier_detect ? "detected" : "lost signal");
}
855
/*
 * Initialise the 25.6 Mbit/s MB25 PHY: discard received idle/error
 * cells, clear diagnostics, and sample the initial carrier state from
 * the good-signal bit.
 */
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

	iadev->carrier_detect =
		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}
867
/* One PHY register write: offset/value pair for table-driven init
 * via ia_phy_write(). */
struct ia_reg {
	u16 reg;	/* byte offset of the PHY register */
	u16 val;	/* value to write */
};
872
873static void ia_phy_write(struct iadev_priv *iadev,
874 const struct ia_reg *regs, int len)
875{
876 while (len--) {
877 ia_phy_write32(iadev, regs->reg, regs->val);
878 regs++;
879 }
880}
881
/*
 * DS3-specific part of SUNI PM7345 bring-up: sample the initial
 * carrier state from the framer's LOSV bit, then load the DS3 framer,
 * transmitter and scrambler configuration table.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init[] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
899
/*
 * E3-specific part of SUNI PM7345 bring-up: sample the initial carrier
 * state from the framer's LOS bit, then load the E3 framer/maintenance
 * options and enable E3 mode in the SUNI configuration register.
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init[] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
919
/*
 * Common SUNI PM7345 initialisation for DS3/E3 boards: run the
 * rate-specific setup first, then program the shared interrupt,
 * test, and RX/TX cell-processor registers (idle and data cell
 * header patterns/masks), and finally clear all loopback modes.
 */
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_init[] = {
		/* Enable RSOP loss of signal interrupt. */
		{ SUNI_INTR_ENBL,		0x28 },
		/* Clear error counters. */
		{ SUNI_ID_RESET,		0 },
		/* Clear "PMCTST" in master test register. */
		{ SUNI_MASTER_TEST,		0 },

		{ SUNI_RXCP_CTRL,		0x2c },
		{ SUNI_RXCP_FCTRL,		0x81 },

		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },

		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },

		{ SUNI_RXCP_CELL_PAT_H1,	0 },
		{ SUNI_RXCP_CELL_PAT_H2,	0 },
		{ SUNI_RXCP_CELL_PAT_H3,	0 },
		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },

		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
		{ SUNI_RXCP_CELL_MASK_H4,	0xff },

		{ SUNI_TXCP_CTRL,		0xa4 },
		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
	};

	if (iadev->phy_type & FE_DS3_PHY)
		ia_suni_pm7345_init_ds3(iadev);
	else
		ia_suni_pm7345_init_e3(iadev);

	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

	/* Clear line/cell/diagnostic/payload loopback bits. */
	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
	return;
}
973
974
975/***************************** IA_LIB END *****************************/
976
977#ifdef CONFIG_ATM_IA_DEBUG
978static int tcnter = 0;
979static void xdump( u_char* cp, int length, char* prefix )
980{
981 int col, count;
982 u_char prntBuf[120];
983 u_char* pBuf = prntBuf;
984 count = 0;
985 while(count < length){
986 pBuf += sprintf( pBuf, "%s", prefix );
987 for(col = 0;count + col < length && col < 16; col++){
988 if (col != 0 && (col % 4) == 0)
989 pBuf += sprintf( pBuf, " " );
990 pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
991 }
992 while(col++ < 16){ /* pad end of buffer with blanks */
993 if ((col % 4) == 0)
994 sprintf( pBuf, " " );
995 pBuf += sprintf( pBuf, " " );
996 }
997 pBuf += sprintf( pBuf, " " );
998 for(col = 0;count + col < length && col < 16; col++){
999 if (isprint((int)cp[count + col]))
1000 pBuf += sprintf( pBuf, "%c", cp[count + col] );
1001 else
1002 pBuf += sprintf( pBuf, "." );
1003 }
1004 printk("%s\n", prntBuf);
1005 count += col;
1006 pBuf = prntBuf;
1007 }
1008
1009} /* close xdump(... */
1010#endif /* CONFIG_ATM_IA_DEBUG */
1011
1012
1013static struct atm_dev *ia_boards = NULL;
1014
1015#define ACTUAL_RAM_BASE \
1016 RAM_BASE*((iadev->mem)/(128 * 1024))
1017#define ACTUAL_SEG_RAM_BASE \
1018 IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1019#define ACTUAL_REASS_RAM_BASE \
1020 IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1021
1022
1023/*-- some utilities and memory allocation stuff will come here -------------*/
1024
/*
 * Debug dump of the transmit-descriptor state: prints the hardware TCQ
 * write pointer (with the descriptors at and just before it), the
 * driver's shadow pointers, every slot between the TCQ start and end
 * registers, and the timestamp of every entry in desc_tbl.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  /* Walk every 2-byte TCQ slot from start to end pointer. */
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1049
1050
1051/*----------------------------- Receiving side stuff --------------------------*/
1052
/*
 * rx_excp_rcvd - drain the reassembly exception queue.
 *
 * The entire body is compiled out (#if 0): servicing the queue here
 * generated too many exception interrupts, so the handler is a no-op.
 * The dead code below is kept only as a reference for how the queue
 * would be walked (read VCI + error code per entry, advance and wrap
 * the read pointer, write it back).
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
        printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
        vci = readw(iadev->reass_ram+excpq_rd_ptr);
        error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
        excpq_rd_ptr += 4;
        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
        writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1081
1082static void free_desc(struct atm_dev *dev, int desc)
1083{
1084 IADEV *iadev;
1085 iadev = INPH_IA_DEV(dev);
1086 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1087 iadev->rfL.fdq_wr +=2;
1088 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1089 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1090 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1091}
1092
1093
/*
 * rx_pkt - service one entry of the packet-complete queue (PCQ).
 *
 * Pops the next completed descriptor from the PCQ, validates it and its
 * VCC, allocates an skb, and queues a DLE so the chip DMAs the PDU from
 * adapter packet RAM into the skb.  The skb is completed later in
 * rx_dle_intr().
 *
 * Returns 0 on success or after dropping a bad PDU, -EINVAL if the PCQ
 * is empty, -1 for a bad descriptor or missing VCC.
 */
static int rx_pkt(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  unsigned short status;
  struct rx_buf_desc __iomem *buf_desc_ptr;
  int desc;
  struct dle* wr_ptr;
  int len;
  struct sk_buff *skb;
  u_int buf_addr, dma_addr;

  iadev = INPH_IA_DEV(dev);
  /* read pointer == hardware write pointer means nothing to receive */
  if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
  {
      printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
      return -EINVAL;
  }
  /* mask 1st 3 bits to get the actual descno. */
  desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
  IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
        iadev->reass_ram, iadev->rfL.pcq_rd, desc);
      printk(" pcq_wr_ptr = 0x%x\n",
        readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
  /* update the read pointer - maybe we shud do this in the end*/
  if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
      iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
  else
      iadev->rfL.pcq_rd += 2;
  writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

  /* get the buffer desc entry.
     update stuff. - doesn't seem to be any update necessary
  */
  buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
  /* make the ptr point to the corresponding buffer desc entry */
  buf_desc_ptr += desc;
  /* validate descriptor number and the VC index it carries before use;
     a bad descriptor is returned to the free queue and dropped */
  if (!desc || (desc > iadev->num_rx_desc) ||
      ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
      free_desc(dev, desc);
      IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
      return -1;
  }
  vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
  if (!vcc)
  {
      free_desc(dev, desc);
      printk("IA: null vcc, drop PDU\n");
      return -1;
  }


  /* might want to check the status bits for errors */
  status = (u_short) (buf_desc_ptr->desc_mode);
  if (status & (RX_CER | RX_PTE | RX_OFL))
  {
      atomic_inc(&vcc->stats->rx_err);
      IF_ERR(printk("IA: bad packet, dropping it");)
      if (status & RX_CER) {
          IF_ERR(printk(" cause: packet CRC error\n");)
      }
      else if (status & RX_PTE) {
          IF_ERR(printk(" cause: packet time out\n");)
      }
      else {
          IF_ERR(printk(" cause: buffer overflow\n");)
      }
      goto out_free_desc;
  }

  /*
     build DLE.
  */

  buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
  dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
  /* length is how far the chip's dma pointer advanced past the buffer
     start — presumably the received PDU size; TODO confirm vs. chip doc */
  len = dma_addr - buf_addr;
  if (len > iadev->rx_buf_sz) {
      printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
      atomic_inc(&vcc->stats->rx_err);
      goto out_free_desc;
  }

  if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
      if (vcc->vci < 32)
          printk("Drop control packets\n");
      goto out_free_desc;
  }
  skb_put(skb,len);
  // pwang_test
  ATM_SKB(skb)->vcc = vcc;
  ATM_DESC(skb) = desc;
  skb_queue_tail(&iadev->rx_dma_q, skb);

  /* Build the DLE structure */
  wr_ptr = iadev->rx_dle_q.write;
  wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
                                        len, DMA_FROM_DEVICE);
  wr_ptr->local_pkt_addr = buf_addr;
  wr_ptr->bytes = len;	/* We don't know this do we ?? */
  wr_ptr->mode = DMA_INT_ENABLE;

  /* shud take care of wrap around here too. */
  if(++wr_ptr == iadev->rx_dle_q.end)
      wr_ptr = iadev->rx_dle_q.start;
  iadev->rx_dle_q.write = wr_ptr;
  udelay(1);
  /* Increment transaction counter */
  writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out: return 0;
out_free_desc:
  free_desc(dev, desc);
  goto out;
}
1208
/*
 * rx_intr - reassembly (receive) interrupt handler.
 *
 * Reads the reassembly interrupt status and services, in order:
 * completed packets (drain the PCQ via rx_pkt), free-queue-empty
 * recovery, exception-queue events, and raw-cell notifications.
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
      /* do something */
      /* Basically recvd an interrupt for receiving a packet.
      A descriptor would have been written to the packet complete
      queue. Get all the descriptors and set up dma to move the
      packets till the packet complete queue is empty..
      */
      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
      IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
      while(!(state & PCQ_EMPTY))
      {
           rx_pkt(dev);
           state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
      }
      iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     /* First occurrence: snapshot the packet count and time and mark
        rxing = 0.  If the free queue is still empty later with no
        packets received in the interim, force-recycle all descriptors
        and re-mask the noisy interrupts as a recovery measure. */
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        for (i = 1; i <= iadev->num_rx_desc; i++)
            free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
      /* probably need to handle the exception queue also. */
      IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
      rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
      /* need to handle the raw incoming cells. This depends on
      whether we have programmed to receive the raw cells or not.
      Else ignore. */
      IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
  }
}
1269
1270
/*
 * rx_dle_intr - receive DMA-list-entry completion handler.
 *
 * Walks the rx DLE ring from our read pointer up to the chip's current
 * list pointer, and for every completed entry: dequeues the matching
 * skb, frees its buffer descriptor, validates the AAL5 CPCS trailer,
 * trims the skb to the real PDU length and pushes it up the stack.
 * Finally, if receive interrupts were masked due to free-queue
 * exhaustion, re-enables them once descriptors are available again.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
     - do we really need to do this. Think not. */
  /* DMA is done, just get all the recevie buffers from the rx dma queue
     and push them up to the higher layer protocol. Also free the desc
     associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
          dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

          /* NOTE(review): unmaps using rx_dle_q.write->sys_pkt_addr
             rather than the current dle's sys_pkt_addr — looks
             suspicious; confirm whether dle->sys_pkt_addr was meant. */
          dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
                           len, DMA_FROM_DEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
          if (!vcc) {
              printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          // get real pkt length pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
          length = swap_byte_order(trailer->length);
          /* drop PDUs whose trailer length is impossible for the buffer */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          skb_trim(skb, length);

          /* Display the packet */
          IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

          IF_RX(printk("rx_dle_intr: skb push");)
          vcc->push(vcc,skb);
          atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
          dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
     unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
              iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1372
1373
/*
 * open_rx - set up the receive side of a VCC.
 *
 * Marks the VCI valid in the hardware VC lookup table, programs either
 * the ABR parameters or the reassembly-table entry (UBR/other), and
 * records the VCC in rx_open[] so rx_pkt() can attach incoming PDUs.
 *
 * Returns 0 on success, -EINVAL if ABR is requested on a 25 Mbit PHY.
 */
static int open_rx(struct atm_vcc *vcc)
{
  IADEV *iadev;
  u_short __iomem *vc_table;
  u_short __iomem *reass_ptr;
  IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

  if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
  iadev = INPH_IA_DEV(vcc->dev);
  if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
     if (iadev->phy_type & FE_25MBIT_PHY) {
        printk("IA: ABR not support\n");
        return -EINVAL;
     }
  }
  /* Make only this VCI in the vc table valid and let all
     others be invalid entries */
  vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
  vc_table += vcc->vci;
  /* mask the last 6 bits and OR it with 3 for 1K VCs */

  /* NOTE(review): direct dereference of an __iomem pointer; writew()
     would be the conventional accessor — confirm against driver style. */
  *vc_table = vcc->vci << 6;
  /* Also keep a list of open rx vcs so that we can attach them with
     incoming PDUs later. */
  if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
      (vcc->qos.txtp.traffic_class == ATM_ABR))
  {
        srv_cls_param_t srv_p;
        init_abr_vc(iadev, &srv_p);
        ia_open_abr_vc(iadev, &srv_p, vcc, 0);
  }
  else { /* for UBR later may need to add CBR logic */
        reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
        reass_ptr += vcc->vci;
        *reass_ptr = NO_AAL5_PKT;
  }

  if (iadev->rx_open[vcc->vci])
     printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
                         vcc->dev->number, vcc->vci);
  iadev->rx_open[vcc->vci] = vcc;
  return 0;
}
1417
/*
 * rx_init - one-time initialization of the receive (reassembly) side.
 *
 * Allocates the rx DMA-list-entry (DLE) ring, resets the reassembly
 * logic, lays out the receive control memory (buffer descriptors, free
 * queue, packet-complete queue, exception queue, reassembly / VC / ABR
 * tables), caches the queue pointers in iadev->rfL, programs the
 * timeout registers, allocates the rx_open[] VCC array and finally
 * puts the reassembly logic online.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int rx_init(struct atm_dev *dev)
{
  IADEV *iadev;
  struct rx_buf_desc __iomem *buf_desc_ptr;
  unsigned long rx_pkt_start = 0;
  void *dle_addr;
  struct abr_vc_table *abr_vc_table;
  u16 *vc_table;
  u16 *reass_table;
  int i,j, vcsize_sel;
  u_short freeq_st_adr;
  u_short *freeq_start;

  iadev = INPH_IA_DEV(dev);
  // spin_lock_init(&iadev->rx_lock);

  /* Allocate 4k bytes - more aligned than needed (4k boundary) */
  dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
                                &iadev->rx_dle_dma, GFP_KERNEL);
  if (!dle_addr) {
      printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
      goto err_out;
  }
  iadev->rx_dle_q.start = (struct dle *)dle_addr;
  iadev->rx_dle_q.read = iadev->rx_dle_q.start;
  iadev->rx_dle_q.write = iadev->rx_dle_q.start;
  iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
  /* the end of the dle q points to the entry after the last
     DLE that can be used. */

  /* write the upper 20 bits of the start address to rx list address register */
  /* We know this is 32bit bus addressed so the following is safe */
  writel(iadev->rx_dle_dma & 0xfffff000,
         iadev->dma + IPHASE5575_RX_LIST_ADDR);
  IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_TX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
  printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_RX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

  /* mask everything, take the logic offline, then reset it */
  writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
  writew(0, iadev->reass_reg+MODE_REG);
  writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

  /* Receive side control memory map
     -------------------------------

     Buffer descr 0x0000 (736 - 23K)
     VP Table 0x5c00 (256 - 512)
     Except q 0x5e00 (128 - 512)
     Free buffer q 0x6000 (1K - 2K)
     Packet comp q 0x6800 (1K - 2K)
     Reass Table 0x7000 (1K - 2K)
     VC Table 0x7800 (1K - 2K)
     ABR VC Table 0x8000 (1K - 32K)
  */

  /* Base address for Buffer Descriptor Table */
  writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
  /* Set the buffer size register */
  writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

  /* Initialize each entry in the Buffer Descriptor Table */
  iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
  buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
  memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
  buf_desc_ptr++;
  rx_pkt_start = iadev->rx_pkt_ram;
  /* descriptor 0 is unused; 1..num_rx_desc each get one rx_buf_sz slice
     of adapter packet RAM */
  for(i=1; i<=iadev->num_rx_desc; i++)
  {
      memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
      buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
      buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
      buf_desc_ptr++;
      rx_pkt_start += iadev->rx_buf_sz;
  }
  IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
  /* Free buffer descriptor queue */
  i = FREE_BUF_DESC_Q*iadev->memSize;
  writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
  writew(i, iadev->reass_reg+FREEQ_ST_ADR);
  writew(i+iadev->num_rx_desc*sizeof(u_short),
                                        iadev->reass_reg+FREEQ_ED_ADR);
  writew(i, iadev->reass_reg+FREEQ_RD_PTR);
  writew(i+iadev->num_rx_desc*sizeof(u_short),
                                        iadev->reass_reg+FREEQ_WR_PTR);
  /* Fill the FREEQ with all the free descriptors. */
  freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
  freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
  for(i=1; i<=iadev->num_rx_desc; i++)
  {
      *freeq_start = (u_short)i;
      freeq_start++;
  }
  IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
  /* Packet Complete Queue */
  i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
  writew(i, iadev->reass_reg+PCQ_ST_ADR);
  writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
  writew(i, iadev->reass_reg+PCQ_RD_PTR);
  writew(i, iadev->reass_reg+PCQ_WR_PTR);

  /* Exception Queue */
  i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
  writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
  writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
                                             iadev->reass_reg+EXCP_Q_ED_ADR);
  writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
  writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

  /* Load local copy of FREEQ and PCQ ptrs */
  iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
  iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
  iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
  iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
  iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
  iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
  iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
  iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

  IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
        iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
        iadev->rfL.pcq_wr);)
  /* just for check - no VP TBL */
  /* VP Table */
  /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
  /* initialize VP Table for invalid VPIs
     - I guess we can write all 1s or 0x000f in the entire memory
     space or something similar.
  */

  /* This seems to work and looks right to me too !!! */
  i = REASS_TABLE * iadev->memSize;
  writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
  /* initialize Reassembly table to I don't know what ???? */
  reass_table = (u16 *)(iadev->reass_ram+i);
  j = REASS_TABLE_SZ * iadev->memSize;
  for(i=0; i < j; i++)
      *reass_table++ = NO_AAL5_PKT;
  /* vcsize_sel = log2(8192 / num_vc); encodes the VC table size for the
     VC_LKUP_BASE register's low bits */
  i = 8*1024;
  vcsize_sel = 0;
  while (i != iadev->num_vc) {
      i /= 2;
      vcsize_sel++;
  }
  i = RX_VC_TABLE * iadev->memSize;
  writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
  vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
  j = RX_VC_TABLE_SZ * iadev->memSize;
  for(i = 0; i < j; i++)
  {
      /* shift the reassembly pointer by 3 + lower 3 bits of
         vc_lkup_base register (=3 for 1K VCs) and the last byte
         is those low 3 bits.
         Shall program this later.
      */
      *vc_table = (i << 6) | 15; /* for invalid VCI */
      vc_table++;
  }
  /* ABR VC table */
  i = ABR_VC_TABLE * iadev->memSize;
  writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

  i = ABR_VC_TABLE * iadev->memSize;
  abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
  /* NOTE(review): the ABR VC table is sized with REASS_TABLE_SZ here,
     not an ABR-specific size constant — verify this is intentional. */
  j = REASS_TABLE_SZ * iadev->memSize;
  memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
  for(i = 0; i < j; i++) {
      abr_vc_table->rdf = 0x0003;
      abr_vc_table->air = 0x5eb1;
      abr_vc_table++;
  }

  /* Initialize other registers */

  /* VP Filter Register set for VC Reassembly only */
  writew(0xff00, iadev->reass_reg+VP_FILTER);
  writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
  writew(0x1, iadev->reass_reg+PROTOCOL_ID);

  /* Packet Timeout Count related Registers :
     Set packet timeout to occur in about 3 seconds
     Set Packet Aging Interval count register to overflow in about 4 us
  */
  writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

  /* NOTE(review): this reuses j left over from the ABR loop above
     (REASS_TABLE_SZ * memSize) to derive the timeout range — confirm
     that dependency is intentional. */
  i = (j >> 6) & 0xFF;
  j += 2 * (j - 1);
  i |= ((j << 2) & 0xFF00);
  writew(i, iadev->reass_reg+TMOUT_RANGE);

  /* initiate the desc_tble */
  for(i=0; i<iadev->num_tx_desc;i++)
      iadev->desc_tbl[i].timestamp = 0;

  /* to clear the interrupt status register - read it */
  readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

  /* Mask Register - clear it */
  writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

  skb_queue_head_init(&iadev->rx_dma_q);
  iadev->rx_free_desc_qhead = NULL;

  iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
  if (!iadev->rx_open) {
      printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
      dev->number);
      goto err_free_dle;
  }

  iadev->rxing = 1;
  iadev->rx_pkt_cnt = 0;
  /* Mode Register */
  writew(R_ONLINE, iadev->reass_reg+MODE_REG);
  return 0;

err_free_dle:
  dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
                    iadev->rx_dle_dma);
err_out:
  return -ENOMEM;
}
1641
1642
1643/*
1644 The memory map suggested in appendix A and the coding for it.
1645 Keeping it around just in case we change our mind later.
1646
1647 Buffer descr 0x0000 (128 - 4K)
1648 UBR sched 0x1000 (1K - 4K)
1649 UBR Wait q 0x2000 (1K - 4K)
 Commn queues 0x3000 Packet Ready, Transmit comp(0x3100)
1651 (128 - 256) each
1652 extended VC 0x4000 (1K - 8K)
1653 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1654 CBR sched 0x7000 (as needed)
1655 VC table 0x8000 (1K - 32K)
1656*/
1657
1658static void tx_intr(struct atm_dev *dev)
1659{
1660 IADEV *iadev;
1661 unsigned short status;
1662 unsigned long flags;
1663
1664 iadev = INPH_IA_DEV(dev);
1665
1666 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1667 if (status & TRANSMIT_DONE){
1668
1669 IF_EVENT(printk("Transmit Done Intr logic run\n");)
1670 spin_lock_irqsave(&iadev->tx_lock, flags);
1671 ia_tx_poll(iadev);
1672 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1673 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1674 if (iadev->close_pending)
1675 wake_up(&iadev->close_wait);
1676 }
1677 if (status & TCQ_NOT_EMPTY)
1678 {
1679 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1680 }
1681}
1682
/*
 * tx_dle_intr - transmit DMA-list-entry completion handler.
 *
 * Walks the tx DLE ring from our read pointer up to the chip's current
 * list pointer.  For each completed entry it dequeues the matching skb,
 * unmaps the DMA buffer (every other DLE: one pair = skb + trailer),
 * and either completes the skb (pop/free) or — for rate-limited VCs —
 * parks it on the per-VCC txing_skb queue for flow control.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct dle *dle, *cur_dle;
  struct sk_buff *skb;
  struct atm_vcc *vcc;
  struct ia_vcc *iavcc;
  u_int dle_lp;
  unsigned long flags;

  iadev = INPH_IA_DEV(dev);
  spin_lock_irqsave(&iadev->tx_lock, flags);
  dle = iadev->tx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
  while (dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->tx_dma_q);
      if (!skb) break;

      /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
      /* NOTE(review): (dle - start) is an element count, so the modulo
         by 2*sizeof(struct dle) looks off — "% 2" may have been the
         intent; confirm against ia_pkt_tx()'s DLE pairing. */
      if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
          dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
                           DMA_TO_DEVICE);
      }
      vcc = ATM_SKB(skb)->vcc;
      if (!vcc) {
          printk("tx_dle_intr: vcc is null\n");
          spin_unlock_irqrestore(&iadev->tx_lock, flags);
          dev_kfree_skb_any(skb);

          return;
      }
      iavcc = INPH_IA_VCC(vcc);
      if (!iavcc) {
          printk("tx_dle_intr: iavcc is null\n");
          spin_unlock_irqrestore(&iadev->tx_lock, flags);
          dev_kfree_skb_any(skb);
          return;
      }
      if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
         if ((vcc->pop) && (skb->len != 0))
         {
             vcc->pop(vcc, skb);
         }
         else {
             dev_kfree_skb_any(skb);
         }
      }
      else { /* Hold the rate-limited skb for flow control */
         IA_SKB_STATE(skb) |= IA_DLED;
         skb_queue_tail(&iavcc->txing_skb, skb);
      }
      IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
      if (++dle == iadev->tx_dle_q.end)
           dle = iadev->tx_dle_q.start;
  }
  iadev->tx_dle_q.read = dle;
  spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1745
/*
 * open_tx - set up the transmit side of a VCC.
 *
 * Validates traffic class against the PHY, computes the effective PCR
 * and the lock timeout, sizes the socket send buffer for rate-limited
 * VCs, initializes the main/extended VC table entries, and programs the
 * hardware for UBR, ABR or CBR as requested.
 *
 * Returns 0 on success; -EINVAL for unsupported class/SDU size, -EBUSY
 * when an ABR MCR reservation would exceed the line rate, -1 or the
 * ia_cbr_setup() error for CBR failures.
 */
static int open_tx(struct atm_vcc *vcc)
{
  struct ia_vcc *ia_vcc;
  IADEV *iadev;
  struct main_vc *vc;
  struct ext_vc *evc;
  int ret;
  IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
  if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
  iadev = INPH_IA_DEV(vcc->dev);

  /* the 25 Mbit PHY supports neither ABR nor CBR */
  if (iadev->phy_type & FE_25MBIT_PHY) {
     if (vcc->qos.txtp.traffic_class == ATM_ABR) {
         printk("IA: ABR not support\n");
         return -EINVAL;
     }
     if (vcc->qos.txtp.traffic_class == ATM_CBR) {
         printk("IA: CBR not support\n");
         return -EINVAL;
     }
  }
  ia_vcc = INPH_IA_VCC(vcc);
  memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
  if (vcc->qos.txtp.max_sdu >
           (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
     printk("IA: SDU size over (%d) the configured SDU size %d\n",
            vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
     vcc->dev_data = NULL;
     kfree(ia_vcc);
     return -EINVAL;
  }
  ia_vcc->vc_desc_cnt = 0;
  ia_vcc->txing = 1;

  /* find pcr */
  if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
     vcc->qos.txtp.pcr = iadev->LineRate;
  else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
     vcc->qos.txtp.pcr = iadev->LineRate;
  else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
     vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
  if (vcc->qos.txtp.pcr > iadev->LineRate)
      vcc->qos.txtp.pcr = iadev->LineRate;
  ia_vcc->pcr = vcc->qos.txtp.pcr;

  /* ltimeout scales inversely with the rate: faster VCs get shorter
     timeouts */
  if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
  else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
  else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
  else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
  if (ia_vcc->pcr < iadev->rate_limit)
     skb_queue_head_init (&ia_vcc->txing_skb);
  if (ia_vcc->pcr < iadev->rate_limit) {
     struct sock *sk = sk_atm(vcc);

     /* shrink the socket send buffer for slow (rate-limited) VCs */
     if (vcc->qos.txtp.max_sdu != 0) {
        if (ia_vcc->pcr > 60000)
           sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
        else if (ia_vcc->pcr > 2000)
           sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
        else
          sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
     }
     else
       sk->sk_sndbuf = 24576;
  }

  vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
  evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
  vc += vcc->vci;
  evc += vcc->vci;
  memset((caddr_t)vc, 0, sizeof(*vc));
  memset((caddr_t)evc, 0, sizeof(*evc));

  /* store the most significant 4 bits of vci as the last 4 bits
     of first part of atm header.
     store the last 12 bits of vci as first 12 bits of the second
     part of the atm header.
  */
  evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
  evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

  /* check the following for different traffic classes */
  if (vcc->qos.txtp.traffic_class == ATM_UBR)
  {
     vc->type = UBR;
     vc->status = CRC_APPEND;
     vc->acr = cellrate_to_float(iadev->LineRate);
     if (vcc->qos.txtp.pcr > 0)
        vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
     IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                            vcc->qos.txtp.max_pcr,vc->acr);)
  }
  else if (vcc->qos.txtp.traffic_class == ATM_ABR)
  {  srv_cls_param_t srv_p;
     IF_ABR(printk("Tx ABR VCC\n");)
     init_abr_vc(iadev, &srv_p);
     if (vcc->qos.txtp.pcr > 0)
        srv_p.pcr = vcc->qos.txtp.pcr;
     if (vcc->qos.txtp.min_pcr > 0) {
        /* refuse MCR reservations that would oversubscribe the line */
        int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
        if (tmpsum > iadev->LineRate)
            return -EBUSY;
        srv_p.mcr = vcc->qos.txtp.min_pcr;
        iadev->sum_mcr += vcc->qos.txtp.min_pcr;
     }
     else srv_p.mcr = 0;
     /* copy over every ABR parameter the caller supplied */
     if (vcc->qos.txtp.icr)
        srv_p.icr = vcc->qos.txtp.icr;
     if (vcc->qos.txtp.tbe)
        srv_p.tbe = vcc->qos.txtp.tbe;
     if (vcc->qos.txtp.frtt)
        srv_p.frtt = vcc->qos.txtp.frtt;
     if (vcc->qos.txtp.rif)
        srv_p.rif = vcc->qos.txtp.rif;
     if (vcc->qos.txtp.rdf)
        srv_p.rdf = vcc->qos.txtp.rdf;
     if (vcc->qos.txtp.nrm_pres)
        srv_p.nrm = vcc->qos.txtp.nrm;
     if (vcc->qos.txtp.trm_pres)
        srv_p.trm = vcc->qos.txtp.trm;
     if (vcc->qos.txtp.adtf_pres)
        srv_p.adtf = vcc->qos.txtp.adtf;
     if (vcc->qos.txtp.cdf_pres)
        srv_p.cdf = vcc->qos.txtp.cdf;
     if (srv_p.icr > srv_p.pcr)
        srv_p.icr = srv_p.pcr;
     IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
                                      srv_p.pcr, srv_p.mcr);)
     ia_open_abr_vc(iadev, &srv_p, vcc, 1);
  } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
        if (iadev->phy_type & FE_25MBIT_PHY) {
            printk("IA: CBR not support\n");
            return -EINVAL;
        }
        if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
           IF_CBR(printk("PCR is not available\n");)
           return -1;
        }
        vc->type = CBR;
        vc->status = CRC_APPEND;
        if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
            return ret;
        }
  } else {
        printk("iadev: Non UBR, ABR and CBR traffic not supported\n");
  }

  iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
  IF_EVENT(printk("ia open_tx returning \n");)
  return 0;
}
1897
1898
1899static int tx_init(struct atm_dev *dev)
1900{
1901 IADEV *iadev;
1902 struct tx_buf_desc *buf_desc_ptr;
1903 unsigned int tx_pkt_start;
1904 void *dle_addr;
1905 int i;
1906 u_short tcq_st_adr;
1907 u_short *tcq_start;
1908 u_short prq_st_adr;
1909 u_short *prq_start;
1910 struct main_vc *vc;
1911 struct ext_vc *evc;
1912 u_short tmp16;
1913 u32 vcsize_sel;
1914
1915 iadev = INPH_IA_DEV(dev);
1916 spin_lock_init(&iadev->tx_lock);
1917
1918 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1919 readw(iadev->seg_reg+SEG_MASK_REG));)
1920
1921 /* Allocate 4k (boundary aligned) bytes */
1922 dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1923 &iadev->tx_dle_dma, GFP_KERNEL);
1924 if (!dle_addr) {
1925 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1926 goto err_out;
1927 }
1928 iadev->tx_dle_q.start = (struct dle*)dle_addr;
1929 iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1930 iadev->tx_dle_q.write = iadev->tx_dle_q.start;
1931 iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1932
1933 /* write the upper 20 bits of the start address to tx list address register */
1934 writel(iadev->tx_dle_dma & 0xfffff000,
1935 iadev->dma + IPHASE5575_TX_LIST_ADDR);
1936 writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1937 writew(0, iadev->seg_reg+MODE_REG_0);
1938 writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1939 iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1940 iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1941 iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1942
1943 /*
1944 Transmit side control memory map
1945 --------------------------------
1946 Buffer descr 0x0000 (128 - 4K)
1947 Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
1948 (512 - 1K) each
1949 TCQ - 4K, PRQ - 5K
1950 CBR Table 0x1800 (as needed) - 6K
1951 UBR Table 0x3000 (1K - 4K) - 12K
1952 UBR Wait queue 0x4000 (1K - 4K) - 16K
1953 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1954 ABR Tbl - 20K, ABR Wq - 22K
1955 extended VC 0x6000 (1K - 8K) - 24K
1956 VC Table 0x8000 (1K - 32K) - 32K
1957
1958 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1959 and Wait q, which can be allotted later.
1960 */
1961
1962 /* Buffer Descriptor Table Base address */
1963 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1964
1965 /* initialize each entry in the buffer descriptor table */
1966 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1967 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1968 buf_desc_ptr++;
1969 tx_pkt_start = TX_PACKET_RAM;
1970 for(i=1; i<=iadev->num_tx_desc; i++)
1971 {
1972 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1973 buf_desc_ptr->desc_mode = AAL5;
1974 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1975 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1976 buf_desc_ptr++;
1977 tx_pkt_start += iadev->tx_buf_sz;
1978 }
1979 iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
1980 sizeof(*iadev->tx_buf),
1981 GFP_KERNEL);
1982 if (!iadev->tx_buf) {
1983 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1984 goto err_free_dle;
1985 }
1986 for (i= 0; i< iadev->num_tx_desc; i++)
1987 {
1988 struct cpcs_trailer *cpcs;
1989
1990 cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1991 if(!cpcs) {
1992 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
1993 goto err_free_tx_bufs;
1994 }
1995 iadev->tx_buf[i].cpcs = cpcs;
1996 iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
1997 cpcs,
1998 sizeof(*cpcs),
1999 DMA_TO_DEVICE);
2000 }
2001 iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
2002 sizeof(*iadev->desc_tbl),
2003 GFP_KERNEL);
2004 if (!iadev->desc_tbl) {
2005 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
2006 goto err_free_all_tx_bufs;
2007 }
2008
2009 /* Communication Queues base address */
2010 i = TX_COMP_Q * iadev->memSize;
2011 writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
2012
2013 /* Transmit Complete Queue */
2014 writew(i, iadev->seg_reg+TCQ_ST_ADR);
2015 writew(i, iadev->seg_reg+TCQ_RD_PTR);
2016 writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
2017 iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2018 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2019 iadev->seg_reg+TCQ_ED_ADR);
2020 /* Fill the TCQ with all the free descriptors. */
2021 tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
2022 tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
2023 for(i=1; i<=iadev->num_tx_desc; i++)
2024 {
2025 *tcq_start = (u_short)i;
2026 tcq_start++;
2027 }
2028
2029 /* Packet Ready Queue */
2030 i = PKT_RDY_Q * iadev->memSize;
2031 writew(i, iadev->seg_reg+PRQ_ST_ADR);
2032 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2033 iadev->seg_reg+PRQ_ED_ADR);
2034 writew(i, iadev->seg_reg+PRQ_RD_PTR);
2035 writew(i, iadev->seg_reg+PRQ_WR_PTR);
2036
2037 /* Load local copy of PRQ and TCQ ptrs */
2038 iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2039 iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2040 iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2041
2042 iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2043 iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2044 iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2045
2046 /* Just for safety initializing the queue to have desc 1 always */
2047 /* Fill the PRQ with all the free descriptors. */
2048 prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2049 prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2050 for(i=1; i<=iadev->num_tx_desc; i++)
2051 {
2052 *prq_start = (u_short)0; /* desc 1 in all entries */
2053 prq_start++;
2054 }
2055 /* CBR Table */
2056 IF_INIT(printk("Start CBR Init\n");)
2057#if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2058 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2059#else /* Charlie's logic is wrong ? */
2060 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2061 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2062 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2063#endif
2064
2065 IF_INIT(printk("value in register = 0x%x\n",
2066 readw(iadev->seg_reg+CBR_PTR_BASE));)
2067 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2068 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2069 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2070 readw(iadev->seg_reg+CBR_TAB_BEG));)
2071 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2072 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2073 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2074 IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2075 iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2076 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2077 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2078 readw(iadev->seg_reg+CBR_TAB_END+1));)
2079
2080 /* Initialize the CBR Schedualing Table */
2081 memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
2082 0, iadev->num_vc*6);
2083 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2084 iadev->CbrEntryPt = 0;
2085 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2086 iadev->NumEnabledCBR = 0;
2087
2088 /* UBR scheduling Table and wait queue */
2089 /* initialize all bytes of UBR scheduler table and wait queue to 0
2090 - SCHEDSZ is 1K (# of entries).
2091 - UBR Table size is 4K
2092 - UBR wait queue is 4K
2093 since the table and wait queues are contiguous, all the bytes
2094 can be initialized by one memeset.
2095 */
2096
2097 vcsize_sel = 0;
2098 i = 8*1024;
2099 while (i != iadev->num_vc) {
2100 i /= 2;
2101 vcsize_sel++;
2102 }
2103
2104 i = MAIN_VC_TABLE * iadev->memSize;
2105 writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2106 i = EXT_VC_TABLE * iadev->memSize;
2107 writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2108 i = UBR_SCHED_TABLE * iadev->memSize;
2109 writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
2110 i = UBR_WAIT_Q * iadev->memSize;
2111 writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
2112 memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2113 0, iadev->num_vc*8);
2114 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2115 /* initialize all bytes of ABR scheduler table and wait queue to 0
2116 - SCHEDSZ is 1K (# of entries).
2117 - ABR Table size is 2K
2118 - ABR wait queue is 2K
2119 since the table and wait queues are contiguous, all the bytes
2120 can be initialized by one memeset.
2121 */
2122 i = ABR_SCHED_TABLE * iadev->memSize;
2123 writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2124 i = ABR_WAIT_Q * iadev->memSize;
2125 writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2126
2127 i = ABR_SCHED_TABLE*iadev->memSize;
2128 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
2129 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2130 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
2131 iadev->testTable = kmalloc_array(iadev->num_vc,
2132 sizeof(*iadev->testTable),
2133 GFP_KERNEL);
2134 if (!iadev->testTable) {
2135 printk("Get freepage failed\n");
2136 goto err_free_desc_tbl;
2137 }
2138 for(i=0; i<iadev->num_vc; i++)
2139 {
2140 memset((caddr_t)vc, 0, sizeof(*vc));
2141 memset((caddr_t)evc, 0, sizeof(*evc));
2142 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2143 GFP_KERNEL);
2144 if (!iadev->testTable[i])
2145 goto err_free_test_tables;
2146 iadev->testTable[i]->lastTime = 0;
2147 iadev->testTable[i]->fract = 0;
2148 iadev->testTable[i]->vc_status = VC_UBR;
2149 vc++;
2150 evc++;
2151 }
2152
2153 /* Other Initialization */
2154
2155 /* Max Rate Register */
2156 if (iadev->phy_type & FE_25MBIT_PHY) {
2157 writew(RATE25, iadev->seg_reg+MAXRATE);
2158 writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2159 }
2160 else {
2161 writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2162 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2163 }
2164 /* Set Idle Header Reigisters to be sure */
2165 writew(0, iadev->seg_reg+IDLEHEADHI);
2166 writew(0, iadev->seg_reg+IDLEHEADLO);
2167
2168 /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
2169 writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2170
2171 iadev->close_pending = 0;
2172 init_waitqueue_head(&iadev->close_wait);
2173 init_waitqueue_head(&iadev->timeout_wait);
2174 skb_queue_head_init(&iadev->tx_dma_q);
2175 ia_init_rtn_q(&iadev->tx_return_q);
2176
2177 /* RM Cell Protocol ID and Message Type */
2178 writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2179 skb_queue_head_init (&iadev->tx_backlog);
2180
2181 /* Mode Register 1 */
2182 writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2183
2184 /* Mode Register 0 */
2185 writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2186
2187 /* Interrupt Status Register - read to clear */
2188 readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2189
2190 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2191 writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2192 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2193 iadev->tx_pkt_cnt = 0;
2194 iadev->rate_limit = iadev->LineRate / 3;
2195
2196 return 0;
2197
2198err_free_test_tables:
2199 while (--i >= 0)
2200 kfree(iadev->testTable[i]);
2201 kfree(iadev->testTable);
2202err_free_desc_tbl:
2203 kfree(iadev->desc_tbl);
2204err_free_all_tx_bufs:
2205 i = iadev->num_tx_desc;
2206err_free_tx_bufs:
2207 while (--i >= 0) {
2208 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2209
2210 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2211 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2212 kfree(desc->cpcs);
2213 }
2214 kfree(iadev->tx_buf);
2215err_free_dle:
2216 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2217 iadev->tx_dle_dma);
2218err_out:
2219 return -ENOMEM;
2220}
2221
2222static irqreturn_t ia_int(int irq, void *dev_id)
2223{
2224 struct atm_dev *dev;
2225 IADEV *iadev;
2226 unsigned int status;
2227 int handled = 0;
2228
2229 dev = dev_id;
2230 iadev = INPH_IA_DEV(dev);
2231 while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2232 {
2233 handled = 1;
2234 IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2235 if (status & STAT_REASSINT)
2236 {
2237 /* do something */
2238 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2239 rx_intr(dev);
2240 }
2241 if (status & STAT_DLERINT)
2242 {
2243 /* Clear this bit by writing a 1 to it. */
2244 writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2245 rx_dle_intr(dev);
2246 }
2247 if (status & STAT_SEGINT)
2248 {
2249 /* do something */
2250 IF_EVENT(printk("IA: tx_intr \n");)
2251 tx_intr(dev);
2252 }
2253 if (status & STAT_DLETINT)
2254 {
2255 writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2256 tx_dle_intr(dev);
2257 }
2258 if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2259 {
2260 if (status & STAT_FEINT)
2261 ia_frontend_intr(iadev);
2262 }
2263 }
2264 return IRQ_RETVAL(handled);
2265}
2266
2267
2268
2269/*----------------------------- entries --------------------------------*/
2270static int get_esi(struct atm_dev *dev)
2271{
2272 IADEV *iadev;
2273 int i;
2274 u32 mac1;
2275 u16 mac2;
2276
2277 iadev = INPH_IA_DEV(dev);
2278 mac1 = cpu_to_be32(le32_to_cpu(readl(
2279 iadev->reg+IPHASE5575_MAC1)));
2280 mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2281 IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2282 for (i=0; i<MAC1_LEN; i++)
2283 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2284
2285 for (i=0; i<MAC2_LEN; i++)
2286 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2287 return 0;
2288}
2289
2290static int reset_sar(struct atm_dev *dev)
2291{
2292 IADEV *iadev;
2293 int i, error = 1;
2294 unsigned int pci[64];
2295
2296 iadev = INPH_IA_DEV(dev);
2297 for(i=0; i<64; i++)
2298 if ((error = pci_read_config_dword(iadev->pci,
2299 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2300 return error;
2301 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2302 for(i=0; i<64; i++)
2303 if ((error = pci_write_config_dword(iadev->pci,
2304 i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2305 return error;
2306 udelay(5);
2307 return 0;
2308}
2309
2310
/*
 * One-time board initialization: size and map BAR0, carve the mapping into
 * register banks and on-board RAM windows, read the ESI (MAC address) and
 * reset the SAR.  Returns 0 on success, a negative errno on mapping/config
 * failures, or 1 if the SAR reset fails.
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	/* 'command' is read only to verify config space is accessible. */
	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	/* BAR size distinguishes the 4K-VC board from the 1K-VC board;
	   memSize is used elsewhere as a table-offset scale factor. */
	if (iadev->pci_map_size == 0x100000){
		iadev->num_vc = 4096;
		dev->ci_range.vci_bits = NR_VCI_4K_LD;
		iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
		iadev->num_vc = 1024;
		iadev->memSize = 1;
	}
	else {
		printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
		return -EINVAL;
	}
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
			    dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Carve the single BAR mapping into the fixed register/RAM windows. */
	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;  /* PHY and DMA share the same window */
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
          iadev->reg,iadev->seg_reg,iadev->reass_reg,
          iadev->phy, iadev->ram, iadev->seg_ram,
          iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
	  iounmap(iadev->base);
	  return error;
	}
	printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
		printk("%s%02X",i ? "-" : "",dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
	   iounmap(iadev->base);
	   printk("IA: reset SAR fail, please try again\n");
	   return 1;
	}
	return 0;
}
2423
2424static void ia_update_stats(IADEV *iadev) {
2425 if (!iadev->carrier_detect)
2426 return;
2427 iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2428 iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2429 iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2430 iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2431 iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2432 iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2433 return;
2434}
2435
2436static void ia_led_timer(struct timer_list *unused) {
2437 unsigned long flags;
2438 static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2439 u_char i;
2440 static u32 ctrl_reg;
2441 for (i = 0; i < iadev_count; i++) {
2442 if (ia_dev[i]) {
2443 ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2444 if (blinking[i] == 0) {
2445 blinking[i]++;
2446 ctrl_reg &= (~CTRL_LED);
2447 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2448 ia_update_stats(ia_dev[i]);
2449 }
2450 else {
2451 blinking[i] = 0;
2452 ctrl_reg |= CTRL_LED;
2453 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2454 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2455 if (ia_dev[i]->close_pending)
2456 wake_up(&ia_dev[i]->close_wait);
2457 ia_tx_poll(ia_dev[i]);
2458 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2459 }
2460 }
2461 }
2462 mod_timer(&ia_timer, jiffies + HZ / 4);
2463 return;
2464}
2465
2466static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2467 unsigned long addr)
2468{
2469 writel(value, INPH_IA_DEV(dev)->phy+addr);
2470}
2471
2472static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2473{
2474 return readl(INPH_IA_DEV(dev)->phy+addr);
2475}
2476
2477static void ia_free_tx(IADEV *iadev)
2478{
2479 int i;
2480
2481 kfree(iadev->desc_tbl);
2482 for (i = 0; i < iadev->num_vc; i++)
2483 kfree(iadev->testTable[i]);
2484 kfree(iadev->testTable);
2485 for (i = 0; i < iadev->num_tx_desc; i++) {
2486 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2487
2488 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2489 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2490 kfree(desc->cpcs);
2491 }
2492 kfree(iadev->tx_buf);
2493 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2494 iadev->tx_dle_dma);
2495}
2496
2497static void ia_free_rx(IADEV *iadev)
2498{
2499 kfree(iadev->rx_open);
2500 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2501 iadev->rx_dle_dma);
2502}
2503
/*
 * Bring the adapter online: claim the IRQ, enable PCI memory + bus
 * mastering, program the bus control register, run the tx/rx initialisers
 * and start the front end (25 Mbit, DS3/E3 or SUNI depending on phy_type).
 * On failure, unwinds everything acquired so far via the goto chain.
 * Returns 0 on success or a negative errno.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
	if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
			    dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
	}
	/* @@@ should release IRQ on error */
	/* enabling memory + master */
	if ((error = pci_write_config_word(iadev->pci,
				PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
		    "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
	}
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
	   Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	/* Preserve only LED and front-end-reset bits; set burst sizes and
	   unmask every interrupt source. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
		| CTRL_B8
		| CTRL_B16
		| CTRL_B32
		| CTRL_B48
		| CTRL_B64
		| CTRL_B128
		| CTRL_ERRMASK
		| CTRL_DLETMASK		/* shud be removed l8r */
		| CTRL_DLERMASK
		| CTRL_SEGMASK
		| CTRL_REASSMASK
		| CTRL_FEMASK
		| CTRL_CSPREEMPT;

	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
		printk("Bus status reg after init: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

	ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* Release the front end from reset (CTRL_FE_RST). */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	phy = 0; /* resolve compiler complaint */
	IF_INIT (
	if ((phy=ia_phy_get(dev,0)) == 0x30)
		printk("IA: pm5346,rev.%d\n",phy&0x0f);
	else
		printk("IA: utopia,rev.%0x\n",phy);)

	/* Choose the front-end init path from the detected PHY type. */
	if (iadev->phy_type & FE_25MBIT_PHY)
		ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
		ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2605
/*
 * Close a VCC.  For the transmit side: drains this VCC's packets from the
 * tx backlog, waits (bounded by a PCR-derived timeout) for outstanding
 * descriptors to complete, and releases any ABR/CBR rate reservations.
 * For the receive side: resets the reassembly and VC table entries, lets
 * ABR parameters revert, drains pending DLEs and unregisters the VCC.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
	u16 *vc_table;
	IADEV *iadev;
	struct ia_vcc *ia_vcc;
	struct sk_buff *skb = NULL;
	/* NOTE(review): tmp_vcc_backlog is initialised but never used. */
	struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
	unsigned long closetime, flags;

	iadev = INPH_IA_DEV(vcc->dev);
	ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

	IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
	                        ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
	skb_queue_head_init (&tmp_tx_backlog);
	skb_queue_head_init (&tmp_vcc_backlog);
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		iadev->close_pending++;
		/* Give in-flight transmission ~500ms to settle before draining. */
		prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(500));
		finish_wait(&iadev->timeout_wait, &wait);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		/* Free this VCC's backlogged skbs; park the rest temporarily. */
		while((skb = skb_dequeue(&iadev->tx_backlog))) {
			if (ATM_SKB(skb)->vcc == vcc){
				if (vcc->pop) vcc->pop(vcc, skb);
				else dev_kfree_skb_any(skb);
			}
			else
				skb_queue_tail(&tmp_tx_backlog, skb);
		}
		/* Put other VCCs' skbs back on the shared backlog. */
		while((skb = skb_dequeue(&tmp_tx_backlog)))
			skb_queue_tail(&iadev->tx_backlog, skb);
		IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
		/* Timeout scales inversely with the VCC's peak cell rate. */
		closetime = 300000 / ia_vcc->pcr;
		if (closetime == 0)
			closetime = 1;
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		iadev->close_pending--;
		/* Reset the per-VCI rate-test bookkeeping back to UBR defaults. */
		iadev->testTable[vcc->vci]->lastTime = 0;
		iadev->testTable[vcc->vci]->fract = 0;
		iadev->testTable[vcc->vci]->vc_status = VC_UBR;
		if (vcc->qos.txtp.traffic_class == ATM_ABR) {
			if (vcc->qos.txtp.min_pcr > 0)
				iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
		}
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			ia_vcc = INPH_IA_VCC(vcc);
			iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
			ia_cbrVc_close (vcc);
		}
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		// reset reass table
		vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = NO_AAL5_PKT;
		// reset vc table
		vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = (vcc->vci << 6) | 15;
		if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
			struct abr_vc_table __iomem *abr_vc_table =
				(iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
			abr_vc_table +=  vcc->vci;
			abr_vc_table->rdf = 0x0003;
			abr_vc_table->air = 0x5eb1;
		}
		// Drain the packets
		rx_dle_intr(vcc->dev);
		iadev->rx_open[vcc->vci] = NULL;
	}
	kfree(INPH_IA_VCC(vcc));
	ia_vcc = NULL;
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR,&vcc->flags);
	return;
}
2690
2691static int ia_open(struct atm_vcc *vcc)
2692{
2693 struct ia_vcc *ia_vcc;
2694 int error;
2695 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2696 {
2697 IF_EVENT(printk("ia: not partially allocated resources\n");)
2698 vcc->dev_data = NULL;
2699 }
2700 if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2701 {
2702 IF_EVENT(printk("iphase open: unspec part\n");)
2703 set_bit(ATM_VF_ADDR,&vcc->flags);
2704 }
2705 if (vcc->qos.aal != ATM_AAL5)
2706 return -EINVAL;
2707 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2708 vcc->dev->number, vcc->vpi, vcc->vci);)
2709
2710 /* Device dependent initialization */
2711 ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2712 if (!ia_vcc) return -ENOMEM;
2713 vcc->dev_data = ia_vcc;
2714
2715 if ((error = open_rx(vcc)))
2716 {
2717 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2718 ia_close(vcc);
2719 return error;
2720 }
2721
2722 if ((error = open_tx(vcc)))
2723 {
2724 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2725 ia_close(vcc);
2726 return error;
2727 }
2728
2729 set_bit(ATM_VF_READY,&vcc->flags);
2730
2731#if 0
2732 {
2733 static u8 first = 1;
2734 if (first) {
2735 ia_timer.expires = jiffies + 3*HZ;
2736 add_timer(&ia_timer);
2737 first = 0;
2738 }
2739 }
2740#endif
2741 IF_EVENT(printk("ia open returning\n");)
2742 return 0;
2743}
2744
/* QoS renegotiation is not implemented; accept every request as a no-op. */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2750
2751static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2752{
2753 IA_CMDBUF ia_cmds;
2754 IADEV *iadev;
2755 int i, board;
2756 u16 __user *tmps;
2757 IF_EVENT(printk(">ia_ioctl\n");)
2758 if (cmd != IA_CMD) {
2759 if (!dev->phy->ioctl) return -EINVAL;
2760 return dev->phy->ioctl(dev,cmd,arg);
2761 }
2762 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2763 board = ia_cmds.status;
2764
2765 if ((board < 0) || (board > iadev_count))
2766 board = 0;
2767 board = array_index_nospec(board, iadev_count + 1);
2768
2769 iadev = ia_dev[board];
2770 switch (ia_cmds.cmd) {
2771 case MEMDUMP:
2772 {
2773 switch (ia_cmds.sub_cmd) {
2774 case MEMDUMP_SEGREG:
2775 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2776 tmps = (u16 __user *)ia_cmds.buf;
2777 for(i=0; i<0x80; i+=2, tmps++)
2778 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2779 ia_cmds.status = 0;
2780 ia_cmds.len = 0x80;
2781 break;
2782 case MEMDUMP_REASSREG:
2783 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2784 tmps = (u16 __user *)ia_cmds.buf;
2785 for(i=0; i<0x80; i+=2, tmps++)
2786 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2787 ia_cmds.status = 0;
2788 ia_cmds.len = 0x80;
2789 break;
2790 case MEMDUMP_FFL:
2791 {
2792 ia_regs_t *regs_local;
2793 ffredn_t *ffL;
2794 rfredn_t *rfL;
2795
2796 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2797 regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2798 if (!regs_local) return -ENOMEM;
2799 ffL = ®s_local->ffredn;
2800 rfL = ®s_local->rfredn;
2801 /* Copy real rfred registers into the local copy */
2802 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2803 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2804 /* Copy real ffred registers into the local copy */
2805 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2806 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2807
2808 if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2809 kfree(regs_local);
2810 return -EFAULT;
2811 }
2812 kfree(regs_local);
2813 printk("Board %d registers dumped\n", board);
2814 ia_cmds.status = 0;
2815 }
2816 break;
2817 case READ_REG:
2818 {
2819 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2820 desc_dbg(iadev);
2821 ia_cmds.status = 0;
2822 }
2823 break;
2824 case 0x6:
2825 {
2826 ia_cmds.status = 0;
2827 printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2828 printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2829 }
2830 break;
2831 case 0x8:
2832 {
2833 struct k_sonet_stats *stats;
2834 stats = &PRIV(_ia_dev[board])->sonet_stats;
2835 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2836 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2837 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2838 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2839 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2840 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2841 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2842 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2843 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2844 }
2845 ia_cmds.status = 0;
2846 break;
2847 case 0x9:
2848 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2849 for (i = 1; i <= iadev->num_rx_desc; i++)
2850 free_desc(_ia_dev[board], i);
2851 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2852 iadev->reass_reg+REASS_MASK_REG);
2853 iadev->rxing = 1;
2854
2855 ia_cmds.status = 0;
2856 break;
2857
2858 case 0xb:
2859 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2860 ia_frontend_intr(iadev);
2861 break;
2862 case 0xa:
2863 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2864 {
2865 ia_cmds.status = 0;
2866 IADebugFlag = ia_cmds.maddr;
2867 printk("New debug option loaded\n");
2868 }
2869 break;
2870 default:
2871 ia_cmds.status = 0;
2872 break;
2873 }
2874 }
2875 break;
2876 default:
2877 break;
2878
2879 }
2880 return 0;
2881}
2882
2883static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2884 IADEV *iadev;
2885 struct dle *wr_ptr;
2886 struct tx_buf_desc __iomem *buf_desc_ptr;
2887 int desc;
2888 int comp_code;
2889 int total_len;
2890 struct cpcs_trailer *trailer;
2891 struct ia_vcc *iavcc;
2892
2893 iadev = INPH_IA_DEV(vcc->dev);
2894 iavcc = INPH_IA_VCC(vcc);
2895 if (!iavcc->txing) {
2896 printk("discard packet on closed VC\n");
2897 if (vcc->pop)
2898 vcc->pop(vcc, skb);
2899 else
2900 dev_kfree_skb_any(skb);
2901 return 0;
2902 }
2903
2904 if (skb->len > iadev->tx_buf_sz - 8) {
2905 printk("Transmit size over tx buffer size\n");
2906 if (vcc->pop)
2907 vcc->pop(vcc, skb);
2908 else
2909 dev_kfree_skb_any(skb);
2910 return 0;
2911 }
2912 if ((unsigned long)skb->data & 3) {
2913 printk("Misaligned SKB\n");
2914 if (vcc->pop)
2915 vcc->pop(vcc, skb);
2916 else
2917 dev_kfree_skb_any(skb);
2918 return 0;
2919 }
2920 /* Get a descriptor number from our free descriptor queue
2921 We get the descr number from the TCQ now, since I am using
2922 the TCQ as a free buffer queue. Initially TCQ will be
2923 initialized with all the descriptors and is hence, full.
2924 */
2925 desc = get_desc (iadev, iavcc);
2926 if (desc == 0xffff)
2927 return 1;
2928 comp_code = desc >> 13;
2929 desc &= 0x1fff;
2930
2931 if ((desc == 0) || (desc > iadev->num_tx_desc))
2932 {
2933 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2934 atomic_inc(&vcc->stats->tx);
2935 if (vcc->pop)
2936 vcc->pop(vcc, skb);
2937 else
2938 dev_kfree_skb_any(skb);
2939 return 0; /* return SUCCESS */
2940 }
2941
2942 if (comp_code)
2943 {
2944 IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2945 desc, comp_code);)
2946 }
2947
2948 /* remember the desc and vcc mapping */
2949 iavcc->vc_desc_cnt++;
2950 iadev->desc_tbl[desc-1].iavcc = iavcc;
2951 iadev->desc_tbl[desc-1].txskb = skb;
2952 IA_SKB_STATE(skb) = 0;
2953
2954 iadev->ffL.tcq_rd += 2;
2955 if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2956 iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
2957 writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2958
2959 /* Put the descriptor number in the packet ready queue
2960 and put the updated write pointer in the DLE field
2961 */
2962 *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2963
2964 iadev->ffL.prq_wr += 2;
2965 if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2966 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2967
2968 /* Figure out the exact length of the packet and padding required to
2969 make it aligned on a 48 byte boundary. */
2970 total_len = skb->len + sizeof(struct cpcs_trailer);
2971 total_len = ((total_len + 47) / 48) * 48;
2972 IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2973
2974 /* Put the packet in a tx buffer */
2975 trailer = iadev->tx_buf[desc-1].cpcs;
2976 IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2977 skb, skb->data, skb->len, desc);)
2978 trailer->control = 0;
2979 /*big endian*/
2980 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2981 trailer->crc32 = 0; /* not needed - dummy bytes */
2982
2983 /* Display the packet */
2984 IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2985 skb->len, tcnter++);
2986 xdump(skb->data, skb->len, "TX: ");
2987 printk("\n");)
2988
2989 /* Build the buffer descriptor */
2990 buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2991 buf_desc_ptr += desc; /* points to the corresponding entry */
2992 buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2993 /* Huh ? p.115 of users guide describes this as a read-only register */
2994 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2995 buf_desc_ptr->vc_index = vcc->vci;
2996 buf_desc_ptr->bytes = total_len;
2997
2998 if (vcc->qos.txtp.traffic_class == ATM_ABR)
2999 clear_lockup (vcc, iadev);
3000
3001 /* Build the DLE structure */
3002 wr_ptr = iadev->tx_dle_q.write;
3003 memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3004 wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3005 skb->len, DMA_TO_DEVICE);
3006 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3007 buf_desc_ptr->buf_start_lo;
3008 /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3009 wr_ptr->bytes = skb->len;
3010
3011 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3012 if ((wr_ptr->bytes >> 2) == 0xb)
3013 wr_ptr->bytes = 0x30;
3014
3015 wr_ptr->mode = TX_DLE_PSI;
3016 wr_ptr->prq_wr_ptr_data = 0;
3017
3018 /* end is not to be used for the DLE q */
3019 if (++wr_ptr == iadev->tx_dle_q.end)
3020 wr_ptr = iadev->tx_dle_q.start;
3021
3022 /* Build trailer dle */
3023 wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3024 wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3025 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3026
3027 wr_ptr->bytes = sizeof(struct cpcs_trailer);
3028 wr_ptr->mode = DMA_INT_ENABLE;
3029 wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3030
3031 /* end is not to be used for the DLE q */
3032 if (++wr_ptr == iadev->tx_dle_q.end)
3033 wr_ptr = iadev->tx_dle_q.start;
3034
3035 iadev->tx_dle_q.write = wr_ptr;
3036 ATM_DESC(skb) = vcc->vci;
3037 skb_queue_tail(&iadev->tx_dma_q, skb);
3038
3039 atomic_inc(&vcc->stats->tx);
3040 iadev->tx_pkt_cnt++;
3041 /* Increment transaction counter */
3042 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3043
3044#if 0
3045 /* add flow control logic */
3046 if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3047 if (iavcc->vc_desc_cnt > 10) {
3048 vcc->tx_quota = vcc->tx_quota * 3 / 4;
3049 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3050 iavcc->flow_inc = -1;
3051 iavcc->saved_tx_quota = vcc->tx_quota;
3052 } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3053 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3054 printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3055 iavcc->flow_inc = 0;
3056 }
3057 }
3058#endif
3059 IF_TX(printk("ia send done\n");)
3060 return 0;
3061}
3062
3063static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3064{
3065 IADEV *iadev;
3066 unsigned long flags;
3067
3068 iadev = INPH_IA_DEV(vcc->dev);
3069 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3070 {
3071 if (!skb)
3072 printk(KERN_CRIT "null skb in ia_send\n");
3073 else dev_kfree_skb_any(skb);
3074 return -EINVAL;
3075 }
3076 spin_lock_irqsave(&iadev->tx_lock, flags);
3077 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3078 dev_kfree_skb_any(skb);
3079 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3080 return -EINVAL;
3081 }
3082 ATM_SKB(skb)->vcc = vcc;
3083
3084 if (skb_peek(&iadev->tx_backlog)) {
3085 skb_queue_tail(&iadev->tx_backlog, skb);
3086 }
3087 else {
3088 if (ia_pkt_tx (vcc, skb)) {
3089 skb_queue_tail(&iadev->tx_backlog, skb);
3090 }
3091 }
3092 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3093 return 0;
3094
3095}
3096
3097static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3098{
3099 int left = *pos, n;
3100 char *tmpPtr;
3101 IADEV *iadev = INPH_IA_DEV(dev);
3102 if(!left--) {
3103 if (iadev->phy_type == FE_25MBIT_PHY) {
3104 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3105 return n;
3106 }
3107 if (iadev->phy_type == FE_DS3_PHY)
3108 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3109 else if (iadev->phy_type == FE_E3_PHY)
3110 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3111 else if (iadev->phy_type == FE_UTP_OPTION)
3112 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3113 else
3114 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3115 tmpPtr = page + n;
3116 if (iadev->pci_map_size == 0x40000)
3117 n += sprintf(tmpPtr, "-1KVC-");
3118 else
3119 n += sprintf(tmpPtr, "-4KVC-");
3120 tmpPtr = page + n;
3121 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3122 n += sprintf(tmpPtr, "1M \n");
3123 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3124 n += sprintf(tmpPtr, "512K\n");
3125 else
3126 n += sprintf(tmpPtr, "128K\n");
3127 return n;
3128 }
3129 if (!left) {
3130 return sprintf(page, " Number of Tx Buffer: %u\n"
3131 " Size of Tx Buffer : %u\n"
3132 " Number of Rx Buffer: %u\n"
3133 " Size of Rx Buffer : %u\n"
3134 " Packets Received : %u\n"
3135 " Packets Transmitted: %u\n"
3136 " Cells Received : %u\n"
3137 " Cells Transmitted : %u\n"
3138 " Board Dropped Cells: %u\n"
3139 " Board Dropped Pkts : %u\n",
3140 iadev->num_tx_desc, iadev->tx_buf_sz,
3141 iadev->num_rx_desc, iadev->rx_buf_sz,
3142 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3143 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3144 iadev->drop_rxcell, iadev->drop_rxpkt);
3145 }
3146 return 0;
3147}
3148
/* Device operations exported to the ATM core for every registered adapter. */
static const struct atmdev_ops ops = {
	.open		= ia_open,
	.close		= ia_close,
	.ioctl		= ia_ioctl,
	.send		= ia_send,
	.phy_put	= ia_phy_put,
	.phy_get	= ia_phy_get,
	.change_qos	= ia_change_qos,
	.proc_read	= ia_proc_read,
	.owner		= THIS_MODULE,
};
3160
3161static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3162{
3163 struct atm_dev *dev;
3164 IADEV *iadev;
3165 int ret;
3166
3167 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3168 if (!iadev) {
3169 ret = -ENOMEM;
3170 goto err_out;
3171 }
3172
3173 iadev->pci = pdev;
3174
3175 IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3176 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3177 if (pci_enable_device(pdev)) {
3178 ret = -ENODEV;
3179 goto err_out_free_iadev;
3180 }
3181 dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3182 if (!dev) {
3183 ret = -ENOMEM;
3184 goto err_out_disable_dev;
3185 }
3186 dev->dev_data = iadev;
3187 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3188 IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3189 iadev->LineRate);)
3190
3191 pci_set_drvdata(pdev, dev);
3192
3193 ia_dev[iadev_count] = iadev;
3194 _ia_dev[iadev_count] = dev;
3195 iadev_count++;
3196 if (ia_init(dev) || ia_start(dev)) {
3197 IF_INIT(printk("IA register failed!\n");)
3198 iadev_count--;
3199 ia_dev[iadev_count] = NULL;
3200 _ia_dev[iadev_count] = NULL;
3201 ret = -EINVAL;
3202 goto err_out_deregister_dev;
3203 }
3204 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3205
3206 iadev->next_board = ia_boards;
3207 ia_boards = dev;
3208
3209 return 0;
3210
3211err_out_deregister_dev:
3212 atm_dev_deregister(dev);
3213err_out_disable_dev:
3214 pci_disable_device(pdev);
3215err_out_free_iadev:
3216 kfree(iadev);
3217err_out:
3218 return ret;
3219}
3220
/*
 * Tear down one adapter on PCI removal: quiesce the PHY, detach from the
 * ATM core and global tables, then release hardware and memory resources.
 * The order mirrors the reverse of ia_init_one()/ia_start().
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts (mask the loss-of-signal cause bit) */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				   SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	/* NOTE(review): this assumes adapters are removed in LIFO order;
	 * removing a middle board would clear the wrong slot - confirm */
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}
3250
/* PCI device IDs this driver binds to (Interphase vendor, devices 8 and 9). */
static const struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3257
/* PCI driver glue binding the probe/remove entry points to the PCI core. */
static struct pci_driver ia_driver = {
	.name =         DEV_LABEL,
	.id_table =     ia_pci_tbl,
	.probe =        ia_init_one,
	.remove =       ia_remove_one,
};
3264
3265static int __init ia_module_init(void)
3266{
3267 int ret;
3268
3269 ret = pci_register_driver(&ia_driver);
3270 if (ret >= 0) {
3271 ia_timer.expires = jiffies + 3*HZ;
3272 add_timer(&ia_timer);
3273 } else
3274 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3275 return ret;
3276}
3277
3278static void __exit ia_module_exit(void)
3279{
3280 pci_unregister_driver(&ia_driver);
3281
3282 del_timer(&ia_timer);
3283}
3284
3285module_init(ia_module_init);
3286module_exit(ia_module_exit);
1/******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
6 Version: 1.0
7*******************************************************************************
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
13
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
	supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The following are the change log and history:
25
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
	around for the hardware anomalies.
31 Add the CBR support.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
37 Add SMP support.
38
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41*******************************************************************************/
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/mm.h>
46#include <linux/pci.h>
47#include <linux/errno.h>
48#include <linux/atm.h>
49#include <linux/atmdev.h>
50#include <linux/ctype.h>
51#include <linux/sonet.h>
52#include <linux/skbuff.h>
53#include <linux/time.h>
54#include <linux/delay.h>
55#include <linux/uio.h>
56#include <linux/init.h>
57#include <linux/interrupt.h>
58#include <linux/wait.h>
59#include <linux/slab.h>
60#include <asm/io.h>
61#include <linux/atomic.h>
62#include <linux/uaccess.h>
63#include <asm/string.h>
64#include <asm/byteorder.h>
65#include <linux/vmalloc.h>
66#include <linux/jiffies.h>
67#include <linux/nospec.h>
68#include "iphase.h"
69#include "suni.h"
70#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
71
72#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
73
74static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
75static void desc_dbg(IADEV *iadev);
76
77static IADEV *ia_dev[8];
78static struct atm_dev *_ia_dev[8];
79static int iadev_count;
80static void ia_led_timer(struct timer_list *unused);
81static DEFINE_TIMER(ia_timer, ia_led_timer);
82static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
83static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
84static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
85 |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
86
87module_param(IA_TX_BUF, int, 0);
88module_param(IA_TX_BUF_SZ, int, 0);
89module_param(IA_RX_BUF, int, 0);
90module_param(IA_RX_BUF_SZ, int, 0);
91module_param(IADebugFlag, uint, 0644);
92
93MODULE_LICENSE("GPL");
94
95/**************************** IA_LIB **********************************/
96
97static void ia_init_rtn_q (IARTN_Q *que)
98{
99 que->next = NULL;
100 que->tail = NULL;
101}
102
103static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
104{
105 data->next = NULL;
106 if (que->next == NULL)
107 que->next = que->tail = data;
108 else {
109 data->next = que->next;
110 que->next = data;
111 }
112 return;
113}
114
115static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
116 IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
117 if (!entry)
118 return -ENOMEM;
119 entry->data = data;
120 entry->next = NULL;
121 if (que->next == NULL)
122 que->next = que->tail = entry;
123 else {
124 que->tail->next = entry;
125 que->tail = que->tail->next;
126 }
127 return 1;
128}
129
130static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
131 IARTN_Q *tmpdata;
132 if (que->next == NULL)
133 return NULL;
134 tmpdata = que->next;
135 if ( que->next == que->tail)
136 que->next = que->tail = NULL;
137 else
138 que->next = que->next->next;
139 return tmpdata;
140}
141
/*
 * Walk the transmit-complete queue (TCQ) from the host's read position up
 * to the adapter's current write pointer, releasing descriptors the
 * hardware has finished with.  Completions for rate-limited VCs are
 * queued on tx_return_q for later processing by ia_tx_poll().
 */
static void ia_hack_tcq(IADEV *dev) {

  u_short desc1;
  u_short tcq_wr;
  struct ia_vcc *iavcc_r = NULL;

  /* snapshot of the hardware's TCQ write pointer */
  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
  while (dev->host_tcq_wr != tcq_wr) {
     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
     if (!desc1) ;
     else if (!dev->desc_tbl[desc1 -1].timestamp) {
        /* descriptor already recovered elsewhere - just clear the slot */
        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
     }
     else if (dev->desc_tbl[desc1 -1].timestamp) {
        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
           printk("IA: Fatal err in get_desc\n");
           /* NOTE(review): continue skips the read-pointer advance below,
            * so a NULL iavcc here would spin forever - confirm intent */
           continue;
        }
        iavcc_r->vc_desc_cnt--;
        dev->desc_tbl[desc1 -1].timestamp = 0;
        IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
                                   dev->desc_tbl[desc1 -1].txskb, desc1);)
        /* slow VCs: defer skb completion to ia_tx_poll() via return queue */
        if (iavcc_r->pcr < dev->rate_limit) {
           IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
           if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
              printk("ia_hack_tcq: No memory available\n");
        }
        dev->desc_tbl[desc1 -1].iavcc = NULL;
        dev->desc_tbl[desc1 -1].txskb = NULL;
     }
     /* advance past this 2-byte TCQ entry, wrapping at the end */
     dev->host_tcq_wr += 2;
     if (dev->host_tcq_wr > dev->ffL.tcq_ed)
        dev->host_tcq_wr = dev->ffL.tcq_st;
  }
} /* ia_hack_tcq */
178
/*
 * Return the next free transmit descriptor number (1-based) from the TCQ,
 * or 0xFFFF when none is available.  Periodically (roughly every 50
 * jiffies, or whenever the queue looks empty) it also sweeps the
 * descriptor table and forcibly recovers descriptors whose owning VC
 * exceeded its timeout, putting them back on the TCQ.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short desc_num, i;
  struct sk_buff *skb;
  struct ia_vcc *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;   /* jiffies of the last recovery sweep */
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* put the timed-out descriptor back on the TCQ (step the read
            * pointer back one entry, wrapping at the start) */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd = dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  /* empty TCQ - nothing to hand out */
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* skip empty slots and descriptors that are still in flight */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
        dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
236
/*
 * Workaround for a hardware lockup on ABR VCs: every fifth call, check
 * whether the VC's segmentation state has stopped making progress; if so,
 * take the scheduler offline, reset the VC state to idle, re-insert the
 * VC into the ABR schedule table and bring the scheduler back online.
 * No-op for non-ABR traffic classes.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char foundLockUp;
  vcstatus_t *vcstatus;
  u_short *shd_tbl;
  u_short tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int  i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     if( vcstatus->cnt == 0x05 ) {   /* only probe every fifth call */
        abr_vc += vcc->vci;
        eabr_vc += vcc->vci;
        if( eabr_vc->last_desc ) {
           if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
              /* still in ABR_STATE with a pending descriptor -> stuck */
              if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
                 foundLockUp = 1;
           }
           else {
              /* no schedule-slot progress since last check -> stuck */
              tempCellSlot = abr_vc->last_cell_slot;
              tempFract    = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
                 foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
           }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
        /* take the segmentation scheduler offline while we repair state */
        writew(0xFFFD, dev->seg_reg+MODE_REG_0);
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
        /* find a free slot in the ABR schedule table for this VC */
        shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
        for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
        if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
        vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
296
297/*
298** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
299**
300** +----+----+------------------+-------------------------------+
301** | R | NZ | 5-bit exponent | 9-bit mantissa |
302** +----+----+------------------+-------------------------------+
303**
304** R = reserved (written as 0)
305** NZ = 0 if 0 cells/sec; 1 otherwise
306**
307** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
308*/
309static u16
310cellrate_to_float(u32 cr)
311{
312
313#define NZ 0x4000
314#define M_BITS 9 /* Number of bits in mantissa */
315#define E_BITS 5 /* Number of bits in exponent */
316#define M_MASK 0x1ff
317#define E_MASK 0x1f
318 u16 flot;
319 u32 tmp = cr & 0x00ffffff;
320 int i = 0;
321 if (cr == 0)
322 return 0;
323 while (tmp != 1) {
324 tmp >>= 1;
325 i++;
326 }
327 if (i == M_BITS)
328 flot = NZ | (i << M_BITS) | (cr & M_MASK);
329 else if (i < M_BITS)
330 flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
331 else
332 flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
333 return flot;
334}
335
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
** Inverse of cellrate_to_float(); currently compiled out, kept for reference.
*/
static u32
float_to_cellrate(u16 rate)
{
  u32 exp, mantissa, cps;
  if ((rate & NZ) == 0)
    return 0;
  exp = (rate >> M_BITS) & E_MASK;
  mantissa = rate & M_MASK;
  if (exp == 0)
    return 1;
  /* restore the implicit leading 1 above the stored mantissa bits */
  cps = (1 << M_BITS) | mantissa;
  if (exp == M_BITS)
    cps = cps;   /* exponent equals mantissa width: no shift needed */
  else if (exp > M_BITS)
    cps <<= (exp - M_BITS);
  else
    cps >>= (M_BITS - exp);
  return cps;
}
#endif
360
/*
 * Fill *srv_p with the driver's default ABR service parameters; the peak
 * cell rate is set to the adapter's line rate, everything else to fixed
 * defaults.  Used when the caller supplies no explicit ABR parameters.
 */
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
  srv_p->class_type = ATM_ABR;
  srv_p->pcr = dev->LineRate;
  srv_p->mcr = 0;
  srv_p->icr = 0x055cb7;
  srv_p->tbe = 0xffffff;
  srv_p->frtt = 0x3a;
  srv_p->rif = 0xf;
  srv_p->rdf = 0xb;
  srv_p->nrm = 0x4;
  srv_p->trm = 0x7;
  srv_p->cdf = 0x3;
  srv_p->adtf = 50;
}
375
/*
 * Program an ABR VC into the adapter's VC tables.  @flag selects which
 * direction is initialized: 1 writes the transmit-side (FFRED) main VC
 * table entry from the service parameters in *srv_p; 0 writes the
 * receive-side (RFRED) reassembly and ABR VC table entries.  The rate
 * fields are converted to the hardware's 16-bit float format via
 * cellrate_to_float().  Always returns 0 (the sanity checks that could
 * fail are compiled out).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32 icr;
  u8 trm, nrm, crm;
  u16 adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
			          /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;   /* hardware fields must be nonzero */
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* initial cell rate: bounded by tbe/frtt expressed in cells/sec */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
				((srv_p->tbe/srv_p->frtt)*1000000) :
				(1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return	0;
}
/*
 * Reserve CBR bandwidth for @vcc: convert its peak cell rate into a number
 * of schedule-table entries and spread those entries as evenly as possible
 * through the CBR schedule table, probing outward from each ideal slot
 * when it is already occupied.  Enables CBR in the segmentation engine
 * when the first CBR VC appears.  Returns 0 on success, -1 when no PCR is
 * defined, or -EBUSY when not enough table entries remain.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16   cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   /* round up when the requested rate is closer to the next multiple of
    * the table granularity (weighted 3:1 towards rounding up) */
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          // probe alternately below (idealSlot - inc) and above
          // (idealSlot + inc) the ideal position
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
577static void ia_cbrVc_close (struct atm_vcc *vcc) {
578 IADEV *iadev;
579 u16 *SchedTbl, NullVci = 0;
580 u32 i, NumFound;
581
582 iadev = INPH_IA_DEV(vcc->dev);
583 iadev->NumEnabledCBR--;
584 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
585 if (iadev->NumEnabledCBR == 0) {
586 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
587 IF_CBR (printk("CBR support disabled\n");)
588 }
589 NumFound = 0;
590 for (i=0; i < iadev->CbrTotEntries; i++)
591 {
592 if (*SchedTbl == vcc->vci) {
593 iadev->CbrRemEntries++;
594 *SchedTbl = NullVci;
595 IF_CBR(NumFound++;)
596 }
597 SchedTbl++;
598 }
599 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
600}
601
602static int ia_avail_descs(IADEV *iadev) {
603 int tmp = 0;
604 ia_hack_tcq(iadev);
605 if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
606 tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
607 else
608 tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
609 iadev->ffL.tcq_st) / 2;
610 return tmp;
611}
612
613static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
614
615static int ia_que_tx (IADEV *iadev) {
616 struct sk_buff *skb;
617 int num_desc;
618 struct atm_vcc *vcc;
619 num_desc = ia_avail_descs(iadev);
620
621 while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
622 if (!(vcc = ATM_SKB(skb)->vcc)) {
623 dev_kfree_skb_any(skb);
624 printk("ia_que_tx: Null vcc\n");
625 break;
626 }
627 if (!test_bit(ATM_VF_READY,&vcc->flags)) {
628 dev_kfree_skb_any(skb);
629 printk("Free the SKB on closed vci %d \n", vcc->vci);
630 break;
631 }
632 if (ia_pkt_tx (vcc, skb)) {
633 skb_queue_head(&iadev->tx_backlog, skb);
634 }
635 num_desc--;
636 }
637 return 0;
638}
639
/*
 * Complete transmitted packets: drain the return queue filled by
 * ia_hack_tcq(), match each returned entry's skb against the VC's list of
 * in-flight skbs, release them back to the ATM core, then push any
 * backlogged packets out via ia_que_tx().
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q *  rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       /* release every skb queued ahead of the one that just completed;
        * anything not yet marked IA_TX_DONE is reported as lost */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* skb was not on the txing list: put the return entry back and
           * retry on the next poll */
          IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       /* hand the completed skb back to the ATM core (or free it) */
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
/*
 * Write one 16-bit word to the adapter's serial EEPROM (NOVRAM) at @addr,
 * bit-banging the interface via the NVRAM_* macros, MSB first.
 * Currently compiled out - kept for reference.
 */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
	u32	t;
	int	i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	/* busy-wait for the NOVRAM to signal write completion via NVDO */
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
737
/*
 * ia_eeprom_get() - read one 16-bit word from the adapter's serial NOVRAM.
 * @iadev: adapter private data
 * @addr:  word address within the NOVRAM
 *
 * Clocks out an IAREAD command and then shifts in 16 data bits,
 * MSB-first, via the NVRAM_* bit-bang macros.  Returns the assembled
 * word.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
    u_short val;
    u32 t;
    int i;
    /*
     * Read the first bit that was clocked with the falling edge of the
     * the last command data clock
     */
    NVRAM_CMD(IAREAD + addr);
    /*
     * Now read the rest of the bits, the next bit read is D14, then D13,
     * and so on.
     */
    val = 0;
    for (i=15; i>=0; i--) {
        /* NVRAM_CLKIN() samples one data bit into t (presumably 0/1,
           given the shift-and-OR below) — defined elsewhere in the file. */
        NVRAM_CLKIN(t);
        val |= (t << i);
    }
    NVRAM_CLR_CE;
    CFG_AND(~NVDI);
    return val;
}
761
762static void ia_hw_type(IADEV *iadev) {
763 u_short memType = ia_eeprom_get(iadev, 25);
764 iadev->memType = memType;
765 if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
766 iadev->num_tx_desc = IA_TX_BUF;
767 iadev->tx_buf_sz = IA_TX_BUF_SZ;
768 iadev->num_rx_desc = IA_RX_BUF;
769 iadev->rx_buf_sz = IA_RX_BUF_SZ;
770 } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
771 if (IA_TX_BUF == DFL_TX_BUFFERS)
772 iadev->num_tx_desc = IA_TX_BUF / 2;
773 else
774 iadev->num_tx_desc = IA_TX_BUF;
775 iadev->tx_buf_sz = IA_TX_BUF_SZ;
776 if (IA_RX_BUF == DFL_RX_BUFFERS)
777 iadev->num_rx_desc = IA_RX_BUF / 2;
778 else
779 iadev->num_rx_desc = IA_RX_BUF;
780 iadev->rx_buf_sz = IA_RX_BUF_SZ;
781 }
782 else {
783 if (IA_TX_BUF == DFL_TX_BUFFERS)
784 iadev->num_tx_desc = IA_TX_BUF / 8;
785 else
786 iadev->num_tx_desc = IA_TX_BUF;
787 iadev->tx_buf_sz = IA_TX_BUF_SZ;
788 if (IA_RX_BUF == DFL_RX_BUFFERS)
789 iadev->num_rx_desc = IA_RX_BUF / 8;
790 else
791 iadev->num_rx_desc = IA_RX_BUF;
792 iadev->rx_buf_sz = IA_RX_BUF_SZ;
793 }
794 iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
795 IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
796 iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
797 iadev->rx_buf_sz, iadev->rx_pkt_ram);)
798
799#if 0
800 if ((memType & FE_MASK) == FE_SINGLE_MODE) {
801 iadev->phy_type = PHY_OC3C_S;
802 else if ((memType & FE_MASK) == FE_UTP_OPTION)
803 iadev->phy_type = PHY_UTP155;
804 else
805 iadev->phy_type = PHY_OC3C_M;
806#endif
807
808 iadev->phy_type = memType & FE_MASK;
809 IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
810 memType,iadev->phy_type);)
811 if (iadev->phy_type == FE_25MBIT_PHY)
812 iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
813 else if (iadev->phy_type == FE_DS3_PHY)
814 iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
815 else if (iadev->phy_type == FE_E3_PHY)
816 iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
817 else
818 iadev->LineRate = (u32)(ATM_OC3_PCR);
819 IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
820
821}
822
823static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
824{
825 return readl(ia->phy + (reg >> 2));
826}
827
828static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
829{
830 writel(val, ia->phy + (reg >> 2));
831}
832
833static void ia_frontend_intr(struct iadev_priv *iadev)
834{
835 u32 status;
836
837 if (iadev->phy_type & FE_25MBIT_PHY) {
838 status = ia_phy_read32(iadev, MB25_INTR_STATUS);
839 iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
840 } else if (iadev->phy_type & FE_DS3_PHY) {
841 ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
842 status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
843 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
844 } else if (iadev->phy_type & FE_E3_PHY) {
845 ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
846 status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
847 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
848 } else {
849 status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
850 iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
851 }
852
853 printk(KERN_INFO "IA: SUNI carrier %s\n",
854 iadev->carrier_detect ? "detected" : "lost signal");
855}
856
857static void ia_mb25_init(struct iadev_priv *iadev)
858{
859#if 0
860 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
861#endif
862 ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
863 ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
864
865 iadev->carrier_detect =
866 (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
867}
868
/* One (register, value) pair for table-driven PHY initialization;
   consumed by ia_phy_write(). */
struct ia_reg {
    u16 reg;    /* register offset, as passed to ia_phy_write32() */
    u16 val;    /* value to write there */
};
873
874static void ia_phy_write(struct iadev_priv *iadev,
875 const struct ia_reg *regs, int len)
876{
877 while (len--) {
878 ia_phy_write32(iadev, regs->reg, regs->val);
879 regs++;
880 }
881}
882
883static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
884{
885 static const struct ia_reg suni_ds3_init[] = {
886 { SUNI_DS3_FRM_INTR_ENBL, 0x17 },
887 { SUNI_DS3_FRM_CFG, 0x01 },
888 { SUNI_DS3_TRAN_CFG, 0x01 },
889 { SUNI_CONFIG, 0 },
890 { SUNI_SPLR_CFG, 0 },
891 { SUNI_SPLT_CFG, 0 }
892 };
893 u32 status;
894
895 status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
896 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
897
898 ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
899}
900
901static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
902{
903 static const struct ia_reg suni_e3_init[] = {
904 { SUNI_E3_FRM_FRAM_OPTIONS, 0x04 },
905 { SUNI_E3_FRM_MAINT_OPTIONS, 0x20 },
906 { SUNI_E3_FRM_FRAM_INTR_ENBL, 0x1d },
907 { SUNI_E3_FRM_MAINT_INTR_ENBL, 0x30 },
908 { SUNI_E3_TRAN_STAT_DIAG_OPTIONS, 0 },
909 { SUNI_E3_TRAN_FRAM_OPTIONS, 0x01 },
910 { SUNI_CONFIG, SUNI_PM7345_E3ENBL },
911 { SUNI_SPLR_CFG, 0x41 },
912 { SUNI_SPLT_CFG, 0x41 }
913 };
914 u32 status;
915
916 status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
917 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
918 ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
919}
920
/*
 * ia_suni_pm7345_init() - common PM7345 S/UNI initialization.
 *
 * First configures the framer for the fitted line type (DS3 or E3),
 * then applies the shared register table below, and finally clears the
 * four SUNI_PM7345_*LB bits in SUNI_CONFIG (by their names, loopback
 * controls — TODO confirm against the PM7345 datasheet).
 */
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
    static const struct ia_reg suni_init[] = {
        /* Enable RSOP loss of signal interrupt. */
        { SUNI_INTR_ENBL, 0x28 },
        /* Clear error counters. */
        { SUNI_ID_RESET, 0 },
        /* Clear "PMCTST" in master test register. */
        { SUNI_MASTER_TEST, 0 },

        { SUNI_RXCP_CTRL, 0x2c },
        { SUNI_RXCP_FCTRL, 0x81 },

        { SUNI_RXCP_IDLE_PAT_H1, 0 },
        { SUNI_RXCP_IDLE_PAT_H2, 0 },
        { SUNI_RXCP_IDLE_PAT_H3, 0 },
        { SUNI_RXCP_IDLE_PAT_H4, 0x01 },

        { SUNI_RXCP_IDLE_MASK_H1, 0xff },
        { SUNI_RXCP_IDLE_MASK_H2, 0xff },
        { SUNI_RXCP_IDLE_MASK_H3, 0xff },
        { SUNI_RXCP_IDLE_MASK_H4, 0xfe },

        { SUNI_RXCP_CELL_PAT_H1, 0 },
        { SUNI_RXCP_CELL_PAT_H2, 0 },
        { SUNI_RXCP_CELL_PAT_H3, 0 },
        { SUNI_RXCP_CELL_PAT_H4, 0x01 },

        { SUNI_RXCP_CELL_MASK_H1, 0xff },
        { SUNI_RXCP_CELL_MASK_H2, 0xff },
        { SUNI_RXCP_CELL_MASK_H3, 0xff },
        { SUNI_RXCP_CELL_MASK_H4, 0xff },

        { SUNI_TXCP_CTRL, 0xa4 },
        { SUNI_TXCP_INTR_EN_STS, 0x10 },
        { SUNI_TXCP_IDLE_PAT_H5, 0x55 }
    };

    /* Line-type-specific framer setup must happen first. */
    if (iadev->phy_type & FE_DS3_PHY)
        ia_suni_pm7345_init_ds3(iadev);
    else
        ia_suni_pm7345_init_e3(iadev);

    ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

    /* Read-modify-write SUNI_CONFIG to clear the *LB bits. */
    ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
        ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
          SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
    /* NOTE(review): 'suni_pm7345' is not defined anywhere visible here;
       this only compiles when __SNMP__ is off.  Kept as-is. */
    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
    return;
}
974
975
976/***************************** IA_LIB END *****************************/
977
978#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0; /* debug counter; not referenced in the code visible in this chunk */
980static void xdump( u_char* cp, int length, char* prefix )
981{
982 int col, count;
983 u_char prntBuf[120];
984 u_char* pBuf = prntBuf;
985 count = 0;
986 while(count < length){
987 pBuf += sprintf( pBuf, "%s", prefix );
988 for(col = 0;count + col < length && col < 16; col++){
989 if (col != 0 && (col % 4) == 0)
990 pBuf += sprintf( pBuf, " " );
991 pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
992 }
993 while(col++ < 16){ /* pad end of buffer with blanks */
994 if ((col % 4) == 0)
995 sprintf( pBuf, " " );
996 pBuf += sprintf( pBuf, " " );
997 }
998 pBuf += sprintf( pBuf, " " );
999 for(col = 0;count + col < length && col < 16; col++){
1000 u_char c = cp[count + col];
1001
1002 if (isascii(c) && isprint(c))
1003 pBuf += sprintf(pBuf, "%c", c);
1004 else
1005 pBuf += sprintf(pBuf, ".");
1006 }
1007 printk("%s\n", prntBuf);
1008 count += col;
1009 pBuf = prntBuf;
1010 }
1011
1012} /* close xdump(... */
1013#endif /* CONFIG_ATM_IA_DEBUG */
1014
1015
/* NOTE(review): appears to anchor the driver's chain of probed boards;
   all uses are outside this chunk — confirm before relying on it. */
static struct atm_dev *ia_boards = NULL;

/* The nominal control-memory base addresses are specified for a 128K
   part; these macros rescale them to the actual on-board memory size.
   They expect a local variable 'iadev' to be in scope at the use site. */
#define ACTUAL_RAM_BASE \
	RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1024
1025
1026/*-- some utilities and memory allocation stuff will come here -------------*/
1027
/*
 * desc_dbg() - dump transmit-completion-queue (TCQ) state for debugging.
 *
 * Prints the hardware TCQ write pointer plus the descriptor numbers at
 * and just before it, the driver's cached TCQ pointers, every slot in
 * the TCQ region from start to end address, and the timestamp of each
 * entry in the software descriptor table.
 */
static void desc_dbg(IADEV *iadev) {

    u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
    u32 i;
    void __iomem *tmp;
    // regval = readl((u32)ia_cmds->maddr);
    tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
    /* descriptor at the write pointer, and the one 2 bytes before it */
    printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
           tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
           readw(iadev->seg_ram+tcq_wr_ptr-2));
    printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
           iadev->ffL.tcq_rd);
    tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
    tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
    printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
    i = 0;
    /* walk the whole TCQ region, one 16-bit slot at a time */
    while (tcq_st_ptr != tcq_ed_ptr) {
        tmp = iadev->seg_ram+tcq_st_ptr;
        printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);
        tcq_st_ptr += 2;
    }
    for(i=0; i <iadev->num_tx_desc; i++)
        printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1052
1053
1054/*----------------------------- Receiving side stuff --------------------------*/
1055
/*
 * rx_excp_rcvd() - handler for the reassembly exception queue.
 *
 * The entire body is compiled out: per the original comment, draining
 * the exception queue this way caused too many exception interrupts,
 * so the handler is intentionally a no-op.  The dead code below shows
 * how the queue would be walked (read VCI + error code at the read
 * pointer, advance with wraparound, write the pointer back).
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
    IADEV *iadev;
    u_short state;
    u_short excpq_rd_ptr;
    //u_short *ptr;
    int vci, error = 1;
    iadev = INPH_IA_DEV(dev);
    state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
    while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
    { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
        printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
        vci = readw(iadev->reass_ram+excpq_rd_ptr);
        error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
        excpq_rd_ptr += 4;
        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
        writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
    }
#endif
}
1084
1085static void free_desc(struct atm_dev *dev, int desc)
1086{
1087 IADEV *iadev;
1088 iadev = INPH_IA_DEV(dev);
1089 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1090 iadev->rfL.fdq_wr +=2;
1091 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1092 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1093 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1094}
1095
1096
/*
 * rx_pkt() - take one completed PDU off the packet-complete queue and
 * start the DMA that moves it from adapter memory into an skb.
 * @dev: the ATM device
 *
 * Returns 0 on success (including the drop paths, which recycle the
 * descriptor), -EINVAL if the packet-complete queue is empty, -1 on a
 * bad descriptor or missing VCC.
 *
 * Caller context: called from rx_intr() while draining the PCQ; no
 * locking is visible at this level.
 */
static int rx_pkt(struct atm_dev *dev)
{
    IADEV *iadev;
    struct atm_vcc *vcc;
    unsigned short status;
    struct rx_buf_desc __iomem *buf_desc_ptr;
    int desc;
    struct dle* wr_ptr;
    int len;
    struct sk_buff *skb;
    u_int buf_addr, dma_addr;

    iadev = INPH_IA_DEV(dev);
    /* PCQ empty when our cached read pointer equals the hw write pointer */
    if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
    {
        printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
        return -EINVAL;
    }
    /* mask 1st 3 bits to get the actual descno. */
    desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
    IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                 iadev->reass_ram, iadev->rfL.pcq_rd, desc);
          printk(" pcq_wr_ptr = 0x%x\n",
                 readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
    /* update the read pointer - maybe we shud do this in the end */
    /* advance (with wraparound) and publish the new PCQ read pointer */
    if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
        iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
    else
        iadev->rfL.pcq_rd += 2;
    writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

    /* get the buffer desc entry.
       update stuff. - doesn't seem to be any update necessary
    */
    buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
    /* make the ptr point to the corresponding buffer desc entry */
    buf_desc_ptr += desc;
    /* sanity: descriptor number and its VC index must be in range */
    if (!desc || (desc > iadev->num_rx_desc) ||
        ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
        free_desc(dev, desc);
        IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
        return -1;
    }
    vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
    if (!vcc)
    {
        free_desc(dev, desc);
        printk("IA: null vcc, drop PDU\n");
        return -1;
    }


    /* might want to check the status bits for errors */
    status = (u_short) (buf_desc_ptr->desc_mode);
    if (status & (RX_CER | RX_PTE | RX_OFL))
    {
        /* CRC error, packet timeout, or buffer overflow: count and drop */
        atomic_inc(&vcc->stats->rx_err);
        IF_ERR(printk("IA: bad packet, dropping it");)
        if (status & RX_CER) {
            IF_ERR(printk(" cause: packet CRC error\n");)
        }
        else if (status & RX_PTE) {
            IF_ERR(printk(" cause: packet time out\n");)
        }
        else {
            IF_ERR(printk(" cause: buffer overflow\n");)
        }
        goto out_free_desc;
    }

    /*
      build DLE.
    */

    /* PDU length = how far the adapter's DMA pointer advanced past the
       buffer start (both are 16+16-bit split addresses) */
    buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
    dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
    len = dma_addr - buf_addr;
    if (len > iadev->rx_buf_sz) {
        printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
        atomic_inc(&vcc->stats->rx_err);
        goto out_free_desc;
    }

    /* charge the socket's rx buffer quota; failure -> drop */
    if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
        if (vcc->vci < 32)
            printk("Drop control packets\n");
        goto out_free_desc;
    }
    skb_put(skb,len);
    // pwang_test
    ATM_SKB(skb)->vcc = vcc;
    ATM_DESC(skb) = desc;
    /* rx_dle_intr() pairs this skb with its DLE when the DMA completes */
    skb_queue_tail(&iadev->rx_dma_q, skb);

    /* Build the DLE structure */
    wr_ptr = iadev->rx_dle_q.write;
    wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
                                          len, DMA_FROM_DEVICE);
    wr_ptr->local_pkt_addr = buf_addr;
    wr_ptr->bytes = len;    /* We don't know this do we ?? */
    wr_ptr->mode = DMA_INT_ENABLE;

    /* shud take care of wrap around here too. */
    if(++wr_ptr == iadev->rx_dle_q.end)
        wr_ptr = iadev->rx_dle_q.start;
    iadev->rx_dle_q.write = wr_ptr;
    udelay(1);
    /* Increment transaction counter */
    writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out: return 0;
out_free_desc:
    free_desc(dev, desc);
    goto out;
}
1211
/*
 * rx_intr() - reassembly (receive-side) interrupt dispatcher.
 * @dev: the ATM device
 *
 * Reads the reassembly interrupt status and services, in order:
 * completed packets (drain the PCQ via rx_pkt()), free-queue-empty
 * (with a watchdog-style recovery that recycles every descriptor if no
 * packet has arrived for 50 jiffies), exceptions, and raw cells.
 */
static void rx_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    u_short status;
    u_short state, i;

    iadev = INPH_IA_DEV(dev);
    status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
    IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
    if (status & RX_PKT_RCVD)
    {
        /* do something */
        /* Basically recvd an interrupt for receiving a packet.
           A descriptor would have been written to the packet complete
           queue. Get all the descriptors and set up dma to move the
           packets till the packet complete queue is empty..
        */
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
        while(!(state & PCQ_EMPTY))
        {
            rx_pkt(dev);
            state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        }
        iadev->rxing = 1;
    }
    if (status & RX_FREEQ_EMPT)
    {
        /* First occurrence: snapshot the packet count and time so the
           branch below can tell whether traffic has stalled since. */
        if (iadev->rxing) {
            iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
            iadev->rx_tmp_jif = jiffies;
            iadev->rxing = 0;
        }
        /* Still empty, no packets for 50 jiffies: recycle every
           descriptor and mask the free-queue/exception interrupts
           (rx_dle_intr() re-enables them once descriptors flow again). */
        else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
                 ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
            for (i = 1; i <= iadev->num_rx_desc; i++)
                free_desc(dev, i);
printk("Test logic RUN!!!!\n");
            writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
            iadev->rxing = 1;
        }
        IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
    }

    if (status & RX_EXCP_RCVD)
    {
        /* probably need to handle the exception queue also. */
        IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
        rx_excp_rcvd(dev);  /* currently a no-op; body is #if 0 */
    }


    if (status & RX_RAW_RCVD)
    {
        /* need to handle the raw incoming cells. This depends on
           whether we have programmed to receive the raw cells or not.
           Else ignore. */
        IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
    }
}
1272
1273
/*
 * rx_dle_intr() - receive DMA-complete handler.
 * @dev: the ATM device
 *
 * Walks the rx DLE ring from our cached read pointer up to the
 * position the hardware has advanced to (derived from the rx list
 * address register), and for each completed transfer: recycles the
 * hardware descriptor, validates the AAL5 trailer length, trims the
 * skb and pushes it up the VCC.  Finally, if receive interrupts were
 * masked because the free queue ran empty, re-enables them once the
 * free queue is no longer empty.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    struct atm_vcc *vcc;
    struct sk_buff *skb;
    int desc;
    u_short state;
    struct dle *dle, *cur_dle;
    u_int dle_lp;
    int len;
    iadev = INPH_IA_DEV(dev);

    /* free all the dles done, that is just update our own dle read pointer
       - do we really need to do this. Think not. */
    /* DMA is done, just get all the receive buffers from the rx dma queue
       and push them up to the higher layer protocol. Also free the desc
       associated with the buffer. */
    dle = iadev->rx_dle_q.read;
    /* low bits of the rx list address register give the hw position
       within the ring; >>4 converts the byte offset to a DLE index */
    dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
    cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
    while(dle != cur_dle)
    {
        /* free the DMAed skb */
        skb = skb_dequeue(&iadev->rx_dma_q);
        if (!skb)
            goto INCR_DLE;
        desc = ATM_DESC(skb);
        free_desc(dev, desc);

        if (!(len = skb->len))
        {
            printk("rx_dle_intr: skb len 0\n");
            dev_kfree_skb_any(skb);
        }
        else
        {
            struct cpcs_trailer *trailer;
            u_short length;
            struct ia_vcc *ia_vcc;

            /* NOTE(review): unmaps via rx_dle_q.write->sys_pkt_addr
               rather than the DLE being consumed ('dle'); looks
               suspicious but matches the long-standing behaviour —
               confirm against the mapping in rx_pkt() before changing. */
            dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
                             len, DMA_FROM_DEVICE);
            /* no VCC related housekeeping done as yet. lets see */
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                printk("IA: null vcc\n");
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            ia_vcc = INPH_IA_VCC(vcc);
            if (ia_vcc == NULL)
            {
                atomic_inc(&vcc->stats->rx_err);
                atm_return(vcc, skb->truesize);
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            // get real pkt length pwang_test
            /* AAL5 trailer sits at the end of the received data; its
               length field gives the true PDU size */
            trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                      skb->len - sizeof(*trailer));
            length = swap_byte_order(trailer->length);
            if ((length > iadev->rx_buf_sz) || (length >
                (skb->len - sizeof(struct cpcs_trailer))))
            {
                atomic_inc(&vcc->stats->rx_err);
                IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
                              length, skb->len);)
                atm_return(vcc, skb->truesize);
                dev_kfree_skb_any(skb);
                goto INCR_DLE;
            }
            skb_trim(skb, length);

            /* Display the packet */
            IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
                     xdump(skb->data, skb->len, "RX: ");
                     printk("\n");)

            IF_RX(printk("rx_dle_intr: skb push");)
            vcc->push(vcc,skb);
            atomic_inc(&vcc->stats->rx);
            iadev->rx_pkt_cnt++;
        }
INCR_DLE:
        if (++dle == iadev->rx_dle_q.end)
            dle = iadev->rx_dle_q.start;
    }
    iadev->rx_dle_q.read = dle;

    /* if the interrupts are masked because there were no free desc available,
       unmask them now. */
    if (!iadev->rxing) {
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        if (!(state & FREEQ_EMPTY)) {
            state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
            writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                   iadev->reass_reg+REASS_MASK_REG);
            iadev->rxing++;
        }
    }
}
1375
1376
/*
 * open_rx() - receive-side setup for a newly opened VCC.
 * @vcc: the VCC being opened
 *
 * Validates the traffic class (ABR is rejected on 25 Mbit PHYs),
 * activates the VCI's entry in the on-board VC lookup table, programs
 * either the ABR machinery or the reassembly table for the VCI, and
 * records the VCC in iadev->rx_open[] so incoming PDUs can be matched
 * back to it.  Returns 0 on success, -EINVAL on an unsupported class.
 */
static int open_rx(struct atm_vcc *vcc)
{
    IADEV *iadev;
    u_short __iomem *vc_table;
    u_short __iomem *reass_ptr;
    IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

    if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
    iadev = INPH_IA_DEV(vcc->dev);
    if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
        if (iadev->phy_type & FE_25MBIT_PHY) {
            printk("IA: ABR not support\n");
            return -EINVAL;
        }
    }
    /* Make only this VCI in the vc table valid and let all
       others be invalid entries */
    vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
    vc_table += vcc->vci;
    /* mask the last 6 bits and OR it with 3 for 1K VCs */

    /* NOTE(review): direct store through an __iomem pointer (no
       writew()) — kept as in the original; confirm on non-x86. */
    *vc_table = vcc->vci << 6;
    /* Also keep a list of open rx vcs so that we can attach them with
       incoming PDUs later. */
    if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
        (vcc->qos.txtp.traffic_class == ATM_ABR))
    {
        srv_cls_param_t srv_p;
        init_abr_vc(iadev, &srv_p);
        ia_open_abr_vc(iadev, &srv_p, vcc, 0);
    }
    else { /* for UBR later may need to add CBR logic */
        reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
        reass_ptr += vcc->vci;
        *reass_ptr = NO_AAL5_PKT;
    }

    if (iadev->rx_open[vcc->vci])
        printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
               vcc->dev->number, vcc->vci);
    iadev->rx_open[vcc->vci] = vcc;
    return 0;
}
1420
/*
 * rx_init() - one-time initialization of the receive (reassembly) side.
 * @dev: the ATM device
 *
 * Allocates the rx DLE ring in coherent DMA memory, resets the
 * reassembly engine, lays out and initializes the receive control
 * memory (buffer descriptors, free queue, packet-complete queue,
 * exception queue, reassembly table, VC table, ABR VC table), programs
 * the remaining reassembly registers, allocates the rx_open[] VCC
 * lookup array, and finally puts the reassembly engine online.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int rx_init(struct atm_dev *dev)
{
    IADEV *iadev;
    struct rx_buf_desc __iomem *buf_desc_ptr;
    unsigned long rx_pkt_start = 0;
    void *dle_addr;
    struct abr_vc_table *abr_vc_table;
    u16 *vc_table;
    u16 *reass_table;
    int i,j, vcsize_sel;
    u_short freeq_st_adr;
    u_short *freeq_start;

    iadev = INPH_IA_DEV(dev);
    //  spin_lock_init(&iadev->rx_lock);

    /* Allocate 4k bytes - more aligned than needed (4k boundary) */
    dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
                                  &iadev->rx_dle_dma, GFP_KERNEL);
    if (!dle_addr) {
        printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
        goto err_out;
    }
    iadev->rx_dle_q.start = (struct dle *)dle_addr;
    iadev->rx_dle_q.read = iadev->rx_dle_q.start;
    iadev->rx_dle_q.write = iadev->rx_dle_q.start;
    iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
    /* the end of the dle q points to the entry after the last
       DLE that can be used. */

    /* write the upper 20 bits of the start address to rx list address register */
    /* We know this is 32bit bus addressed so the following is safe */
    writel(iadev->rx_dle_dma & 0xfffff000,
           iadev->dma + IPHASE5575_RX_LIST_ADDR);
    IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                   iadev->dma+IPHASE5575_TX_LIST_ADDR,
                   readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
            printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                   iadev->dma+IPHASE5575_RX_LIST_ADDR,
                   readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

    /* mask everything, take the engine offline, and reset it */
    writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
    writew(0, iadev->reass_reg+MODE_REG);
    writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

    /* Receive side control memory map
       -------------------------------

       Buffer descr 0x0000 (736 - 23K)
       VP Table     0x5c00 (256 - 512)
       Except q     0x5e00 (128 - 512)
       Free buffer q 0x6000 (1K - 2K)
       Packet comp q 0x6800 (1K - 2K)
       Reass Table  0x7000 (1K - 2K)
       VC Table     0x7800 (1K - 2K)
       ABR VC Table 0x8000 (1K - 32K)
    */

    /* Base address for Buffer Descriptor Table */
    writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
    /* Set the buffer size register */
    writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

    /* Initialize each entry in the Buffer Descriptor Table */
    iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
    buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
    /* entry 0 is never handed out (descriptor numbers start at 1) */
    memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
    buf_desc_ptr++;
    rx_pkt_start = iadev->rx_pkt_ram;
    /* point each descriptor at its slice of packet RAM */
    for(i=1; i<=iadev->num_rx_desc; i++)
    {
        memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
        buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
        buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
        buf_desc_ptr++;
        rx_pkt_start += iadev->rx_buf_sz;
    }
    IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
    /* Free buffer queue: start/end addresses and both pointers.
       Write pointer starts at the end - the loop below fills the
       queue with all descriptors. */
    i = FREE_BUF_DESC_Q*iadev->memSize;
    writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
    writew(i, iadev->reass_reg+FREEQ_ST_ADR);
    writew(i+iadev->num_rx_desc*sizeof(u_short),
           iadev->reass_reg+FREEQ_ED_ADR);
    writew(i, iadev->reass_reg+FREEQ_RD_PTR);
    writew(i+iadev->num_rx_desc*sizeof(u_short),
           iadev->reass_reg+FREEQ_WR_PTR);
    /* Fill the FREEQ with all the free descriptors. */
    freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
    freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
    for(i=1; i<=iadev->num_rx_desc; i++)
    {
        *freeq_start = (u_short)i;
        freeq_start++;
    }
    IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
    /* Packet Complete Queue */
    i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
    writew(i, iadev->reass_reg+PCQ_ST_ADR);
    writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
    writew(i, iadev->reass_reg+PCQ_RD_PTR);
    writew(i, iadev->reass_reg+PCQ_WR_PTR);

    /* Exception Queue */
    i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
    writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
    writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
           iadev->reass_reg+EXCP_Q_ED_ADR);
    writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
    writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

    /* Load local copy of FREEQ and PCQ ptrs */
    iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
    iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
    iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
    iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
    iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
    iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
    iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
    iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

    IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
                   iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
                   iadev->rfL.pcq_wr);)
    /* just for check - no VP TBL */
    /* VP Table */
    /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
    /* initialize VP Table for invalid VPIs
       - I guess we can write all 1s or 0x000f in the entire memory
       space or something similar.
    */

    /* This seems to work and looks right to me too !!! */
    i = REASS_TABLE * iadev->memSize;
    writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
    /* initialize Reassembly table to I don't know what ???? */
    reass_table = (u16 *)(iadev->reass_ram+i);
    j = REASS_TABLE_SZ * iadev->memSize;
    for(i=0; i < j; i++)
        *reass_table++ = NO_AAL5_PKT;
    /* encode the VC count (a power of two <= 8K) as the number of
       halvings from 8K; this goes into the low bits of VC_LKUP_BASE */
    i = 8*1024;
    vcsize_sel = 0;
    while (i != iadev->num_vc) {
        i /= 2;
        vcsize_sel++;
    }
    i = RX_VC_TABLE * iadev->memSize;
    writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
    vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
    j = RX_VC_TABLE_SZ * iadev->memSize;
    for(i = 0; i < j; i++)
    {
        /* shift the reassembly pointer by 3 + lower 3 bits of
           vc_lkup_base register (=3 for 1K VCs) and the last byte
           is those low 3 bits.
           Shall program this later.
        */
        *vc_table = (i << 6) | 15; /* for invalid VCI */
        vc_table++;
    }
    /* ABR VC table */
    i = ABR_VC_TABLE * iadev->memSize;
    writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

    i = ABR_VC_TABLE * iadev->memSize;
    abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
    /* NOTE(review): entry count computed from REASS_TABLE_SZ, not an
       ABR-specific size constant — matches the original; confirm. */
    j = REASS_TABLE_SZ * iadev->memSize;
    memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
    for(i = 0; i < j; i++) {
        abr_vc_table->rdf = 0x0003;
        abr_vc_table->air = 0x5eb1;
        abr_vc_table++;
    }

    /* Initialize other registers */

    /* VP Filter Register set for VC Reassembly only */
    writew(0xff00, iadev->reass_reg+VP_FILTER);
    writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
    writew(0x1, iadev->reass_reg+PROTOCOL_ID);

    /* Packet Timeout Count related Registers :
       Set packet timeout to occur in about 3 seconds
       Set Packet Aging Interval count register to overflow in about 4 us
    */
    writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

    /* NOTE(review): 'j' still holds the table entry count from the ABR
       loop above; the timeout range is derived from it as originally
       written — confirm the intent before touching. */
    i = (j >> 6) & 0xFF;
    j += 2 * (j - 1);
    i |= ((j << 2) & 0xFF00);
    writew(i, iadev->reass_reg+TMOUT_RANGE);

    /* initiate the desc_tble */
    for(i=0; i<iadev->num_tx_desc;i++)
        iadev->desc_tbl[i].timestamp = 0;

    /* to clear the interrupt status register - read it */
    readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

    /* Mask Register - clear it */
    writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

    skb_queue_head_init(&iadev->rx_dma_q);
    iadev->rx_free_desc_qhead = NULL;

    iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
    if (!iadev->rx_open) {
        printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
               dev->number);
        goto err_free_dle;
    }

    iadev->rxing = 1;
    iadev->rx_pkt_cnt = 0;
    /* Mode Register */
    writew(R_ONLINE, iadev->reass_reg+MODE_REG);
    return 0;

err_free_dle:
    dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
                      iadev->rx_dle_dma);
err_out:
    return -ENOMEM;
}
1644
1645
1646/*
1647 The memory map suggested in appendix A and the coding for it.
1648 Keeping it around just in case we change our mind later.
1649
1650 Buffer descr 0x0000 (128 - 4K)
1651 UBR sched 0x1000 (1K - 4K)
1652 UBR Wait q 0x2000 (1K - 4K)
   Commn queues 0x3000 Packet Ready, Transmit comp(0x3100)
1654 (128 - 256) each
1655 extended VC 0x4000 (1K - 8K)
1656 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1657 CBR sched 0x7000 (as needed)
1658 VC table 0x8000 (1K - 32K)
1659*/
1660
1661static void tx_intr(struct atm_dev *dev)
1662{
1663 IADEV *iadev;
1664 unsigned short status;
1665 unsigned long flags;
1666
1667 iadev = INPH_IA_DEV(dev);
1668
1669 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1670 if (status & TRANSMIT_DONE){
1671
1672 IF_EVENT(printk("Transmit Done Intr logic run\n");)
1673 spin_lock_irqsave(&iadev->tx_lock, flags);
1674 ia_tx_poll(iadev);
1675 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1676 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1677 if (iadev->close_pending)
1678 wake_up(&iadev->close_wait);
1679 }
1680 if (status & TCQ_NOT_EMPTY)
1681 {
1682 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1683 }
1684}
1685
/*
 * tx_dle_intr() - transmit DMA-complete handler.
 * @dev: the ATM device
 *
 * Under the tx lock, walks the tx DLE ring from our cached read pointer
 * to the hardware's current position and, for each completed transfer,
 * unmaps the skb's DMA mapping and either returns the skb to its owner
 * (pop/free) or — for rate-limited VCCs — parks it on the per-VCC
 * txing_skb queue for ia_tx_poll() to release later.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
    IADEV *iadev;
    struct dle *dle, *cur_dle;
    struct sk_buff *skb;
    struct atm_vcc *vcc;
    struct ia_vcc *iavcc;
    u_int dle_lp;
    unsigned long flags;

    iadev = INPH_IA_DEV(dev);
    spin_lock_irqsave(&iadev->tx_lock, flags);
    dle = iadev->tx_dle_q.read;
    /* hardware position in the ring, from the low bits of the tx list
       address register; >>4 converts byte offset to DLE index */
    dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
             (sizeof(struct dle)*DLE_ENTRIES - 1);
    cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
    while (dle != cur_dle)
    {
        /* free the DMAed skb */
        skb = skb_dequeue(&iadev->tx_dma_q);
        if (!skb) break;

        /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
        /* NOTE(review): 'dle - start' is a pointer difference counted
           in DLE elements, yet it is taken modulo 2*sizeof(struct dle);
           for pairs one would expect modulo 2.  Matches long-standing
           behaviour — confirm before changing. */
        if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
            dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
                             DMA_TO_DEVICE);
        }
        vcc = ATM_SKB(skb)->vcc;
        if (!vcc) {
            /* NOTE(review): bails out without updating tx_dle_q.read,
               leaving the remaining completed DLEs for the next run. */
            printk("tx_dle_intr: vcc is null\n");
            spin_unlock_irqrestore(&iadev->tx_lock, flags);
            dev_kfree_skb_any(skb);

            return;
        }
        iavcc = INPH_IA_VCC(vcc);
        if (!iavcc) {
            printk("tx_dle_intr: iavcc is null\n");
            spin_unlock_irqrestore(&iadev->tx_lock, flags);
            dev_kfree_skb_any(skb);
            return;
        }
        if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
            /* fast VCC: hand the skb straight back */
            if ((vcc->pop) && (skb->len != 0))
            {
                vcc->pop(vcc, skb);
            }
            else {
                dev_kfree_skb_any(skb);
            }
        }
        else { /* Hold the rate-limited skb for flow control */
            IA_SKB_STATE(skb) |= IA_DLED;
            skb_queue_tail(&iavcc->txing_skb, skb);
        }
        IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
        if (++dle == iadev->tx_dle_q.end)
            dle = iadev->tx_dle_q.start;
    }
    iadev->tx_dle_q.read = dle;
    spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1748
/*
 * open_tx - set up the transmit side of a VCC.
 * Validates the requested traffic class against the PHY capabilities,
 * derives the effective PCR and the close-drain timeout, sizes the
 * socket send buffer for rate-limited VCs, initializes the on-board
 * main/extended VC table entries for this VCI, and programs the
 * class-specific scheduler state (UBR / ABR / CBR).
 * Returns 0 on success or a negative errno; a plain ATM_NONE txtp is
 * a no-op success.  Called from ia_open() with vcc->dev_data already
 * pointing at a freshly allocated struct ia_vcc.
 */
static int open_tx(struct atm_vcc *vcc)
{
	struct ia_vcc *ia_vcc;
	IADEV *iadev;
	struct main_vc *vc;
	struct ext_vc *evc;
	int ret;
	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);

	/* the 25 Mbit PHY supports UBR only */
	if (iadev->phy_type & FE_25MBIT_PHY) {
	   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
	       printk("IA:  ABR not support\n");
	       return -EINVAL;
	   }
	  if (vcc->qos.txtp.traffic_class == ATM_CBR) {
	       printk("IA:  CBR not support\n");
	       return -EINVAL;
	  }
	}
	ia_vcc =  INPH_IA_VCC(vcc);
	memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
	/* the configured per-descriptor buffer must fit SDU + AAL5 trailer */
	if (vcc->qos.txtp.max_sdu >
			 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
	   printk("IA:  SDU size over (%d) the configured SDU size %d\n",
		  vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
	   vcc->dev_data = NULL;
	   kfree(ia_vcc);
	   return -EINVAL;
	}
	ia_vcc->vc_desc_cnt = 0;
	ia_vcc->txing = 1;

	/* find pcr: fall back to line rate when unspecified, clamp to it
	   otherwise */
	if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
	   vcc->qos.txtp.pcr = iadev->LineRate;
	else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
	   vcc->qos.txtp.pcr = iadev->LineRate;
	else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
	   vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
	if (vcc->qos.txtp.pcr > iadev->LineRate)
	     vcc->qos.txtp.pcr = iadev->LineRate;
	ia_vcc->pcr = vcc->qos.txtp.pcr;

	/* scale the close-drain timeout inversely with the rate */
	if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
	else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
	else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
	else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
	if (ia_vcc->pcr < iadev->rate_limit)
	   skb_queue_head_init (&ia_vcc->txing_skb);
	if (ia_vcc->pcr < iadev->rate_limit) {
	   struct sock *sk = sk_atm(vcc);

	   /* shrink the socket send buffer so a slow VC cannot hoard
	      memory; scaled by how slow the VC is */
	   if (vcc->qos.txtp.max_sdu != 0) {
	       if (ia_vcc->pcr > 60000)
		  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
	       else if (ia_vcc->pcr > 2000)
		  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
	       else
		 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
	   }
	   else
	     sk->sk_sndbuf = 24576;
	}

	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	vc += vcc->vci;
	evc += vcc->vci;
	memset((caddr_t)vc, 0, sizeof(*vc));
	memset((caddr_t)evc, 0, sizeof(*evc));

	/* store the most significant 4 bits of vci as the last 4 bits
	   of first part of atm header.
	   store the last 12 bits of vci as first 12 bits of the second
	   part of the atm header.
	*/
	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

	/* check the following for different traffic classes */
	if (vcc->qos.txtp.traffic_class == ATM_UBR)
	{
		vc->type = UBR;
		vc->status = CRC_APPEND;
		vc->acr = cellrate_to_float(iadev->LineRate);
		if (vcc->qos.txtp.pcr > 0)
			vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
		IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
			      vcc->qos.txtp.max_pcr,vc->acr);)
	}
	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
	{       srv_cls_param_t srv_p;
		IF_ABR(printk("Tx ABR VCC\n");)
		/* start from defaults, then overlay every parameter the
		   caller supplied */
		init_abr_vc(iadev, &srv_p);
		if (vcc->qos.txtp.pcr > 0)
			srv_p.pcr = vcc->qos.txtp.pcr;
		if (vcc->qos.txtp.min_pcr > 0) {
			int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
			/* admission control: committed rates must fit the line */
			if (tmpsum > iadev->LineRate)
				return -EBUSY;
			srv_p.mcr = vcc->qos.txtp.min_pcr;
			iadev->sum_mcr += vcc->qos.txtp.min_pcr;
		}
		else srv_p.mcr = 0;
		if (vcc->qos.txtp.icr)
			srv_p.icr = vcc->qos.txtp.icr;
		if (vcc->qos.txtp.tbe)
			srv_p.tbe = vcc->qos.txtp.tbe;
		if (vcc->qos.txtp.frtt)
			srv_p.frtt = vcc->qos.txtp.frtt;
		if (vcc->qos.txtp.rif)
			srv_p.rif = vcc->qos.txtp.rif;
		if (vcc->qos.txtp.rdf)
			srv_p.rdf = vcc->qos.txtp.rdf;
		if (vcc->qos.txtp.nrm_pres)
			srv_p.nrm = vcc->qos.txtp.nrm;
		if (vcc->qos.txtp.trm_pres)
			srv_p.trm = vcc->qos.txtp.trm;
		if (vcc->qos.txtp.adtf_pres)
			srv_p.adtf = vcc->qos.txtp.adtf;
		if (vcc->qos.txtp.cdf_pres)
			srv_p.cdf = vcc->qos.txtp.cdf;
		if (srv_p.icr > srv_p.pcr)
			srv_p.icr = srv_p.pcr;
		IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
						      srv_p.pcr, srv_p.mcr);)
		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
		/* rechecked here although the 25 Mbit case already bailed
		   out above */
		if (iadev->phy_type & FE_25MBIT_PHY) {
			printk("IA:  CBR not support\n");
			return -EINVAL;
		}
		if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
			IF_CBR(printk("PCR is not available\n");)
			return -1;
		}
		vc->type = CBR;
		vc->status = CRC_APPEND;
		if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
			return ret;
		}
	} else {
		printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
	}

	iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
	IF_EVENT(printk("ia open_tx returning \n");)
	return 0;
}
1900
1901
/*
 * tx_init - one-time initialization of the segmentation (transmit) side.
 * Allocates the TX DMA list-entry (DLE) ring and the per-descriptor
 * CPCS trailer buffers, lays out the transmit control memory (buffer
 * descriptor table, transmit-complete queue, packet-ready queue,
 * CBR/UBR/ABR schedule tables and VC tables per the map in the comment
 * below), programs the SEG registers accordingly and unmasks the
 * transmit interrupts.  Returns 0 on success or -ENOMEM; partial
 * allocations are unwound through the goto chain at the bottom.
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
	u_short tmp16;
	u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
	spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
				readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	/* mask all SEG interrupts while initializing, quiesce and reset
	   the segmentation engine */
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
	iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
	iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
	iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	 Buffer descr	0x0000 (128 - 4K)
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	 CBR Table	0x1800 (as needed) - 6K
	 UBR Table	0x3000 (1K - 4K) - 12K
	 UBR Wait queue	0x4000 (1K - 4K) - 16K
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	 extended VC	0x6000 (1K - 8K) - 24K
	 VC Table	0x8000 (1K - 32K) - 32K

	 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	 and Wait q, which can be allotted later.
	*/

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table: entry 0 is
	   cleared but unused; entries 1..num_tx_desc map consecutive
	   tx_buf_sz chunks of packet RAM */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	/* one host-side CPCS (AAL5) trailer buffer per TX descriptor,
	   DMA-mapped once here and unmapped at teardown */
	iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
				      sizeof(*iadev->tx_buf),
				      GFP_KERNEL);
	if (!iadev->tx_buf) {
	    printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
	    goto err_free_dle;
	}
	for (i= 0; i< iadev->num_tx_desc; i++)
	{
	    struct cpcs_trailer *cpcs;

	    cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
	    if(!cpcs) {
		printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
		goto err_free_tx_bufs;
	    }
	    iadev->tx_buf[i].cpcs = cpcs;
	    iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
						       cpcs,
						       sizeof(*cpcs),
						       DMA_TO_DEVICE);
	}
	iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
					sizeof(*iadev->desc_tbl),
					GFP_KERNEL);
	if (!iadev->desc_tbl) {
	    printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
	    goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
	i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue: sized at 2 u16 slots per descriptor,
	   write pointer started half-way in */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
					      iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
	i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
					      iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

	/* Load local copy of PRQ and TCQ ptrs */
	iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;	/* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
	IF_INIT(printk("Start CBR Init\n");)
#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
	writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
	tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
	IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
	writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

	IF_INIT(printk("value in register = 0x%x\n",
			readw(iadev->seg_reg+CBR_PTR_BASE));)
	/* CBR table addresses are programmed as 16-bit word offsets */
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
	IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
			readw(iadev->seg_reg+CBR_TAB_BEG));)
	writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_END);
	IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
			iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
	IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
	  readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
	  readw(iadev->seg_reg+CBR_TAB_END+1));)

	/* Initialize the CBR Scheduling Table */
	memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
		  0, iadev->num_vc*6);
	iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
	iadev->CbrEntryPt = 0;
	iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
	iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- UBR Table size is 4K
		- UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/

	/* encode num_vc as a size selector: 8K VCs -> 0, halving per step */
	vcsize_sel = 0;
	i = 8*1024;
	while (i != iadev->num_vc) {
	  i /= 2;
	  vcsize_sel++;
	}

	i = MAIN_VC_TABLE * iadev->memSize;
	writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
	i =  EXT_VC_TABLE * iadev->memSize;
	writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
	i = UBR_SCHED_TABLE * iadev->memSize;
	writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
	i = UBR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
						       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- ABR Table size is 2K
		- ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memset.
	*/
	i = ABR_SCHED_TABLE * iadev->memSize;
	writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
	i = ABR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

	i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	/* host-side per-VC bookkeeping (rate test state), plus clearing of
	   the on-board main/extended VC table entries */
	iadev->testTable = kmalloc_array(iadev->num_vc,
					 sizeof(*iadev->testTable),
					 GFP_KERNEL);
	if (!iadev->testTable) {
	   printk("Get freepage  failed\n");
	   goto err_free_desc_tbl;
	}
	for(i=0; i<iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
		iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
						GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
		iadev->testTable[i]->lastTime = 0;
		iadev->testTable[i]->fract = 0;
		iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
	if (iadev->phy_type & FE_25MBIT_PHY) {
	   writew(RATE25, iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	else {
	   writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	/* Set Idle Header Registers to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
	writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

	iadev->close_pending = 0;
	init_waitqueue_head(&iadev->close_wait);
	init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
	skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
	writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	iadev->tx_pkt_cnt = 0;
	iadev->rate_limit = iadev->LineRate / 3;

	return 0;

	/* unwind in reverse order of allocation */
err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2224
/*
 * ia_int - top-level (shared) interrupt handler.
 * Loops while any of the low 7 bus-status bits are pending and
 * dispatches to the RX/TX sub-handlers; the DLE completion bits are
 * acked here by writing 1 back to the bus status register.
 * Returns IRQ_HANDLED if at least one iteration ran, IRQ_NONE otherwise
 * (the IRQ line may be shared).
 */
static irqreturn_t ia_int(int irq, void *dev_id)
{
   struct atm_dev *dev;
   IADEV *iadev;
   unsigned int status;
   int handled = 0;

   dev = dev_id;
   iadev = INPH_IA_DEV(dev);
   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
   {
	handled = 1;
        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
	if (status & STAT_REASSINT)
	{
	   /* reassembly (receive) engine interrupt */
	   IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
	   rx_intr(dev);
	}
	if (status & STAT_DLERINT)
	{
	   /* Clear this bit by writing a 1 to it. */
	   writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   rx_dle_intr(dev);
	}
	if (status & STAT_SEGINT)
	{
	   /* segmentation (transmit) engine interrupt */
	   IF_EVENT(printk("IA: tx_intr \n");)
	   tx_intr(dev);
	}
	if (status & STAT_DLETINT)
	{
	   /* TX DLE completion: ack (write-1-to-clear), then reap */
	   writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   tx_dle_intr(dev);
	}
	if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
	{
	   /* only the front-end interrupt gets explicit handling;
	      ERRINT/MARKINT are silently absorbed by the loop */
	   if (status & STAT_FEINT)
               ia_frontend_intr(iadev);
	}
   }
   return IRQ_RETVAL(handled);
}
2269
2270
2271
2272/*----------------------------- entries --------------------------------*/
/*
 * get_esi - read the adapter's End System Identifier (MAC address)
 * from the MAC1/MAC2 registers into dev->esi, most significant byte
 * first (4 bytes from MAC1, then MAC2_LEN bytes from MAC2).
 * Always returns 0.
 * NOTE(review): the cpu_to_be32(le32_to_cpu(readl(...))) chain is a
 * byte-swap of the little-endian register value -- presumably the ESI
 * is stored byte-reversed in hardware; confirm against the board doc.
 */
static int get_esi(struct atm_dev *dev)
{
	IADEV *iadev;
	int i;
	u32 mac1;
	u16 mac2;

	iadev = INPH_IA_DEV(dev);
	mac1 = cpu_to_be32(le32_to_cpu(readl(
				iadev->reg+IPHASE5575_MAC1)));
	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
	/* unpack big-endian words into the esi byte array, MSB first */
	for (i=0; i<MAC1_LEN; i++)
		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));

	for (i=0; i<MAC2_LEN; i++)
		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
	return 0;
}
2292
/*
 * reset_sar - reset the SAR (segmentation and reassembly) chip.
 * The external reset wipes PCI config space, so all 64 config dwords
 * are saved first and restored afterwards.  Returns 0 on success or
 * the PCI config-access error code.
 */
static int reset_sar(struct atm_dev *dev)
{
	IADEV *iadev;
	int i, error = 1;
	unsigned int pci[64];

	iadev = INPH_IA_DEV(dev);
	/* snapshot the whole 256-byte config header */
	for(i=0; i<64; i++)
	  if ((error = pci_read_config_dword(iadev->pci,
				  i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
	      return error;
	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
	/* restore config space clobbered by the reset */
	for(i=0; i<64; i++)
	  if ((error = pci_write_config_dword(iadev->pci,
					i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
	    return error;
	udelay(5);
	return 0;
}
2312
2313
/*
 * ia_init - per-adapter probe-time initialization.
 * Determines the board variant from the BAR0 size (1MB map = 4K-VC
 * board, 256KB map = 1K-VC board), enables bus mastering, ioremaps the
 * register/RAM window and fills in all the iadev base pointers, reads
 * the ESI (MAC address) and resets the SAR.
 * Returns 0 on success, a negative errno on failure, or 1 if the SAR
 * reset fails.
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	if (iadev->pci_map_size == 0x100000){
	  /* 1MB window: 4K-VC variant */
	  iadev->num_vc = 4096;
	  dev->ci_range.vci_bits = NR_VCI_4K_LD;
	  iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
	  /* 256KB window: 1K-VC variant */
	  iadev->num_vc = 1024;
	  iadev->memSize = 1;
	}
	else {
	   printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
	   return -EINVAL;
	}
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
			    dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
	  iadev->reg,iadev->seg_reg,iadev->reass_reg,
	  iadev->phy, iadev->ram, iadev->seg_ram,
	  iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
	  iounmap(iadev->base);
	  return error;
	}
	printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
		printk("%s%02X",i ? "-" : "",dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
	   iounmap(iadev->base);
	   printk("IA: reset SAR fail, please try again\n");
	   return 1;
	}
	return 0;
}
2426
/*
 * ia_update_stats - accumulate hardware RX/TX counters into the iadev
 * software totals.  Skipped entirely while carrier is down.
 * NOTE(review): the counters are re-read and added on every call
 * (invoked periodically from ia_led_timer); presumably they are
 * clear-on-read in hardware -- confirm, otherwise this double-counts.
 */
static void ia_update_stats(IADEV *iadev) {
    if (!iadev->carrier_detect)
        return;
    /* RX cell count is split across two 16-bit counter registers */
    iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
    iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
    iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
    iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
    return;
}
2438
/*
 * ia_led_timer - periodic (4 Hz) timer callback shared by all adapters.
 * Toggles each board's LED via the bus control register; on the
 * "off" half-cycle it refreshes the hardware stats, on the "on"
 * half-cycle it polls for transmit completions under tx_lock and
 * wakes pending closers.  Re-arms itself for jiffies + HZ/4.
 * NOTE(review): blinking[] has 8 slots while the loop runs to
 * iadev_count -- assumes at most 8 adapters; confirm ia_dev[] bound.
 */
static void ia_led_timer(struct timer_list *unused) {
	unsigned long flags;
	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
	u_char i;
	static u32 ctrl_reg;
	for (i = 0; i < iadev_count; i++) {
	   if (ia_dev[i]) {
	      ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
	      if (blinking[i] == 0) {
		 blinking[i]++;
		 /* LED off phase: also harvest the stats counters */
		 ctrl_reg &= (~CTRL_LED);
		 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
		 ia_update_stats(ia_dev[i]);
	      }
	      else {
		 blinking[i] = 0;
		 /* LED on phase: poll TX completions as a safety net */
		 ctrl_reg |= CTRL_LED;
		 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
		 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
		 if (ia_dev[i]->close_pending)
		    wake_up(&ia_dev[i]->close_wait);
		 ia_tx_poll(ia_dev[i]);
		 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
	      }
	   }
	}
	mod_timer(&ia_timer, jiffies + HZ / 4);
	return;
}
2468
/*
 * ia_phy_put - write one PHY register (32-bit access at the given
 * offset into the front-end register window).
 */
static void ia_phy_put(struct atm_dev *dev, unsigned char value,
	unsigned long addr)
{
	writel(value, INPH_IA_DEV(dev)->phy+addr);
}
2474
/*
 * ia_phy_get - read one PHY register; the 32-bit read is narrowed to
 * the low byte, which is all these PHY registers carry.
 */
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
{
	return readl(INPH_IA_DEV(dev)->phy+addr);
}
2479
/*
 * ia_free_tx - release everything tx_init() allocated: the descriptor
 * table, the per-VC test tables, the DMA-mapped CPCS trailer buffers
 * and the TX DLE ring.
 */
static void ia_free_tx(IADEV *iadev)
{
	int i;

	kfree(iadev->desc_tbl);
	for (i = 0; i < iadev->num_vc; i++)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
	for (i = 0; i < iadev->num_tx_desc; i++) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		/* unmap before freeing the buffer backing the mapping */
		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
}
2499
/*
 * ia_free_rx - release the receive-side allocations: the open-VCC
 * lookup table and the RX DLE ring.
 */
static void ia_free_rx(IADEV *iadev)
{
	kfree(iadev->rx_open);
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			  iadev->rx_dle_dma);
}
2506
/*
 * ia_start - bring the adapter up.
 * Requests the (shared) IRQ, enables PCI memory access + bus
 * mastering, programs the bus control register (burst sizes, interrupt
 * masks), runs tx_init()/rx_init(), releases the front-end reset and
 * initializes the PHY appropriate for the detected phy_type (25 Mbit,
 * DS3/E3, or SUNI for OC3/UTP155).
 * Returns 0 on success; on failure unwinds through the goto chain
 * (rx, tx, irq) and returns a negative errno.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
        if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
                printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
                    dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
        }
        /* @@@ should release IRQ on error */
	/* enabling memory + master */
        if ((error = pci_write_config_word(iadev->pci,
				PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
                printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
                    "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
        }
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
		Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	/* keep LED and front-end-reset state, set all burst sizes and
	   unmask the interrupt sources ia_int() handles */
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
			| CTRL_B8
			| CTRL_B16
			| CTRL_B32
			| CTRL_B48
			| CTRL_B64
			| CTRL_B128
			| CTRL_ERRMASK
			| CTRL_DLETMASK		/* shud be removed l8r */
			| CTRL_DLERMASK
			| CTRL_SEGMASK
			| CTRL_REASSMASK
			| CTRL_FEMASK
			| CTRL_CSPREEMPT;

	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			   readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
	   printk("Bus status reg after init: %08x\n",
			    readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

	ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* release the front-end reset now that TX/RX are initialized */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	phy = 0; /* resolve compiler complaint */
	IF_INIT (
	if ((phy=ia_phy_get(dev,0)) == 0x30)
		printk("IA: pm5346,rev.%d\n",phy&0x0f);
	else
		printk("IA: utopia,rev.%0x\n",phy);)

	if (iadev->phy_type &  FE_25MBIT_PHY)
           ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
           ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2608
/*
 * ia_close - tear down a VCC.
 * TX side: mark the VC not-ready, give in-flight traffic ~500ms, purge
 * this VCC's skbs from the device backlog (re-queueing everyone
 * else's), then wait (rate-scaled timeout) for the outstanding
 * descriptor count to drain before resetting the per-VCI scheduler
 * state and returning any admitted ABR/CBR bandwidth.
 * RX side: invalidate the reassembly and VC table entries for the VCI,
 * restore ABR defaults if applicable, and drain pending RX DLEs.
 * Finally frees the ia_vcc attached to vcc->dev_data.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
        u16 *vc_table;
        IADEV *iadev;
        struct ia_vcc *ia_vcc;
        struct sk_buff *skb = NULL;
        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
        unsigned long closetime, flags;

        iadev = INPH_IA_DEV(vcc->dev);
        ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
                                              ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
        skb_queue_head_init (&tmp_tx_backlog);
        skb_queue_head_init (&tmp_vcc_backlog);
        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
           iadev->close_pending++;
	   /* uninterruptible ~500ms grace period for in-flight traffic */
	   prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
	   schedule_timeout(msecs_to_jiffies(500));
	   finish_wait(&iadev->timeout_wait, &wait);
           spin_lock_irqsave(&iadev->tx_lock, flags);
	   /* pull only this VCC's skbs out of the shared backlog; other
	      VCCs' skbs are parked on tmp_tx_backlog and restored below */
           while((skb = skb_dequeue(&iadev->tx_backlog))) {
              if (ATM_SKB(skb)->vcc == vcc){
                 if (vcc->pop) vcc->pop(vcc, skb);
                 else dev_kfree_skb_any(skb);
              }
              else
                 skb_queue_tail(&tmp_tx_backlog, skb);
           }
           while((skb = skb_dequeue(&tmp_tx_backlog)))
             skb_queue_tail(&iadev->tx_backlog, skb);
           IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
           closetime = 300000 / ia_vcc->pcr;
           if (closetime == 0)
              closetime = 1;
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
	   /* wait for all outstanding descriptors of this VC to complete */
           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
           spin_lock_irqsave(&iadev->tx_lock, flags);
           iadev->close_pending--;
	   /* reset the per-VCI software scheduler state back to idle UBR */
           iadev->testTable[vcc->vci]->lastTime = 0;
           iadev->testTable[vcc->vci]->fract = 0;
           iadev->testTable[vcc->vci]->vc_status = VC_UBR;
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
              if (vcc->qos.txtp.min_pcr > 0)
                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
           }
           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
              ia_vcc = INPH_IA_VCC(vcc);
              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
              ia_cbrVc_close (vcc);
           }
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
        }

        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
           // reset reass table
           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = NO_AAL5_PKT;
           // reset vc table
           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = (vcc->vci << 6) | 15;
           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
              struct abr_vc_table __iomem *abr_vc_table =
                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
              abr_vc_table +=  vcc->vci;
              abr_vc_table->rdf = 0x0003;
              abr_vc_table->air = 0x5eb1;
           }
           // Drain the packets
           rx_dle_intr(vcc->dev);
           iadev->rx_open[vcc->vci] = NULL;
        }
	kfree(INPH_IA_VCC(vcc));
        ia_vcc = NULL;
        vcc->dev_data = NULL;
        clear_bit(ATM_VF_ADDR,&vcc->flags);
        return;
}
2693
2694static int ia_open(struct atm_vcc *vcc)
2695{
2696 struct ia_vcc *ia_vcc;
2697 int error;
2698 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2699 {
2700 IF_EVENT(printk("ia: not partially allocated resources\n");)
2701 vcc->dev_data = NULL;
2702 }
2703 if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2704 {
2705 IF_EVENT(printk("iphase open: unspec part\n");)
2706 set_bit(ATM_VF_ADDR,&vcc->flags);
2707 }
2708 if (vcc->qos.aal != ATM_AAL5)
2709 return -EINVAL;
2710 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2711 vcc->dev->number, vcc->vpi, vcc->vci);)
2712
2713 /* Device dependent initialization */
2714 ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2715 if (!ia_vcc) return -ENOMEM;
2716 vcc->dev_data = ia_vcc;
2717
2718 if ((error = open_rx(vcc)))
2719 {
2720 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2721 ia_close(vcc);
2722 return error;
2723 }
2724
2725 if ((error = open_tx(vcc)))
2726 {
2727 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2728 ia_close(vcc);
2729 return error;
2730 }
2731
2732 set_bit(ATM_VF_READY,&vcc->flags);
2733
2734#if 0
2735 {
2736 static u8 first = 1;
2737 if (first) {
2738 ia_timer.expires = jiffies + 3*HZ;
2739 add_timer(&ia_timer);
2740 first = 0;
2741 }
2742 }
2743#endif
2744 IF_EVENT(printk("ia open returning\n");)
2745 return 0;
2746}
2747
/* atmdev_ops .change_qos handler: this driver does not reprogram the
 * hardware for QoS renegotiation; the request is accepted as-is.
 * NOTE(review): returning 0 reports success without changing anything --
 * confirm callers tolerate the parameters not actually taking effect. */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2753
2754static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2755{
2756 IA_CMDBUF ia_cmds;
2757 IADEV *iadev;
2758 int i, board;
2759 u16 __user *tmps;
2760 IF_EVENT(printk(">ia_ioctl\n");)
2761 if (cmd != IA_CMD) {
2762 if (!dev->phy->ioctl) return -EINVAL;
2763 return dev->phy->ioctl(dev,cmd,arg);
2764 }
2765 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2766 board = ia_cmds.status;
2767
2768 if ((board < 0) || (board > iadev_count))
2769 board = 0;
2770 board = array_index_nospec(board, iadev_count + 1);
2771
2772 iadev = ia_dev[board];
2773 switch (ia_cmds.cmd) {
2774 case MEMDUMP:
2775 {
2776 switch (ia_cmds.sub_cmd) {
2777 case MEMDUMP_SEGREG:
2778 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2779 tmps = (u16 __user *)ia_cmds.buf;
2780 for(i=0; i<0x80; i+=2, tmps++)
2781 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2782 ia_cmds.status = 0;
2783 ia_cmds.len = 0x80;
2784 break;
2785 case MEMDUMP_REASSREG:
2786 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2787 tmps = (u16 __user *)ia_cmds.buf;
2788 for(i=0; i<0x80; i+=2, tmps++)
2789 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2790 ia_cmds.status = 0;
2791 ia_cmds.len = 0x80;
2792 break;
2793 case MEMDUMP_FFL:
2794 {
2795 ia_regs_t *regs_local;
2796 ffredn_t *ffL;
2797 rfredn_t *rfL;
2798
2799 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2800 regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2801 if (!regs_local) return -ENOMEM;
2802 ffL = ®s_local->ffredn;
2803 rfL = ®s_local->rfredn;
2804 /* Copy real rfred registers into the local copy */
2805 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2806 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2807 /* Copy real ffred registers into the local copy */
2808 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2809 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2810
2811 if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2812 kfree(regs_local);
2813 return -EFAULT;
2814 }
2815 kfree(regs_local);
2816 printk("Board %d registers dumped\n", board);
2817 ia_cmds.status = 0;
2818 }
2819 break;
2820 case READ_REG:
2821 {
2822 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2823 desc_dbg(iadev);
2824 ia_cmds.status = 0;
2825 }
2826 break;
2827 case 0x6:
2828 {
2829 ia_cmds.status = 0;
2830 printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2831 printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2832 }
2833 break;
2834 case 0x8:
2835 {
2836 struct k_sonet_stats *stats;
2837 stats = &PRIV(_ia_dev[board])->sonet_stats;
2838 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2839 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2840 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2841 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2842 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2843 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2844 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2845 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2846 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2847 }
2848 ia_cmds.status = 0;
2849 break;
2850 case 0x9:
2851 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2852 for (i = 1; i <= iadev->num_rx_desc; i++)
2853 free_desc(_ia_dev[board], i);
2854 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2855 iadev->reass_reg+REASS_MASK_REG);
2856 iadev->rxing = 1;
2857
2858 ia_cmds.status = 0;
2859 break;
2860
2861 case 0xb:
2862 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2863 ia_frontend_intr(iadev);
2864 break;
2865 case 0xa:
2866 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2867 {
2868 ia_cmds.status = 0;
2869 IADebugFlag = ia_cmds.maddr;
2870 printk("New debug option loaded\n");
2871 }
2872 break;
2873 default:
2874 ia_cmds.status = 0;
2875 break;
2876 }
2877 }
2878 break;
2879 default:
2880 break;
2881
2882 }
2883 return 0;
2884}
2885
/*
 * ia_pkt_tx - push one AAL5 packet onto the hardware transmit path.
 *
 * Allocates a transmit descriptor from the TCQ (used here as a free
 * list), fills in the CPCS trailer and the buffer descriptor, builds a
 * two-entry DLE chain (payload + trailer) for the DMA engine, and
 * bumps the transaction counter to start the transfer.
 *
 * Returns 0 when the skb was consumed (handed to DMA, or deliberately
 * dropped: closed VC, oversized, misaligned, bad descriptor), and 1
 * only when no descriptor is free so the caller can back-log the skb.
 *
 * NOTE(review): assumes iadev->tx_lock is held -- ia_send() takes it
 * before calling here; confirm any other callers do the same.
 */
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
	IADEV *iadev;
	struct dle *wr_ptr;
	struct tx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	int comp_code;
	int total_len;
	struct cpcs_trailer *trailer;
	struct ia_vcc *iavcc;

	iadev = INPH_IA_DEV(vcc->dev);
	iavcc = INPH_IA_VCC(vcc);
	/* VC no longer transmitting: drop the packet (treated as success). */
	if (!iavcc->txing) {
		printk("discard packet on closed VC\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}

	/* 8 bytes of the tx buffer are reserved for the CPCS trailer. */
	if (skb->len > iadev->tx_buf_sz - 8) {
		printk("Transmit size over tx buffer size\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}
	/* DMA requires a 32-bit-aligned payload. */
	if ((unsigned long)skb->data & 3) {
		printk("Misaligned SKB\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}
	/* Get a descriptor number from our free descriptor queue
	   We get the descr number from the TCQ now, since I am using
	   the TCQ as a free buffer queue. Initially TCQ will be
	   initialized with all the descriptors and is hence, full.
	*/
	desc = get_desc (iadev, iavcc);
	if (desc == 0xffff)
		return 1;
	/* Upper 3 bits carry the completion code, lower 13 the descriptor. */
	comp_code = desc >> 13;
	desc &= 0x1fff;

	if ((desc == 0) || (desc > iadev->num_tx_desc))
	{
		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
		/* NOTE(review): the dropped packet is still counted in
		 * stats->tx here -- confirm this is intentional. */
		atomic_inc(&vcc->stats->tx);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;   /* return SUCCESS */
	}

	if (comp_code)
	{
		IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
			desc, comp_code);)
	}

	/* remember the desc and vcc mapping */
	iavcc->vc_desc_cnt++;
	iadev->desc_tbl[desc-1].iavcc = iavcc;
	iadev->desc_tbl[desc-1].txskb = skb;
	IA_SKB_STATE(skb) = 0;

	/* Consume the TCQ entry, wrapping at the end of the queue. */
	iadev->ffL.tcq_rd += 2;
	if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
		iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);

	/* Put the descriptor number in the packet ready queue
	   and put the updated write pointer in the DLE field
	*/
	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;

	iadev->ffL.prq_wr += 2;
	if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
		iadev->ffL.prq_wr = iadev->ffL.prq_st;

	/* Figure out the exact length of the packet and padding required to
	   make it aligned on a 48 byte boundary. */
	total_len = skb->len + sizeof(struct cpcs_trailer);
	total_len = ((total_len + 47) / 48) * 48;
	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)

	/* Put the packet in a tx buffer */
	trailer = iadev->tx_buf[desc-1].cpcs;
	IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
		skb, skb->data, skb->len, desc);)
	trailer->control = 0;
	/*big endian*/
	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
	trailer->crc32 = 0; /* not needed - dummy bytes */

	/* Display the packet */
	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
		skb->len, tcnter++);
		xdump(skb->data, skb->len, "TX: ");
		printk("\n");)

	/* Build the buffer descriptor */
	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
	buf_desc_ptr += desc; /* points to the corresponding entry */
	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
	/* Huh ? p.115 of users guide describes this as a read-only register */
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	buf_desc_ptr->vc_index = vcc->vci;
	buf_desc_ptr->bytes = total_len;

	if (vcc->qos.txtp.traffic_class == ATM_ABR)
		clear_lockup (vcc, iadev);

	/* Build the DLE structure */
	wr_ptr = iadev->tx_dle_q.write;
	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      skb->len, DMA_TO_DEVICE);
	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
				 buf_desc_ptr->buf_start_lo;
	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
	wr_ptr->bytes = skb->len;

	/* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
	/* NOTE(review): (bytes >> 2) == 0xb matches 0x2c-0x2f, i.e. also
	 * 0x2c, which the comment above does not list -- confirm range. */
	if ((wr_ptr->bytes >> 2) == 0xb)
		wr_ptr->bytes = 0x30;

	wr_ptr->mode = TX_DLE_PSI;
	wr_ptr->prq_wr_ptr_data = 0;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

	/* Build trailer dle */
	wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
	wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
		buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);

	wr_ptr->bytes = sizeof(struct cpcs_trailer);
	wr_ptr->mode = DMA_INT_ENABLE;
	wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

	iadev->tx_dle_q.write = wr_ptr;
	ATM_DESC(skb) = vcc->vci;
	skb_queue_tail(&iadev->tx_dma_q, skb);

	atomic_inc(&vcc->stats->tx);
	iadev->tx_pkt_cnt++;
	/* Increment transaction counter */
	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

#if 0
	/* add flow control logic */
	if (atomic_read(&vcc->stats->tx) % 20 == 0) {
		if (iavcc->vc_desc_cnt > 10) {
			vcc->tx_quota = vcc->tx_quota * 3 / 4;
			printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
			iavcc->flow_inc = -1;
			iavcc->saved_tx_quota = vcc->tx_quota;
		} else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
			// vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
			printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
			iavcc->flow_inc = 0;
		}
	}
#endif
	IF_TX(printk("ia send done\n");)
	return 0;
}
3065
3066static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3067{
3068 IADEV *iadev;
3069 unsigned long flags;
3070
3071 iadev = INPH_IA_DEV(vcc->dev);
3072 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3073 {
3074 if (!skb)
3075 printk(KERN_CRIT "null skb in ia_send\n");
3076 else dev_kfree_skb_any(skb);
3077 return -EINVAL;
3078 }
3079 spin_lock_irqsave(&iadev->tx_lock, flags);
3080 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3081 dev_kfree_skb_any(skb);
3082 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3083 return -EINVAL;
3084 }
3085 ATM_SKB(skb)->vcc = vcc;
3086
3087 if (skb_peek(&iadev->tx_backlog)) {
3088 skb_queue_tail(&iadev->tx_backlog, skb);
3089 }
3090 else {
3091 if (ia_pkt_tx (vcc, skb)) {
3092 skb_queue_tail(&iadev->tx_backlog, skb);
3093 }
3094 }
3095 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3096 return 0;
3097
3098}
3099
3100static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3101{
3102 int left = *pos, n;
3103 char *tmpPtr;
3104 IADEV *iadev = INPH_IA_DEV(dev);
3105 if(!left--) {
3106 if (iadev->phy_type == FE_25MBIT_PHY) {
3107 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3108 return n;
3109 }
3110 if (iadev->phy_type == FE_DS3_PHY)
3111 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3112 else if (iadev->phy_type == FE_E3_PHY)
3113 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3114 else if (iadev->phy_type == FE_UTP_OPTION)
3115 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3116 else
3117 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3118 tmpPtr = page + n;
3119 if (iadev->pci_map_size == 0x40000)
3120 n += sprintf(tmpPtr, "-1KVC-");
3121 else
3122 n += sprintf(tmpPtr, "-4KVC-");
3123 tmpPtr = page + n;
3124 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3125 n += sprintf(tmpPtr, "1M \n");
3126 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3127 n += sprintf(tmpPtr, "512K\n");
3128 else
3129 n += sprintf(tmpPtr, "128K\n");
3130 return n;
3131 }
3132 if (!left) {
3133 return sprintf(page, " Number of Tx Buffer: %u\n"
3134 " Size of Tx Buffer : %u\n"
3135 " Number of Rx Buffer: %u\n"
3136 " Size of Rx Buffer : %u\n"
3137 " Packets Received : %u\n"
3138 " Packets Transmitted: %u\n"
3139 " Cells Received : %u\n"
3140 " Cells Transmitted : %u\n"
3141 " Board Dropped Cells: %u\n"
3142 " Board Dropped Pkts : %u\n",
3143 iadev->num_tx_desc, iadev->tx_buf_sz,
3144 iadev->num_rx_desc, iadev->rx_buf_sz,
3145 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3146 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3147 iadev->drop_rxcell, iadev->drop_rxpkt);
3148 }
3149 return 0;
3150}
3151
/* Operations exported to the ATM core; registered per board via
 * atm_dev_register() in ia_init_one(). */
static const struct atmdev_ops ops = {
	.open = ia_open,
	.close = ia_close,
	.ioctl = ia_ioctl,
	.send = ia_send,
	.phy_put = ia_phy_put,
	.phy_get = ia_phy_get,
	.change_qos = ia_change_qos,
	.proc_read = ia_proc_read,
	.owner = THIS_MODULE,
};
3163
/*
 * ia_init_one - PCI probe: allocate the per-board IADEV, register an
 * atm_dev with the ATM core, and bring the board up via ia_init() and
 * ia_start().  On any failure the steps already taken are unwound in
 * reverse order through the goto cleanup chain.
 */
static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
		iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	/* Publish the board in the global index tables before ia_init()
	 * and ia_start(); the failure path below rolls this back.
	 * NOTE(review): iadev_count is not checked here against the
	 * capacity of ia_dev[]/_ia_dev[] -- confirm the array size
	 * declared elsewhere in the file bounds the number of boards. */
	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	/* Link the new device at the head of the board list. */
	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
3223
/*
 * ia_remove_one - PCI remove: quiesce the PHY, unhook the board from
 * the global tables, deregister the atm_dev and release its resources.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	/* NOTE(review): this assumes boards go away in LIFO order --
	 * removing any board other than the most recently probed one
	 * would clear the wrong ia_dev[]/_ia_dev[] slot; confirm
	 * whether out-of-order hot removal is possible. */
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}
3253
/* PCI IDs claimed by this driver: Interphase (i)Chip devices 0x0008
 * and 0x0009, any subsystem. */
static const struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3260
/* PCI driver glue: probe/remove wired to ia_init_one()/ia_remove_one(). */
static struct pci_driver ia_driver = {
	.name = DEV_LABEL,
	.id_table = ia_pci_tbl,
	.probe = ia_init_one,
	.remove = ia_remove_one,
};
3267
3268static int __init ia_module_init(void)
3269{
3270 int ret;
3271
3272 ret = pci_register_driver(&ia_driver);
3273 if (ret >= 0) {
3274 ia_timer.expires = jiffies + 3*HZ;
3275 add_timer(&ia_timer);
3276 } else
3277 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3278 return ret;
3279}
3280
/*
 * Module exit: unregister the PCI driver (which removes every board)
 * and stop the periodic timer.
 *
 * NOTE(review): ia_timer's handler is not visible in this chunk; if it
 * re-arms itself, del_timer_sync() alone does not prevent the handler
 * from re-arming concurrently -- confirm the handler checks a stop
 * condition (or use timer_shutdown_sync() on kernels that provide it).
 */
static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

	del_timer_sync(&ia_timer);
}
3287
/* Module load/unload hooks. */
module_init(ia_module_init);
module_exit(ia_module_exit);