Loading...
1/******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
6 Version: 1.0
7*******************************************************************************
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
13
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
21 supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The following is the change log and history:
25
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
30 around for the hardware anomalies.
31 Add the CBR support.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
37 Add SMP support.
38
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41*******************************************************************************/
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/mm.h>
46#include <linux/pci.h>
47#include <linux/errno.h>
48#include <linux/atm.h>
49#include <linux/atmdev.h>
50#include <linux/sonet.h>
51#include <linux/skbuff.h>
52#include <linux/time.h>
53#include <linux/delay.h>
54#include <linux/uio.h>
55#include <linux/init.h>
56#include <linux/interrupt.h>
57#include <linux/wait.h>
58#include <linux/slab.h>
59#include <asm/io.h>
60#include <linux/atomic.h>
61#include <asm/uaccess.h>
62#include <asm/string.h>
63#include <asm/byteorder.h>
64#include <linux/vmalloc.h>
65#include <linux/jiffies.h>
66#include "iphase.h"
67#include "suni.h"
/*
 * Swap the two bytes of a 16-bit value.
 * Bug fix: the macro argument is now fully parenthesized so expansion is
 * correct for compound expressions like swap_byte_order(a + b).
 */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))

/* SUNI PHY private state hung off the ATM device (parenthesized arg). */
#define PRIV(dev) ((struct suni_priv *) (dev)->phy_data)
71
/* Forward declarations for helpers defined later in this file. */
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
static void desc_dbg(IADEV *iadev);

/* Up to 8 adapters supported; slots filled in probe order. */
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
/* LED blink timer (handler defined later in the file). */
static void ia_led_timer(unsigned long arg);
static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
/* TX/RX buffer counts and sizes; defaults may be overridden per-module-load. */
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
/* Debug category bitmask; see the IF_IADBG_* flags (disabled by default). */
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;

module_param(IA_TX_BUF, int, 0);
module_param(IA_TX_BUF_SZ, int, 0);
module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
module_param(IADebugFlag, uint, 0644);

MODULE_LICENSE("GPL");
92
93/**************************** IA_LIB **********************************/
94
95static void ia_init_rtn_q (IARTN_Q *que)
96{
97 que->next = NULL;
98 que->tail = NULL;
99}
100
101static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
102{
103 data->next = NULL;
104 if (que->next == NULL)
105 que->next = que->tail = data;
106 else {
107 data->next = que->next;
108 que->next = data;
109 }
110 return;
111}
112
113static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
114 IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
115 if (!entry) return -1;
116 entry->data = data;
117 entry->next = NULL;
118 if (que->next == NULL)
119 que->next = que->tail = entry;
120 else {
121 que->tail->next = entry;
122 que->tail = que->tail->next;
123 }
124 return 1;
125}
126
127static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
128 IARTN_Q *tmpdata;
129 if (que->next == NULL)
130 return NULL;
131 tmpdata = que->next;
132 if ( que->next == que->tail)
133 que->next = que->tail = NULL;
134 else
135 que->next = que->next->next;
136 return tmpdata;
137}
138
/*
 * Walk the Transmit Complete Queue (TCQ) from the host's last-seen
 * position (host_tcq_wr) up to the adapter's current write pointer and
 * retire every completed TX descriptor: decrement the owning VC's
 * in-flight count, clear the descriptor-table slot and, for VCs below
 * the rate limit, queue the entry on tx_return_q so ia_tx_poll() can
 * release the skb.
 */
static void ia_hack_tcq(IADEV *dev) {

  u_short desc1;
  u_short tcq_wr;
  struct ia_vcc *iavcc_r = NULL;

  /* adapter's TCQ write pointer (16-bit offset into segmentation RAM) */
  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
  while (dev->host_tcq_wr != tcq_wr) {
     /* 1-based descriptor number in this TCQ slot; 0 means empty */
     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
     if (!desc1) ;
     else if (!dev->desc_tbl[desc1 -1].timestamp) {
        /* already retired (e.g. by get_desc() recovery): just clear slot */
        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
     }
     else if (dev->desc_tbl[desc1 -1].timestamp) {
        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
           printk("IA: Fatal err in get_desc\n");
           /* NOTE(review): this continue skips the pointer advance below,
            * so the same slot is re-read forever if iavcc is ever NULL
            * here - potential infinite loop. Confirm before changing. */
           continue;
        }
        iavcc_r->vc_desc_cnt--;
        dev->desc_tbl[desc1 -1].timestamp = 0;
        IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
                                   dev->desc_tbl[desc1 -1].txskb, desc1);)
        if (iavcc_r->pcr < dev->rate_limit) {
           /* slow VC: hand the skb back through the TX return queue */
           IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
           if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
              printk("ia_hack_tcq: No memory available\n");
        }
        dev->desc_tbl[desc1 -1].iavcc = NULL;
        dev->desc_tbl[desc1 -1].txskb = NULL;
     }
     /* advance to the next 16-bit TCQ slot, wrapping at the queue end */
     dev->host_tcq_wr += 2;
     if (dev->host_tcq_wr > dev->ffL.tcq_ed)
        dev->host_tcq_wr = dev->ffL.tcq_st;
  }
} /* ia_hack_tcq */
175
/*
 * Allocate a free TX descriptor number for @iavcc from the TCQ.
 *
 * Every ~50 jiffies (or whenever the TCQ looks empty) a recovery sweep
 * also runs: any descriptor whose timestamp is older than its VC's
 * ltimeout is assumed stuck in hardware and is pushed back into the TCQ
 * so it becomes allocatable again.
 *
 * Returns the 1-based descriptor number, or 0xFFFF when none is free.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short desc_num, i;
  struct sk_buff *skb;
  struct ia_vcc *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;   /* jiffies of the last recovery sweep */
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;   /* slot not in use */
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* back the TCQ read pointer up one slot (with wrap) and
            * re-insert this descriptor number there */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd = dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;   /* queue drained: nothing allocatable */

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* skip empty slots and descriptors that are still in flight */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
        dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* nonzero timestamp marks the descriptor as in use */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
233
/*
 * ABR hardware-lockup workaround. Every 5th call per VC, sample the
 * segmentation engine state for @vcc; if it appears stuck (same state
 * across a 10us delay, or scheduler position/fraction unchanged), force
 * the VC back to idle, re-insert it into the ABR schedule table and
 * restart the segmentation engine.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char foundLockUp;
  vcstatus_t *vcstatus;
  u_short *shd_tbl;
  u_short tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     if( vcstatus->cnt == 0x05 ) {   /* check only every 5th invocation */
        abr_vc += vcc->vci;
        eabr_vc += vcc->vci;
        if( eabr_vc->last_desc ) {
           if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
              /* still in ABR_STATE with a pending descriptor => stuck */
              if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
                 foundLockUp = 1;
           }
           else {
              /* scheduler made no progress since the previous sample? */
              tempCellSlot = abr_vc->last_cell_slot;
              tempFract = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
                 foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
           }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
        /* halt the segmentation engine before touching the VC state */
        writew(0xFFFD, dev->seg_reg+MODE_REG_0);
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
        /* re-insert the VC into the first free ABR schedule slot */
        shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
        for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
        if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        /* restart segmentation, unmask TX interrupts, ack pending ones */
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
        vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
293
294/*
295** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
296**
297** +----+----+------------------+-------------------------------+
298** | R | NZ | 5-bit exponent | 9-bit mantissa |
299** +----+----+------------------+-------------------------------+
300**
301** R = reserved (written as 0)
302** NZ = 0 if 0 cells/sec; 1 otherwise
303**
304** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
305*/
/*
 * Convert a 24-bit cell rate (cells/sec) to the adapter's 16-bit
 * floating point format (see format description above): NZ flag,
 * 5-bit exponent, 9-bit mantissa; rate = 1.mmmmmmmmm * 2^eeeee.
 *
 * Returns 0 for a rate of zero cells/sec.
 */
static u16
cellrate_to_float(u32 cr)
{

#define NZ 0x4000
#define M_BITS 9 /* Number of bits in mantissa */
#define E_BITS 5 /* Number of bits in exponent */
#define M_MASK 0x1ff
#define E_MASK 0x1f
	u16 flot;
	u32 tmp = cr & 0x00ffffff; /* only the low 24 bits are significant */
	int i = 0;
	if (cr == 0)
		return 0;
	/*
	 * Bug fix: if cr is nonzero but its low 24 bits are all zero
	 * (cr >= 2^24), the loop below never terminated because tmp
	 * stays 0 and never reaches 1. Such a rate is not representable
	 * in the 24-bit format, so report it as zero.
	 */
	if (tmp == 0)
		return 0;
	/* locate the most significant set bit: i becomes the exponent */
	while (tmp != 1) {
		tmp >>= 1;
		i++;
	}
	if (i == M_BITS)
		flot = NZ | (i << M_BITS) | (cr & M_MASK);
	else if (i < M_BITS)
		flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
	else
		flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
	return flot;
}
332
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
static u32
float_to_cellrate(u16 rate)
{
	u32 exp, mantissa, cps;
	/* NZ clear means the encoded rate is zero cells/sec */
	if ((rate & NZ) == 0)
		return 0;
	exp = (rate >> M_BITS) & E_MASK;
	mantissa = rate & M_MASK;
	if (exp == 0)
		return 1;
	/* restore the implicit leading 1 of the mantissa */
	cps = (1 << M_BITS) | mantissa;
	if (exp == M_BITS)
		cps = cps;	/* no-op branch; harmless (this code is compiled out) */
	else if (exp > M_BITS)
		cps <<= (exp - M_BITS);
	else
		cps >>= (M_BITS - exp);
	return cps;
}
#endif
357
358static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
359 srv_p->class_type = ATM_ABR;
360 srv_p->pcr = dev->LineRate;
361 srv_p->mcr = 0;
362 srv_p->icr = 0x055cb7;
363 srv_p->tbe = 0xffffff;
364 srv_p->frtt = 0x3a;
365 srv_p->rif = 0xf;
366 srv_p->rdf = 0xb;
367 srv_p->nrm = 0x4;
368 srv_p->trm = 0x7;
369 srv_p->cdf = 0x3;
370 srv_p->adtf = 50;
371}
372
/*
 * Program an ABR VC into the adapter's VC tables.
 *
 * @flag == 1: initialize the FFRED (segmentation/TX) side from the
 * service parameters in @srv_p; @flag == 0: initialize the RFRED
 * (reassembly/RX) side. Returns 0 (the parameter sanity checks that
 * could return error codes are compiled out below).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry *f_abr_vc;
  r_vc_abr_entry *r_abr_vc;
  u32 icr;
  u8 trm, nrm, crm;
  u16 adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
                                  /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       /* TRM field: hardware encoding of the time between RM cells */
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR is capped by the rate implied by TBE over FRTT */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
                                ((srv_p->tbe/srv_p->frtt)*1000000) :
                                (1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       /* ADTF in units of ~1.22ms (8192 cycles at 10MHz - TODO confirm) */
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       /* mark the VCI in the reassembly lookup table as ABR */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return 0;
}
/*
 * Reserve CBR schedule-table bandwidth for @vcc.
 *
 * Converts the requested PCR into a number of schedule-table entries
 * (rounded to the nearest granularity step), then spreads those entries
 * as evenly as possible through the CBR schedule table, probing outward
 * from each ideal slot until a free one is found. Enables CBR in the
 * segmentation engine when the first CBR VC is opened.
 *
 * Returns 0 on success, -1 if no PCR was given, -EBUSY if insufficient
 * table bandwidth remains.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int idealSlot =0, testSlot, toBeAssigned, inc;
   u32 spacing;
   u16 *SchedTbl, *TstSchedTbl;
   u16 cbrVC, vcIndex;
   u32 fracSlot = 0;
   u32 sp_mod = 0;
   u32 sp_mod2 = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow = entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   /* round up if the request is much closer to the next step */
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2 = ((sp_mod + sp_mod2) % entries);   // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          /* probe alternately below (idealSlot - inc) and above
             (idealSlot + inc) the ideal position, with wraparound */
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       /* first CBR VC: turn the CBR scheduler on */
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
574static void ia_cbrVc_close (struct atm_vcc *vcc) {
575 IADEV *iadev;
576 u16 *SchedTbl, NullVci = 0;
577 u32 i, NumFound;
578
579 iadev = INPH_IA_DEV(vcc->dev);
580 iadev->NumEnabledCBR--;
581 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
582 if (iadev->NumEnabledCBR == 0) {
583 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
584 IF_CBR (printk("CBR support disabled\n");)
585 }
586 NumFound = 0;
587 for (i=0; i < iadev->CbrTotEntries; i++)
588 {
589 if (*SchedTbl == vcc->vci) {
590 iadev->CbrRemEntries++;
591 *SchedTbl = NullVci;
592 IF_CBR(NumFound++;)
593 }
594 SchedTbl++;
595 }
596 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
597}
598
599static int ia_avail_descs(IADEV *iadev) {
600 int tmp = 0;
601 ia_hack_tcq(iadev);
602 if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
603 tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
604 else
605 tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
606 iadev->ffL.tcq_st) / 2;
607 return tmp;
608}
609
610static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
611
612static int ia_que_tx (IADEV *iadev) {
613 struct sk_buff *skb;
614 int num_desc;
615 struct atm_vcc *vcc;
616 num_desc = ia_avail_descs(iadev);
617
618 while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
619 if (!(vcc = ATM_SKB(skb)->vcc)) {
620 dev_kfree_skb_any(skb);
621 printk("ia_que_tx: Null vcc\n");
622 break;
623 }
624 if (!test_bit(ATM_VF_READY,&vcc->flags)) {
625 dev_kfree_skb_any(skb);
626 printk("Free the SKB on closed vci %d \n", vcc->vci);
627 break;
628 }
629 if (ia_pkt_tx (vcc, skb)) {
630 skb_queue_head(&iadev->tx_backlog, skb);
631 }
632 num_desc--;
633 }
634 return 0;
635}
636
/*
 * Complete finished transmissions: for every entry on tx_return_q,
 * locate the matching skb on the VC's txing_skb list and release it
 * (via vcc->pop when provided, else freed). Skbs dequeued before the
 * match that were never marked IA_TX_DONE are reported as lost. Finally
 * kicks ia_que_tx() to refill freed descriptors from the backlog.
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q * rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       /* release every skb queued ahead of the completed one; any that
          lack IA_TX_DONE were apparently dropped by the hardware */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* completed skb not on the list: requeue the return entry
             for a later poll rather than losing it */
          IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
/*
 * Write the 16-bit @val to the serial EEPROM (NOVRAM) at @addr by
 * bit-banging the NVRAM_* helper macros, MSB first. Compiled out:
 * kept for reference only.
 */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
	u32 t;
	int i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	/* busy-wait for the NOVRAM to signal write completion via NVDO */
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 * NOTE(review): the statement below has no trailing semicolon; it
	 * only compiles because this block is inside #if 0.
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
734
/*
 * Read a 16-bit word from the serial EEPROM (NOVRAM) at @addr by
 * bit-banging the NVRAM_* helper macros. Bits arrive MSB first.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short val;
	u32 t;
	int i;
	/*
	 * Read the first bit that was clocked with the falling edge of
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
758
/*
 * Read the adapter's EEPROM "memory type" word and derive the buffer
 * geometry (descriptor counts and buffer sizes), the PHY type and the
 * usable line rate. Default buffer counts are scaled down on boards
 * with less packet RAM; explicit module parameters are kept as given.
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      /* full-size board: use the defaults as-is */
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* 512K board: halve the default counts */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      /* smallest memory option: one eighth of the default counts */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   /* RX packet RAM starts right after the TX buffer area */
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   /* NOTE(review): this compiled-out fragment is syntactically broken
      (unmatched brace before "else"); kept verbatim for reference. */
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   /* line rates are payload rates: raw bit rate / 8 * (26/27) / 53 cells */
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
      iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
819
820static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
821{
822 return readl(ia->phy + (reg >> 2));
823}
824
825static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
826{
827 writel(val, ia->phy + (reg >> 2));
828}
829
830static void ia_frontend_intr(struct iadev_priv *iadev)
831{
832 u32 status;
833
834 if (iadev->phy_type & FE_25MBIT_PHY) {
835 status = ia_phy_read32(iadev, MB25_INTR_STATUS);
836 iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
837 } else if (iadev->phy_type & FE_DS3_PHY) {
838 ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
839 status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
840 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
841 } else if (iadev->phy_type & FE_E3_PHY) {
842 ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
843 status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
844 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
845 } else {
846 status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
847 iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
848 }
849
850 printk(KERN_INFO "IA: SUNI carrier %s\n",
851 iadev->carrier_detect ? "detected" : "lost signal");
852}
853
/*
 * Initialize the 25.6 Mbit (MB25) front end and take an initial carrier
 * sample from the interrupt status register. The DRIC/DREC control bits
 * presumably configure discard behavior - TODO confirm against the MB25
 * data sheet.
 */
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

	iadev->carrier_detect =
		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}
865
/* One PHY register/value pair for table-driven initialization. */
struct ia_reg {
	u16 reg;	/* register byte offset */
	u16 val;	/* value to write */
};
870
871static void ia_phy_write(struct iadev_priv *iadev,
872 const struct ia_reg *regs, int len)
873{
874 while (len--) {
875 ia_phy_write32(iadev, regs->reg, regs->val);
876 regs++;
877 }
878}
879
/*
 * DS3-specific setup for the SUNI PM7345 front end: sample the initial
 * carrier state from the framer status, then program the DS3 framer,
 * transmitter and cell-processor configuration registers.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init [] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	/* carrier is up unless loss-of-signal (LOSV) is flagged */
	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
897
/*
 * E3-specific setup for the SUNI PM7345 front end: sample the initial
 * carrier state, then program the E3 framer options and enable E3 mode
 * in the SUNI configuration register.
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init [] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	/* carrier is up unless loss-of-signal (LOS) is flagged */
	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
917
/*
 * Common SUNI PM7345 initialization: run the DS3- or E3-specific setup,
 * program the shared interrupt/cell-processor registers, and make sure
 * all loopback modes are disabled.
 */
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_init [] = {
		/* Enable RSOP loss of signal interrupt. */
		{ SUNI_INTR_ENBL,		0x28 },
		/* Clear error counters. */
		{ SUNI_ID_RESET,		0 },
		/* Clear "PMCTST" in master test register. */
		{ SUNI_MASTER_TEST,		0 },

		{ SUNI_RXCP_CTRL,		0x2c },
		{ SUNI_RXCP_FCTRL,		0x81 },

		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },

		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },

		{ SUNI_RXCP_CELL_PAT_H1,	0 },
		{ SUNI_RXCP_CELL_PAT_H2,	0 },
		{ SUNI_RXCP_CELL_PAT_H3,	0 },
		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },

		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
		{ SUNI_RXCP_CELL_MASK_H4,	0xff },

		{ SUNI_TXCP_CTRL,		0xa4 },
		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
	};

	if (iadev->phy_type & FE_DS3_PHY)
		ia_suni_pm7345_init_ds3(iadev);
	else
		ia_suni_pm7345_init_e3(iadev);

	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

	/* clear all loopback bits (line/cell/diag/PHY loopback) */
	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
	return;
}
971
972
973/***************************** IA_LIB END *****************************/
974
#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
/*
 * Debug hex/ASCII dump: print @length bytes of @cp, 16 per line, each
 * line prefixed by @prefix, with an extra blank after every 4 hex
 * bytes and a printable-character column at the end.
 *
 * Bug fix: in the padding loop for a short final line, the group
 * separator was written with a plain sprintf() that did not advance
 * pBuf, so the next sprintf overwrote it and the ASCII column of short
 * lines did not align with full lines. The pointer is now advanced.
 *
 * NOTE(review): prntBuf is 120 bytes; a long @prefix could overflow
 * it - callers must keep the prefix short.
 */
static void xdump( u_char* cp, int length, char* prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char* pBuf = prntBuf;
    count = 0;
    while(count < length){
        pBuf += sprintf( pBuf, "%s", prefix );
        /* hex column */
        for(col = 0;count + col < length && col < 16; col++){
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while(col++ < 16){ /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                pBuf += sprintf( pBuf, " " );   /* was missing "pBuf +=" */
            pBuf += sprintf( pBuf, "   " );
        }
        pBuf += sprintf( pBuf, "  " );
        /* ASCII column: printable characters or '.' */
        for(col = 0;count + col < length && col < 16; col++){
            if (isprint((int)cp[count + col]))
                pBuf += sprintf( pBuf, "%c", cp[count + col] );
            else
                pBuf += sprintf( pBuf, "." );
        }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

} /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */
1009
1010
/* Head of the registered-board list - NOTE(review): usage is outside this
   chunk; confirm against the rest of the file. */
static struct atm_dev *ia_boards = NULL;

/* Scale the nominal control-RAM base offsets (defined for a 128KB part)
   by the board's actual memory size (iadev->mem). These macros expect a
   local variable named "iadev" in the expanding scope. */
#define ACTUAL_RAM_BASE \
	RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1019
1020
1021/*-- some utilities and memory allocation stuff will come here -------------*/
1022
/*
 * Debug helper: dump the TCQ pointers, every occupied TCQ slot and the
 * per-descriptor timestamp table of @iadev to the kernel log.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  /* walk the queue from start to end, printing each 16-bit slot */
  i = 0;
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1047
1048
1049/*----------------------------- Receiving side stuff --------------------------*/
1050
/*
 * rx_excp_rcvd() - drain the reassembler's exception queue.
 *
 * The whole body is compiled out (#if 0): per the original author's
 * note below, servicing the queue here caused too many exception
 * interrupts, so this is currently a no-op.  The dead code is kept as
 * documentation of the intended queue walk: read VCI and error code
 * from each 4-byte entry, advance the read pointer with wrap-around
 * between EXCP_Q_ST_ADR and EXCP_Q_ED_ADR, and write it back.
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1079
1080static void free_desc(struct atm_dev *dev, int desc)
1081{
1082 IADEV *iadev;
1083 iadev = INPH_IA_DEV(dev);
1084 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1085 iadev->rfL.fdq_wr +=2;
1086 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1087 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1088 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1089}
1090
1091
/*
 * rx_pkt() - dequeue one completed PDU from the reassembler and start
 * its DMA into host memory.
 *
 * Pops the next descriptor number off the packet-complete queue (PCQ),
 * validates it and its buffer descriptor, allocates an skb charged to
 * the owning VCC, queues the skb on rx_dma_q, builds a receive DLE for
 * the adapter's DMA engine, and kicks the RX transaction counter.  The
 * actual data arrives later and is handed up in rx_dle_intr().
 *
 * Returns 0 on success (or after a dropped packet), -EINVAL if the PCQ
 * was empty, -1 on a bad descriptor or missing VCC.
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* PCQ empty: read pointer caught up with hardware write pointer. */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
	IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
	                             iadev->reass_ram, iadev->rfL.pcq_rd, desc);
	      printk(" pcq_wr_ptr = 0x%x\n",
	                       readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
	    iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
	    iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
	   update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	/* Sanity-check: descriptor number in range and VC index valid;
	   otherwise recycle the descriptor and drop. */
	if (!desc || (desc > iadev->num_rx_desc) ||
	    ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
	    free_desc(dev, desc);
	    IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
	    return -1;
	}
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
	    free_desc(dev, desc);
	    printk("IA: null vcc, drop PDU\n");
	    return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
	    atomic_inc(&vcc->stats->rx_err);
	    IF_ERR(printk("IA: bad packet, dropping it");)
	    if (status & RX_CER) {
	        IF_ERR(printk(" cause: packet CRC error\n");)
	    }
	    else if (status & RX_PTE) {
	        IF_ERR(printk(" cause: packet time out\n");)
	    }
	    else {
	        IF_ERR(printk(" cause: buffer overflow\n");)
	    }
	    goto out_free_desc;
	}

	/*
	   build DLE.
	*/

	/* Buffer start and current DMA position in adapter packet RAM;
	   their difference is the received PDU length. */
	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
	if (len > iadev->rx_buf_sz) {
	    printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
	    atomic_inc(&vcc->stats->rx_err);
	    goto out_free_desc;
	}

	/* Allocate an skb charged against the VCC's receive quota. */
	if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
	    if (vcc->vci < 32)
	        printk("Drop control packets\n");
	    goto out_free_desc;
	}
	skb_put(skb,len);
	// pwang_test
	ATM_SKB(skb)->vcc = vcc;
	ATM_DESC(skb) = desc;
	/* rx_dle_intr() dequeues this skb when the DMA completes. */
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
	                                      len, PCI_DMA_FROMDEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len; /* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
	if(++wr_ptr == iadev->rx_dle_q.end)
	    wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
	free_desc(dev, desc);
	goto out;
}
1206
/*
 * rx_intr() - service the reassembler's interrupt status register.
 *
 * Handles, in order: packets arriving on the packet-complete queue
 * (drained via rx_pkt() until the PCQ-empty state bit is set), a
 * free-descriptor-queue underrun (with a time-based recovery hack that
 * force-frees every descriptor if reception has stalled), exception
 * PDUs, and raw cells (currently only logged).
 */
static void rx_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	u_short status;
	u_short state, i;

	iadev = INPH_IA_DEV(dev);
	status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
	IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
	if (status & RX_PKT_RCVD)
	{
	    /* do something */
	    /* Basically recvd an interrupt for receiving a packet.
	       A descriptor would have been written to the packet complete
	       queue. Get all the descriptors and set up dma to move the
	       packets till the packet complete queue is empty..
	    */
	    state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	    IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	    while(!(state & PCQ_EMPTY))
	    {
	         rx_pkt(dev);
	         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	    }
	    iadev->rxing = 1;
	}
	if (status & RX_FREEQ_EMPT)
	{
	    /* No free buffer descriptors left.  If we were still receiving,
	       snapshot the packet count and time; if a later underrun finds
	       no packets received in ~50 jiffies, assume reception wedged
	       and force-free every descriptor to restart it. */
	    if (iadev->rxing) {
	        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
	        iadev->rx_tmp_jif = jiffies;
	        iadev->rxing = 0;
	    }
	    else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
	             ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
	        for (i = 1; i <= iadev->num_rx_desc; i++)
	            free_desc(dev, i);
printk("Test logic RUN!!!!\n");
	        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
	        iadev->rxing = 1;
	    }
	    IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
	}

	if (status & RX_EXCP_RCVD)
	{
	    /* probably need to handle the exception queue also. */
	    IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	    rx_excp_rcvd(dev);
	}


	if (status & RX_RAW_RCVD)
	{
	    /* need to handle the raw incoming cells. This deepnds on
	       whether we have programmed to receive the raw cells or not.
	       Else ignore. */
	    IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
	}
}
1267
1268
/*
 * rx_dle_intr() - receive-DMA-complete handler.
 *
 * Walks the receive DLE ring from our cached read pointer up to the
 * hardware's current list position, and for each completed DLE:
 * dequeues the matching skb from rx_dma_q, returns its buffer
 * descriptor to the hardware, validates the AAL5 CPCS trailer at the
 * end of the data, trims the skb to the real PDU length, and pushes it
 * up to the VCC.  Finally, if reception was stalled on an empty free
 * queue, re-enables the masked receive interrupts.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	int desc;
	u_short state;
	struct dle *dle, *cur_dle;
	u_int dle_lp;
	int len;
	iadev = INPH_IA_DEV(dev);

	/* free all the dles done, that is just update our own dle read pointer
	   - do we really need to do this. Think not. */
	/* DMA is done, just get all the recevie buffers from the rx dma queue
	   and push them up to the higher layer protocol. Also free the desc
	   associated with the buffer. */
	dle = iadev->rx_dle_q.read;
	/* Hardware list pointer, masked down to an offset within the
	   DLE ring; >>4 converts the byte offset to a DLE index. */
	dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
	cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
	while(dle != cur_dle)
	{
	    /* free the DMAed skb */
	    skb = skb_dequeue(&iadev->rx_dma_q);
	    if (!skb)
	        goto INCR_DLE;
	    desc = ATM_DESC(skb);
	    free_desc(dev, desc);

	    if (!(len = skb->len))
	    {
	        printk("rx_dle_intr: skb len 0\n");
	        dev_kfree_skb_any(skb);
	    }
	    else
	    {
	        struct cpcs_trailer *trailer;
	        u_short length;
	        struct ia_vcc *ia_vcc;

	        /* NOTE(review): this unmaps the address stored in the
	           ring's *write* slot, not in the DLE being retired
	           (dle->sys_pkt_addr); looks suspicious — confirm
	           against the DLE completion semantics. */
	        pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
	                         len, PCI_DMA_FROMDEVICE);
	        /* no VCC related housekeeping done as yet. lets see */
	        vcc = ATM_SKB(skb)->vcc;
	        if (!vcc) {
	            printk("IA: null vcc\n");
	            dev_kfree_skb_any(skb);
	            goto INCR_DLE;
	        }
	        ia_vcc = INPH_IA_VCC(vcc);
	        if (ia_vcc == NULL)
	        {
	            atomic_inc(&vcc->stats->rx_err);
	            atm_return(vcc, skb->truesize);
	            dev_kfree_skb_any(skb);
	            goto INCR_DLE;
	        }
	        // get real pkt length pwang_test
	        /* The AAL5 CPCS trailer sits at the very end of the
	           DMAed data and holds the true PDU length. */
	        trailer = (struct cpcs_trailer*)((u_char *)skb->data +
	                                         skb->len - sizeof(*trailer));
	        length = swap_byte_order(trailer->length);
	        if ((length > iadev->rx_buf_sz) || (length >
	                                            (skb->len - sizeof(struct cpcs_trailer))))
	        {
	            atomic_inc(&vcc->stats->rx_err);
	            IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
	                          length, skb->len);)
	            atm_return(vcc, skb->truesize);
	            dev_kfree_skb_any(skb);
	            goto INCR_DLE;
	        }
	        skb_trim(skb, length);

	        /* Display the packet */
	        IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
	                 xdump(skb->data, skb->len, "RX: ");
	                 printk("\n");)

	        IF_RX(printk("rx_dle_intr: skb push");)
	        vcc->push(vcc,skb);
	        atomic_inc(&vcc->stats->rx);
	        iadev->rx_pkt_cnt++;
	    }
INCR_DLE:
	    if (++dle == iadev->rx_dle_q.end)
	        dle = iadev->rx_dle_q.start;
	}
	iadev->rx_dle_q.read = dle;

	/* if the interrupts are masked because there were no free desc available,
	   unmask them now. */
	if (!iadev->rxing) {
	    state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	    if (!(state & FREEQ_EMPTY)) {
	        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
	        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
	               iadev->reass_reg+REASS_MASK_REG);
	        iadev->rxing++;
	    }
	}
}
1370
1371
/*
 * open_rx() - enable reception on a VCC.
 *
 * Marks the VCI's entry in the hardware RX VC lookup table as valid,
 * configures ABR parameters when the traffic class calls for it (not
 * available on 25 Mbit PHYs), primes the reassembly-table entry for
 * UBR/other classes, and records the VCC in rx_open[] so incoming PDUs
 * can be matched back to it.  Returns 0 on success, -EINVAL for ABR on
 * a 25 Mbit PHY.
 */
static int open_rx(struct atm_vcc *vcc)
{
	IADEV *iadev;
	u_short __iomem *vc_table;
	u_short __iomem *reass_ptr;
	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
	if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
	    if (iadev->phy_type & FE_25MBIT_PHY) {
	        printk("IA: ABR not support\n");
	        return -EINVAL;
	    }
	}
	/* Make only this VCI in the vc table valid and let all
	   others be invalid entries */
	vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
	vc_table += vcc->vci;
	/* mask the last 6 bits and OR it with 3 for 1K VCs */

	/* NOTE(review): the comment above says "OR it with 3" but the
	   code stores only (vci << 6) — confirm whether the low bits
	   are meant to carry the reassembly-pointer shift here. */
	*vc_table = vcc->vci << 6;
	/* Also keep a list of open rx vcs so that we can attach them with
	   incoming PDUs later. */
	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
	    (vcc->qos.txtp.traffic_class == ATM_ABR))
	{
	    srv_cls_param_t srv_p;
	    init_abr_vc(iadev, &srv_p);
	    ia_open_abr_vc(iadev, &srv_p, vcc, 0);
	}
	else { /* for UBR  later may need to add CBR logic */
	    reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
	    reass_ptr += vcc->vci;
	    *reass_ptr = NO_AAL5_PKT;
	}

	if (iadev->rx_open[vcc->vci])
	    printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
	           vcc->dev->number, vcc->vci);
	iadev->rx_open[vcc->vci] = vcc;
	return 0;
}
1415
1416static int rx_init(struct atm_dev *dev)
1417{
1418 IADEV *iadev;
1419 struct rx_buf_desc __iomem *buf_desc_ptr;
1420 unsigned long rx_pkt_start = 0;
1421 void *dle_addr;
1422 struct abr_vc_table *abr_vc_table;
1423 u16 *vc_table;
1424 u16 *reass_table;
1425 int i,j, vcsize_sel;
1426 u_short freeq_st_adr;
1427 u_short *freeq_start;
1428
1429 iadev = INPH_IA_DEV(dev);
1430 // spin_lock_init(&iadev->rx_lock);
1431
1432 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1433 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1434 &iadev->rx_dle_dma);
1435 if (!dle_addr) {
1436 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1437 goto err_out;
1438 }
1439 iadev->rx_dle_q.start = (struct dle *)dle_addr;
1440 iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1441 iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1442 iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1443 /* the end of the dle q points to the entry after the last
1444 DLE that can be used. */
1445
1446 /* write the upper 20 bits of the start address to rx list address register */
1447 /* We know this is 32bit bus addressed so the following is safe */
1448 writel(iadev->rx_dle_dma & 0xfffff000,
1449 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1450 IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1451 iadev->dma+IPHASE5575_TX_LIST_ADDR,
1452 readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1453 printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1454 iadev->dma+IPHASE5575_RX_LIST_ADDR,
1455 readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1456
1457 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1458 writew(0, iadev->reass_reg+MODE_REG);
1459 writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1460
1461 /* Receive side control memory map
1462 -------------------------------
1463
1464 Buffer descr 0x0000 (736 - 23K)
1465 VP Table 0x5c00 (256 - 512)
1466 Except q 0x5e00 (128 - 512)
1467 Free buffer q 0x6000 (1K - 2K)
1468 Packet comp q 0x6800 (1K - 2K)
1469 Reass Table 0x7000 (1K - 2K)
1470 VC Table 0x7800 (1K - 2K)
1471 ABR VC Table 0x8000 (1K - 32K)
1472 */
1473
1474 /* Base address for Buffer Descriptor Table */
1475 writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1476 /* Set the buffer size register */
1477 writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1478
1479 /* Initialize each entry in the Buffer Descriptor Table */
1480 iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1481 buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1482 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1483 buf_desc_ptr++;
1484 rx_pkt_start = iadev->rx_pkt_ram;
1485 for(i=1; i<=iadev->num_rx_desc; i++)
1486 {
1487 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1488 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1489 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1490 buf_desc_ptr++;
1491 rx_pkt_start += iadev->rx_buf_sz;
1492 }
1493 IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1494 i = FREE_BUF_DESC_Q*iadev->memSize;
1495 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1496 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1497 writew(i+iadev->num_rx_desc*sizeof(u_short),
1498 iadev->reass_reg+FREEQ_ED_ADR);
1499 writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1500 writew(i+iadev->num_rx_desc*sizeof(u_short),
1501 iadev->reass_reg+FREEQ_WR_PTR);
1502 /* Fill the FREEQ with all the free descriptors. */
1503 freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1504 freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1505 for(i=1; i<=iadev->num_rx_desc; i++)
1506 {
1507 *freeq_start = (u_short)i;
1508 freeq_start++;
1509 }
1510 IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1511 /* Packet Complete Queue */
1512 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1513 writew(i, iadev->reass_reg+PCQ_ST_ADR);
1514 writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1515 writew(i, iadev->reass_reg+PCQ_RD_PTR);
1516 writew(i, iadev->reass_reg+PCQ_WR_PTR);
1517
1518 /* Exception Queue */
1519 i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1520 writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1521 writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1522 iadev->reass_reg+EXCP_Q_ED_ADR);
1523 writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1524 writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1525
1526 /* Load local copy of FREEQ and PCQ ptrs */
1527 iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1528 iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1529 iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1530 iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1531 iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1532 iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1533 iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1534 iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1535
1536 IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1537 iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1538 iadev->rfL.pcq_wr);)
1539 /* just for check - no VP TBL */
1540 /* VP Table */
1541 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1542 /* initialize VP Table for invalid VPIs
1543 - I guess we can write all 1s or 0x000f in the entire memory
1544 space or something similar.
1545 */
1546
1547 /* This seems to work and looks right to me too !!! */
1548 i = REASS_TABLE * iadev->memSize;
1549 writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1550 /* initialize Reassembly table to I don't know what ???? */
1551 reass_table = (u16 *)(iadev->reass_ram+i);
1552 j = REASS_TABLE_SZ * iadev->memSize;
1553 for(i=0; i < j; i++)
1554 *reass_table++ = NO_AAL5_PKT;
1555 i = 8*1024;
1556 vcsize_sel = 0;
1557 while (i != iadev->num_vc) {
1558 i /= 2;
1559 vcsize_sel++;
1560 }
1561 i = RX_VC_TABLE * iadev->memSize;
1562 writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1563 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1564 j = RX_VC_TABLE_SZ * iadev->memSize;
1565 for(i = 0; i < j; i++)
1566 {
1567 /* shift the reassembly pointer by 3 + lower 3 bits of
1568 vc_lkup_base register (=3 for 1K VCs) and the last byte
1569 is those low 3 bits.
1570 Shall program this later.
1571 */
1572 *vc_table = (i << 6) | 15; /* for invalid VCI */
1573 vc_table++;
1574 }
1575 /* ABR VC table */
1576 i = ABR_VC_TABLE * iadev->memSize;
1577 writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1578
1579 i = ABR_VC_TABLE * iadev->memSize;
1580 abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
1581 j = REASS_TABLE_SZ * iadev->memSize;
1582 memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1583 for(i = 0; i < j; i++) {
1584 abr_vc_table->rdf = 0x0003;
1585 abr_vc_table->air = 0x5eb1;
1586 abr_vc_table++;
1587 }
1588
1589 /* Initialize other registers */
1590
1591 /* VP Filter Register set for VC Reassembly only */
1592 writew(0xff00, iadev->reass_reg+VP_FILTER);
1593 writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1594 writew(0x1, iadev->reass_reg+PROTOCOL_ID);
1595
1596 /* Packet Timeout Count related Registers :
1597 Set packet timeout to occur in about 3 seconds
1598 Set Packet Aging Interval count register to overflow in about 4 us
1599 */
1600 writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1601
1602 i = (j >> 6) & 0xFF;
1603 j += 2 * (j - 1);
1604 i |= ((j << 2) & 0xFF00);
1605 writew(i, iadev->reass_reg+TMOUT_RANGE);
1606
1607 /* initiate the desc_tble */
1608 for(i=0; i<iadev->num_tx_desc;i++)
1609 iadev->desc_tbl[i].timestamp = 0;
1610
1611 /* to clear the interrupt status register - read it */
1612 readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1613
1614 /* Mask Register - clear it */
1615 writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1616
1617 skb_queue_head_init(&iadev->rx_dma_q);
1618 iadev->rx_free_desc_qhead = NULL;
1619
1620 iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1621 if (!iadev->rx_open) {
1622 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1623 dev->number);
1624 goto err_free_dle;
1625 }
1626
1627 iadev->rxing = 1;
1628 iadev->rx_pkt_cnt = 0;
1629 /* Mode Register */
1630 writew(R_ONLINE, iadev->reass_reg+MODE_REG);
1631 return 0;
1632
1633err_free_dle:
1634 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1635 iadev->rx_dle_dma);
1636err_out:
1637 return -ENOMEM;
1638}
1639
1640
1641/*
1642 The memory map suggested in appendix A and the coding for it.
1643 Keeping it around just in case we change our mind later.
1644
1645 Buffer descr 0x0000 (128 - 4K)
1646 UBR sched 0x1000 (1K - 4K)
1647 UBR Wait q 0x2000 (1K - 4K)
 Commn queues 0x3000 Packet Ready, Transmit comp(0x3100)
1649 (128 - 256) each
1650 extended VC 0x4000 (1K - 8K)
1651 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1652 CBR sched 0x7000 (as needed)
1653 VC table 0x8000 (1K - 32K)
1654*/
1655
1656static void tx_intr(struct atm_dev *dev)
1657{
1658 IADEV *iadev;
1659 unsigned short status;
1660 unsigned long flags;
1661
1662 iadev = INPH_IA_DEV(dev);
1663
1664 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1665 if (status & TRANSMIT_DONE){
1666
1667 IF_EVENT(printk("Tansmit Done Intr logic run\n");)
1668 spin_lock_irqsave(&iadev->tx_lock, flags);
1669 ia_tx_poll(iadev);
1670 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1671 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1672 if (iadev->close_pending)
1673 wake_up(&iadev->close_wait);
1674 }
1675 if (status & TCQ_NOT_EMPTY)
1676 {
1677 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1678 }
1679}
1680
/*
 * tx_dle_intr() - transmit-DMA-complete handler.
 *
 * Walks the transmit DLE ring from our cached read pointer up to the
 * hardware's current list position and, for each completed entry,
 * dequeues the matching skb from tx_dma_q, unmaps its DMA buffer, and
 * either releases the skb (vcc->pop/free) or — for VCs below the
 * software rate limit — parks it on the per-VC txing_skb list for
 * later release by the flow-control logic.  Runs entirely under
 * tx_lock.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	struct dle *dle, *cur_dle;
	struct sk_buff *skb;
	struct atm_vcc *vcc;
	struct ia_vcc *iavcc;
	u_int dle_lp;
	unsigned long flags;

	iadev = INPH_IA_DEV(dev);
	spin_lock_irqsave(&iadev->tx_lock, flags);
	dle = iadev->tx_dle_q.read;
	/* Hardware list pointer masked to a ring offset; >>4 converts
	   the byte offset to a DLE index. */
	dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
	         (sizeof(struct dle)*DLE_ENTRIES - 1);
	cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
	while (dle != cur_dle)
	{
	    /* free the DMAed skb */
	    skb = skb_dequeue(&iadev->tx_dma_q);
	    if (!skb) break;

	    /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
	    /* NOTE(review): this test mixes a DLE element count with a
	       byte size (2*sizeof(struct dle)) — confirm it really
	       selects every other (data) DLE as intended. */
	    if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
	        pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
	                         PCI_DMA_TODEVICE);
	    }
	    vcc = ATM_SKB(skb)->vcc;
	    if (!vcc) {
	        printk("tx_dle_intr: vcc is null\n");
	        /* NOTE(review): early return drops the lock but never
	           commits tx_dle_q.read, so progress through the ring
	           is lost for this pass — confirm intended. */
	        spin_unlock_irqrestore(&iadev->tx_lock, flags);
	        dev_kfree_skb_any(skb);

	        return;
	    }
	    iavcc = INPH_IA_VCC(vcc);
	    if (!iavcc) {
	        printk("tx_dle_intr: iavcc is null\n");
	        spin_unlock_irqrestore(&iadev->tx_lock, flags);
	        dev_kfree_skb_any(skb);
	        return;
	    }
	    if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
	        /* Fast VC: release the skb immediately. */
	        if ((vcc->pop) && (skb->len != 0))
	        {
	            vcc->pop(vcc, skb);
	        }
	        else {
	            dev_kfree_skb_any(skb);
	        }
	    }
	    else { /* Hold the rate-limited skb for flow control */
	        IA_SKB_STATE(skb) |= IA_DLED;
	        skb_queue_tail(&iavcc->txing_skb, skb);
	    }
	    IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
	    if (++dle == iadev->tx_dle_q.end)
	        dle = iadev->tx_dle_q.start;
	}
	iadev->tx_dle_q.read = dle;
	spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1743
1744static int open_tx(struct atm_vcc *vcc)
1745{
1746 struct ia_vcc *ia_vcc;
1747 IADEV *iadev;
1748 struct main_vc *vc;
1749 struct ext_vc *evc;
1750 int ret;
1751 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1752 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1753 iadev = INPH_IA_DEV(vcc->dev);
1754
1755 if (iadev->phy_type & FE_25MBIT_PHY) {
1756 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1757 printk("IA: ABR not support\n");
1758 return -EINVAL;
1759 }
1760 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1761 printk("IA: CBR not support\n");
1762 return -EINVAL;
1763 }
1764 }
1765 ia_vcc = INPH_IA_VCC(vcc);
1766 memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1767 if (vcc->qos.txtp.max_sdu >
1768 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1769 printk("IA: SDU size over (%d) the configured SDU size %d\n",
1770 vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1771 vcc->dev_data = NULL;
1772 kfree(ia_vcc);
1773 return -EINVAL;
1774 }
1775 ia_vcc->vc_desc_cnt = 0;
1776 ia_vcc->txing = 1;
1777
1778 /* find pcr */
1779 if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1780 vcc->qos.txtp.pcr = iadev->LineRate;
1781 else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1782 vcc->qos.txtp.pcr = iadev->LineRate;
1783 else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1784 vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1785 if (vcc->qos.txtp.pcr > iadev->LineRate)
1786 vcc->qos.txtp.pcr = iadev->LineRate;
1787 ia_vcc->pcr = vcc->qos.txtp.pcr;
1788
1789 if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1790 else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1791 else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1792 else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
1793 if (ia_vcc->pcr < iadev->rate_limit)
1794 skb_queue_head_init (&ia_vcc->txing_skb);
1795 if (ia_vcc->pcr < iadev->rate_limit) {
1796 struct sock *sk = sk_atm(vcc);
1797
1798 if (vcc->qos.txtp.max_sdu != 0) {
1799 if (ia_vcc->pcr > 60000)
1800 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1801 else if (ia_vcc->pcr > 2000)
1802 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1803 else
1804 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1805 }
1806 else
1807 sk->sk_sndbuf = 24576;
1808 }
1809
1810 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1811 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1812 vc += vcc->vci;
1813 evc += vcc->vci;
1814 memset((caddr_t)vc, 0, sizeof(*vc));
1815 memset((caddr_t)evc, 0, sizeof(*evc));
1816
1817 /* store the most significant 4 bits of vci as the last 4 bits
1818 of first part of atm header.
1819 store the last 12 bits of vci as first 12 bits of the second
1820 part of the atm header.
1821 */
1822 evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1823 evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1824
1825 /* check the following for different traffic classes */
1826 if (vcc->qos.txtp.traffic_class == ATM_UBR)
1827 {
1828 vc->type = UBR;
1829 vc->status = CRC_APPEND;
1830 vc->acr = cellrate_to_float(iadev->LineRate);
1831 if (vcc->qos.txtp.pcr > 0)
1832 vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1833 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1834 vcc->qos.txtp.max_pcr,vc->acr);)
1835 }
1836 else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1837 { srv_cls_param_t srv_p;
1838 IF_ABR(printk("Tx ABR VCC\n");)
1839 init_abr_vc(iadev, &srv_p);
1840 if (vcc->qos.txtp.pcr > 0)
1841 srv_p.pcr = vcc->qos.txtp.pcr;
1842 if (vcc->qos.txtp.min_pcr > 0) {
1843 int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1844 if (tmpsum > iadev->LineRate)
1845 return -EBUSY;
1846 srv_p.mcr = vcc->qos.txtp.min_pcr;
1847 iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1848 }
1849 else srv_p.mcr = 0;
1850 if (vcc->qos.txtp.icr)
1851 srv_p.icr = vcc->qos.txtp.icr;
1852 if (vcc->qos.txtp.tbe)
1853 srv_p.tbe = vcc->qos.txtp.tbe;
1854 if (vcc->qos.txtp.frtt)
1855 srv_p.frtt = vcc->qos.txtp.frtt;
1856 if (vcc->qos.txtp.rif)
1857 srv_p.rif = vcc->qos.txtp.rif;
1858 if (vcc->qos.txtp.rdf)
1859 srv_p.rdf = vcc->qos.txtp.rdf;
1860 if (vcc->qos.txtp.nrm_pres)
1861 srv_p.nrm = vcc->qos.txtp.nrm;
1862 if (vcc->qos.txtp.trm_pres)
1863 srv_p.trm = vcc->qos.txtp.trm;
1864 if (vcc->qos.txtp.adtf_pres)
1865 srv_p.adtf = vcc->qos.txtp.adtf;
1866 if (vcc->qos.txtp.cdf_pres)
1867 srv_p.cdf = vcc->qos.txtp.cdf;
1868 if (srv_p.icr > srv_p.pcr)
1869 srv_p.icr = srv_p.pcr;
1870 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1871 srv_p.pcr, srv_p.mcr);)
1872 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1873 } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1874 if (iadev->phy_type & FE_25MBIT_PHY) {
1875 printk("IA: CBR not support\n");
1876 return -EINVAL;
1877 }
1878 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1879 IF_CBR(printk("PCR is not available\n");)
1880 return -1;
1881 }
1882 vc->type = CBR;
1883 vc->status = CRC_APPEND;
1884 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1885 return ret;
1886 }
1887 }
1888 else
1889 printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
1890
1891 iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1892 IF_EVENT(printk("ia open_tx returning \n");)
1893 return 0;
1894}
1895
1896
1897static int tx_init(struct atm_dev *dev)
1898{
1899 IADEV *iadev;
1900 struct tx_buf_desc *buf_desc_ptr;
1901 unsigned int tx_pkt_start;
1902 void *dle_addr;
1903 int i;
1904 u_short tcq_st_adr;
1905 u_short *tcq_start;
1906 u_short prq_st_adr;
1907 u_short *prq_start;
1908 struct main_vc *vc;
1909 struct ext_vc *evc;
1910 u_short tmp16;
1911 u32 vcsize_sel;
1912
1913 iadev = INPH_IA_DEV(dev);
1914 spin_lock_init(&iadev->tx_lock);
1915
1916 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1917 readw(iadev->seg_reg+SEG_MASK_REG));)
1918
1919 /* Allocate 4k (boundary aligned) bytes */
1920 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1921 &iadev->tx_dle_dma);
1922 if (!dle_addr) {
1923 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1924 goto err_out;
1925 }
1926 iadev->tx_dle_q.start = (struct dle*)dle_addr;
1927 iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1928 iadev->tx_dle_q.write = iadev->tx_dle_q.start;
1929 iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1930
1931 /* write the upper 20 bits of the start address to tx list address register */
1932 writel(iadev->tx_dle_dma & 0xfffff000,
1933 iadev->dma + IPHASE5575_TX_LIST_ADDR);
1934 writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1935 writew(0, iadev->seg_reg+MODE_REG_0);
1936 writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1937 iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1938 iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1939 iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1940
1941 /*
1942 Transmit side control memory map
1943 --------------------------------
1944 Buffer descr 0x0000 (128 - 4K)
1945 Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
1946 (512 - 1K) each
1947 TCQ - 4K, PRQ - 5K
1948 CBR Table 0x1800 (as needed) - 6K
1949 UBR Table 0x3000 (1K - 4K) - 12K
1950 UBR Wait queue 0x4000 (1K - 4K) - 16K
1951 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1952 ABR Tbl - 20K, ABR Wq - 22K
1953 extended VC 0x6000 (1K - 8K) - 24K
1954 VC Table 0x8000 (1K - 32K) - 32K
1955
1956 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1957 and Wait q, which can be allotted later.
1958 */
1959
1960 /* Buffer Descriptor Table Base address */
1961 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1962
1963 /* initialize each entry in the buffer descriptor table */
1964 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1965 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1966 buf_desc_ptr++;
1967 tx_pkt_start = TX_PACKET_RAM;
1968 for(i=1; i<=iadev->num_tx_desc; i++)
1969 {
1970 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1971 buf_desc_ptr->desc_mode = AAL5;
1972 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1973 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1974 buf_desc_ptr++;
1975 tx_pkt_start += iadev->tx_buf_sz;
1976 }
1977 iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1978 if (!iadev->tx_buf) {
1979 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1980 goto err_free_dle;
1981 }
1982 for (i= 0; i< iadev->num_tx_desc; i++)
1983 {
1984 struct cpcs_trailer *cpcs;
1985
1986 cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1987 if(!cpcs) {
1988 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
1989 goto err_free_tx_bufs;
1990 }
1991 iadev->tx_buf[i].cpcs = cpcs;
1992 iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1993 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1994 }
1995 iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1996 sizeof(struct desc_tbl_t), GFP_KERNEL);
1997 if (!iadev->desc_tbl) {
1998 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1999 goto err_free_all_tx_bufs;
2000 }
2001
2002 /* Communication Queues base address */
2003 i = TX_COMP_Q * iadev->memSize;
2004 writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
2005
2006 /* Transmit Complete Queue */
2007 writew(i, iadev->seg_reg+TCQ_ST_ADR);
2008 writew(i, iadev->seg_reg+TCQ_RD_PTR);
2009 writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
2010 iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2011 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2012 iadev->seg_reg+TCQ_ED_ADR);
2013 /* Fill the TCQ with all the free descriptors. */
2014 tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
2015 tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
2016 for(i=1; i<=iadev->num_tx_desc; i++)
2017 {
2018 *tcq_start = (u_short)i;
2019 tcq_start++;
2020 }
2021
2022 /* Packet Ready Queue */
2023 i = PKT_RDY_Q * iadev->memSize;
2024 writew(i, iadev->seg_reg+PRQ_ST_ADR);
2025 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2026 iadev->seg_reg+PRQ_ED_ADR);
2027 writew(i, iadev->seg_reg+PRQ_RD_PTR);
2028 writew(i, iadev->seg_reg+PRQ_WR_PTR);
2029
2030 /* Load local copy of PRQ and TCQ ptrs */
2031 iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2032 iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2033 iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2034
2035 iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2036 iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2037 iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2038
2039 /* Just for safety initializing the queue to have desc 1 always */
2040 /* Fill the PRQ with all the free descriptors. */
2041 prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2042 prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2043 for(i=1; i<=iadev->num_tx_desc; i++)
2044 {
2045 *prq_start = (u_short)0; /* desc 1 in all entries */
2046 prq_start++;
2047 }
2048 /* CBR Table */
2049 IF_INIT(printk("Start CBR Init\n");)
2050#if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2051 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2052#else /* Charlie's logic is wrong ? */
2053 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2054 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2055 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2056#endif
2057
2058 IF_INIT(printk("value in register = 0x%x\n",
2059 readw(iadev->seg_reg+CBR_PTR_BASE));)
2060 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2061 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2062 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2063 readw(iadev->seg_reg+CBR_TAB_BEG));)
2064 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2065 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2066 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2067 IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2068 iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2069 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2070 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2071 readw(iadev->seg_reg+CBR_TAB_END+1));)
2072
2073 /* Initialize the CBR Schedualing Table */
2074 memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
2075 0, iadev->num_vc*6);
2076 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2077 iadev->CbrEntryPt = 0;
2078 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2079 iadev->NumEnabledCBR = 0;
2080
2081 /* UBR scheduling Table and wait queue */
2082 /* initialize all bytes of UBR scheduler table and wait queue to 0
2083 - SCHEDSZ is 1K (# of entries).
2084 - UBR Table size is 4K
2085 - UBR wait queue is 4K
2086 since the table and wait queues are contiguous, all the bytes
2087 can be initialized by one memeset.
2088 */
2089
2090 vcsize_sel = 0;
2091 i = 8*1024;
2092 while (i != iadev->num_vc) {
2093 i /= 2;
2094 vcsize_sel++;
2095 }
2096
2097 i = MAIN_VC_TABLE * iadev->memSize;
2098 writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2099 i = EXT_VC_TABLE * iadev->memSize;
2100 writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2101 i = UBR_SCHED_TABLE * iadev->memSize;
2102 writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
2103 i = UBR_WAIT_Q * iadev->memSize;
2104 writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
2105 memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2106 0, iadev->num_vc*8);
2107 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2108 /* initialize all bytes of ABR scheduler table and wait queue to 0
2109 - SCHEDSZ is 1K (# of entries).
2110 - ABR Table size is 2K
2111 - ABR wait queue is 2K
2112 since the table and wait queues are contiguous, all the bytes
2113 can be initialized by one memeset.
2114 */
2115 i = ABR_SCHED_TABLE * iadev->memSize;
2116 writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2117 i = ABR_WAIT_Q * iadev->memSize;
2118 writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2119
2120 i = ABR_SCHED_TABLE*iadev->memSize;
2121 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
2122 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2123 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
2124 iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
2125 if (!iadev->testTable) {
2126 printk("Get freepage failed\n");
2127 goto err_free_desc_tbl;
2128 }
2129 for(i=0; i<iadev->num_vc; i++)
2130 {
2131 memset((caddr_t)vc, 0, sizeof(*vc));
2132 memset((caddr_t)evc, 0, sizeof(*evc));
2133 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2134 GFP_KERNEL);
2135 if (!iadev->testTable[i])
2136 goto err_free_test_tables;
2137 iadev->testTable[i]->lastTime = 0;
2138 iadev->testTable[i]->fract = 0;
2139 iadev->testTable[i]->vc_status = VC_UBR;
2140 vc++;
2141 evc++;
2142 }
2143
2144 /* Other Initialization */
2145
2146 /* Max Rate Register */
2147 if (iadev->phy_type & FE_25MBIT_PHY) {
2148 writew(RATE25, iadev->seg_reg+MAXRATE);
2149 writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2150 }
2151 else {
2152 writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2153 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2154 }
2155 /* Set Idle Header Reigisters to be sure */
2156 writew(0, iadev->seg_reg+IDLEHEADHI);
2157 writew(0, iadev->seg_reg+IDLEHEADLO);
2158
2159 /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
2160 writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2161
2162 iadev->close_pending = 0;
2163 init_waitqueue_head(&iadev->close_wait);
2164 init_waitqueue_head(&iadev->timeout_wait);
2165 skb_queue_head_init(&iadev->tx_dma_q);
2166 ia_init_rtn_q(&iadev->tx_return_q);
2167
2168 /* RM Cell Protocol ID and Message Type */
2169 writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2170 skb_queue_head_init (&iadev->tx_backlog);
2171
2172 /* Mode Register 1 */
2173 writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2174
2175 /* Mode Register 0 */
2176 writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2177
2178 /* Interrupt Status Register - read to clear */
2179 readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2180
2181 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2182 writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2183 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2184 iadev->tx_pkt_cnt = 0;
2185 iadev->rate_limit = iadev->LineRate / 3;
2186
2187 return 0;
2188
2189err_free_test_tables:
2190 while (--i >= 0)
2191 kfree(iadev->testTable[i]);
2192 kfree(iadev->testTable);
2193err_free_desc_tbl:
2194 kfree(iadev->desc_tbl);
2195err_free_all_tx_bufs:
2196 i = iadev->num_tx_desc;
2197err_free_tx_bufs:
2198 while (--i >= 0) {
2199 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2200
2201 pci_unmap_single(iadev->pci, desc->dma_addr,
2202 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2203 kfree(desc->cpcs);
2204 }
2205 kfree(iadev->tx_buf);
2206err_free_dle:
2207 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2208 iadev->tx_dle_dma);
2209err_out:
2210 return -ENOMEM;
2211}
2212
2213static irqreturn_t ia_int(int irq, void *dev_id)
2214{
2215 struct atm_dev *dev;
2216 IADEV *iadev;
2217 unsigned int status;
2218 int handled = 0;
2219
2220 dev = dev_id;
2221 iadev = INPH_IA_DEV(dev);
2222 while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2223 {
2224 handled = 1;
2225 IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2226 if (status & STAT_REASSINT)
2227 {
2228 /* do something */
2229 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2230 rx_intr(dev);
2231 }
2232 if (status & STAT_DLERINT)
2233 {
2234 /* Clear this bit by writing a 1 to it. */
2235 writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2236 rx_dle_intr(dev);
2237 }
2238 if (status & STAT_SEGINT)
2239 {
2240 /* do something */
2241 IF_EVENT(printk("IA: tx_intr \n");)
2242 tx_intr(dev);
2243 }
2244 if (status & STAT_DLETINT)
2245 {
2246 writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2247 tx_dle_intr(dev);
2248 }
2249 if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2250 {
2251 if (status & STAT_FEINT)
2252 ia_frontend_intr(iadev);
2253 }
2254 }
2255 return IRQ_RETVAL(handled);
2256}
2257
2258
2259
2260/*----------------------------- entries --------------------------------*/
/*
 * Read the adapter's ESI (end-system identifier / MAC address) out of the
 * MAC1 and MAC2 registers and unpack it, most-significant byte first, into
 * dev->esi[].  MAC1 supplies the first MAC1_LEN bytes, MAC2 the remaining
 * MAC2_LEN bytes (presumably MAC1_LEN + MAC2_LEN == ESI_LEN — confirm in
 * the header).  Always returns 0.
 *
 * NOTE(review): the cpu_to_be32(le32_to_cpu(...)) round-trip is a no-op on
 * little-endian hosts; on big-endian it byte-swaps twice.  Looks like the
 * intent is "register is little-endian, ESI wanted big-endian" — verify on
 * a big-endian platform before relying on it.
 */
static int get_esi(struct atm_dev *dev)
{
	IADEV *iadev;
	int i;
	u32 mac1;
	u16 mac2;

	iadev = INPH_IA_DEV(dev);
	mac1 = cpu_to_be32(le32_to_cpu(readl(
				iadev->reg+IPHASE5575_MAC1)));
	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
	/* high-order bytes of mac1 come first in the ESI */
	for (i=0; i<MAC1_LEN; i++)
		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));

	/* then the two bytes from mac2 */
	for (i=0; i<MAC2_LEN; i++)
		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
	return 0;
}
2280
2281static int reset_sar(struct atm_dev *dev)
2282{
2283 IADEV *iadev;
2284 int i, error = 1;
2285 unsigned int pci[64];
2286
2287 iadev = INPH_IA_DEV(dev);
2288 for(i=0; i<64; i++)
2289 if ((error = pci_read_config_dword(iadev->pci,
2290 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2291 return error;
2292 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2293 for(i=0; i<64; i++)
2294 if ((error = pci_write_config_dword(iadev->pci,
2295 i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2296 return error;
2297 udelay(5);
2298 return 0;
2299}
2300
2301
/*
 * Per-device initialisation: read PCI configuration, size the board variant
 * from its BAR0 length, map BAR0 and carve it into register and RAM windows,
 * read the ESI (MAC address) and reset the SAR.
 *
 * Returns 0 on success, a negative errno on failure, or 1 if the SAR reset
 * fails (NOTE(review): positive return is unusual — callers should treat any
 * non-zero value as failure).
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	/* BAR0 length identifies the board variant: 1 MB window => 4K-VC
	   board (larger control memory), 256 KB window => 1K-VC board.
	   memSize scales all the control-memory table offsets used elsewhere. */
	if (iadev->pci_map_size == 0x100000){
		iadev->num_vc = 4096;
		dev->ci_range.vci_bits = NR_VCI_4K_LD;
		iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
		iadev->num_vc = 1024;
		iadev->memSize = 1;
	}
	else {
		printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
		return -EINVAL;
	}
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size);	/* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
			    dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
			iadev->reg,iadev->seg_reg,iadev->reass_reg,
			iadev->phy, iadev->ram, iadev->seg_ram,
			iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
		/* undo the ioremap before bailing out */
		iounmap(iadev->base);
		return error;
	}
	printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
		printk("%s%02X",i ? "-" : "",dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
		iounmap(iadev->base);
		printk("IA: reset SAR fail, please try again\n");
		return 1;
	}
	return 0;
}
2414
2415static void ia_update_stats(IADEV *iadev) {
2416 if (!iadev->carrier_detect)
2417 return;
2418 iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2419 iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2420 iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2421 iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2422 iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2423 iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2424 return;
2425}
2426
/*
 * Quarter-second housekeeping timer, shared by all boards.  Each tick
 * toggles the activity LED on every registered adapter; on the "off"
 * phase it refreshes the statistics counters, and on the "on" phase it
 * polls the transmit path (and wakes any VC close waiting for drain).
 * The timer re-arms itself at the end of every run.
 *
 * NOTE(review): blinking[] has 8 slots but is indexed 0..iadev_count-1 —
 * assumes at most 8 boards (presumably matching the ia_dev[] array size);
 * confirm against the array declaration.
 */
static void ia_led_timer(unsigned long arg) {
	unsigned long flags;
	/* per-board blink phase; static so it persists across timer runs */
  	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
        u_char i;
	/* NOTE(review): static is unnecessary here — reassigned before every
	   use; harmless as long as only one timer instance runs */
        static u32 ctrl_reg;
        for (i = 0; i < iadev_count; i++) {
           if (ia_dev[i]) {
	      ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
	      if (blinking[i] == 0) {
		 /* LED-on phase: also refresh the stats counters */
		 blinking[i]++;
                 ctrl_reg &= (~CTRL_LED);
                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
                 ia_update_stats(ia_dev[i]);
              }
              else {
		 /* LED-off phase: poll tx completions under the tx lock */
		 blinking[i] = 0;
		 ctrl_reg |= CTRL_LED;
                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
                 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
                 if (ia_dev[i]->close_pending)
		    /* a VC close is sleeping until tx descriptors drain */
                    wake_up(&ia_dev[i]->close_wait);
                 ia_tx_poll(ia_dev[i]);
                 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
              }
           }
        }
	/* re-arm for the next quarter second */
	mod_timer(&ia_timer, jiffies + HZ / 4);
 	return;
}
2456
2457static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2458 unsigned long addr)
2459{
2460 writel(value, INPH_IA_DEV(dev)->phy+addr);
2461}
2462
2463static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2464{
2465 return readl(INPH_IA_DEV(dev)->phy+addr);
2466}
2467
2468static void ia_free_tx(IADEV *iadev)
2469{
2470 int i;
2471
2472 kfree(iadev->desc_tbl);
2473 for (i = 0; i < iadev->num_vc; i++)
2474 kfree(iadev->testTable[i]);
2475 kfree(iadev->testTable);
2476 for (i = 0; i < iadev->num_tx_desc; i++) {
2477 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2478
2479 pci_unmap_single(iadev->pci, desc->dma_addr,
2480 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2481 kfree(desc->cpcs);
2482 }
2483 kfree(iadev->tx_buf);
2484 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2485 iadev->tx_dle_dma);
2486}
2487
2488static void ia_free_rx(IADEV *iadev)
2489{
2490 kfree(iadev->rx_open);
2491 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2492 iadev->rx_dle_dma);
2493}
2494
/*
 * Bring the adapter up: claim the (shared) IRQ, enable PCI memory space
 * and bus mastering, program the bus interface control register, run the
 * transmit and receive initialisation, release the front-end reset and
 * finally perform PHY-specific initialisation (25 Mbit, DS3/E3, or SUNI
 * for the OC3/UTP155 variants).
 *
 * Returns 0 on success or a negative errno; all partially-acquired
 * resources are released through the goto cleanup chain on failure.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
	if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
		    dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
	}
	/* IRQ is released via err_free_irq below on any later failure */
	/* enabling memory + master */
	if ((error = pci_write_config_word(iadev->pci,
				PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
		    "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
	}
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
		Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
			    readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	/* keep only LED and front-end-reset state; enable all burst sizes
	   and unmask every interrupt source */
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
		| CTRL_B8
		| CTRL_B16
		| CTRL_B32
		| CTRL_B48
		| CTRL_B64
		| CTRL_B128
		| CTRL_ERRMASK
		| CTRL_DLETMASK		/* shud be removed l8r */
		| CTRL_DLERMASK
		| CTRL_SEGMASK
		| CTRL_REASSMASK
		| CTRL_FEMASK
		| CTRL_CSPREEMPT;

	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			   readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
	   printk("Bus status reg after init: %08x\n",
			    readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

	ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* take the front end out of reset */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
			       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	phy = 0; /* resolve compiler complaint */
	IF_INIT (
	if ((phy=ia_phy_get(dev,0)) == 0x30)
		printk("IA: pm5346,rev.%d\n",phy&0x0f);
	else
		printk("IA: utopia,rev.%0x\n",phy);)

	/* PHY-specific bring-up, selected by the detected front-end type */
	if (iadev->phy_type &  FE_25MBIT_PHY)
		ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
		ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2596
/*
 * Close a VC.  On the transmit side: flush this VC's packets out of the
 * shared tx backlog, wait (bounded by a PCR-scaled timeout) for its
 * outstanding descriptors to complete, then undo any ABR/CBR rate
 * accounting.  On the receive side: mark the reassembly and VC table
 * entries invalid, restore ABR table defaults, and drain pending receive
 * DLEs.  Finally frees the per-VC state.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
	u16 *vc_table;
	IADEV *iadev;
	struct ia_vcc *ia_vcc;
	struct sk_buff *skb = NULL;
	struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
	unsigned long closetime, flags;

	iadev = INPH_IA_DEV(vcc->dev);
	ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

	IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
	                         ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
	skb_queue_head_init (&tmp_tx_backlog);
	skb_queue_head_init (&tmp_vcc_backlog);
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		iadev->close_pending++;
		/* give in-flight transmits a moment before flushing */
		prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule_timeout(50);
		finish_wait(&iadev->timeout_wait, &wait);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		/* pull this VC's skbs out of the shared backlog; park the
		   rest on a temporary queue, then put them back */
		while((skb = skb_dequeue(&iadev->tx_backlog))) {
			if (ATM_SKB(skb)->vcc == vcc){
				if (vcc->pop) vcc->pop(vcc, skb);
				else dev_kfree_skb_any(skb);
			}
			else
				skb_queue_tail(&tmp_tx_backlog, skb);
		}
		while((skb = skb_dequeue(&tmp_tx_backlog)))
			skb_queue_tail(&iadev->tx_backlog, skb);
		IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
		/* timeout scales inversely with the VC's peak cell rate */
		closetime = 300000 / ia_vcc->pcr;
		if (closetime == 0)
			closetime = 1;
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		/* wait for outstanding descriptors; ia_led_timer/tx completion
		   wakes close_wait as they drain */
		wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		iadev->close_pending--;
		/* reset the per-VCI rate-test entry back to UBR defaults */
		iadev->testTable[vcc->vci]->lastTime = 0;
		iadev->testTable[vcc->vci]->fract = 0;
		iadev->testTable[vcc->vci]->vc_status = VC_UBR;
		if (vcc->qos.txtp.traffic_class == ATM_ABR) {
			if (vcc->qos.txtp.min_pcr > 0)
				iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
		}
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			ia_vcc = INPH_IA_VCC(vcc);
			iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
			ia_cbrVc_close (vcc);
		}
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		// reset reass table
		vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = NO_AAL5_PKT;
		// reset vc table
		vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = (vcc->vci << 6) | 15;
		if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
			/* restore ABR rate-control defaults for this VCI */
			struct abr_vc_table __iomem *abr_vc_table =
	                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
			abr_vc_table +=  vcc->vci;
			abr_vc_table->rdf = 0x0003;
			abr_vc_table->air = 0x5eb1;
		}
		// Drain the packets
		rx_dle_intr(vcc->dev);
		iadev->rx_open[vcc->vci] = NULL;
	}
	kfree(INPH_IA_VCC(vcc));
	ia_vcc = NULL;
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR,&vcc->flags);
	return;
}
2681
2682static int ia_open(struct atm_vcc *vcc)
2683{
2684 struct ia_vcc *ia_vcc;
2685 int error;
2686 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2687 {
2688 IF_EVENT(printk("ia: not partially allocated resources\n");)
2689 vcc->dev_data = NULL;
2690 }
2691 if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2692 {
2693 IF_EVENT(printk("iphase open: unspec part\n");)
2694 set_bit(ATM_VF_ADDR,&vcc->flags);
2695 }
2696 if (vcc->qos.aal != ATM_AAL5)
2697 return -EINVAL;
2698 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2699 vcc->dev->number, vcc->vpi, vcc->vci);)
2700
2701 /* Device dependent initialization */
2702 ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2703 if (!ia_vcc) return -ENOMEM;
2704 vcc->dev_data = ia_vcc;
2705
2706 if ((error = open_rx(vcc)))
2707 {
2708 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2709 ia_close(vcc);
2710 return error;
2711 }
2712
2713 if ((error = open_tx(vcc)))
2714 {
2715 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2716 ia_close(vcc);
2717 return error;
2718 }
2719
2720 set_bit(ATM_VF_READY,&vcc->flags);
2721
2722#if 0
2723 {
2724 static u8 first = 1;
2725 if (first) {
2726 ia_timer.expires = jiffies + 3*HZ;
2727 add_timer(&ia_timer);
2728 first = 0;
2729 }
2730 }
2731#endif
2732 IF_EVENT(printk("ia open returning\n");)
2733 return 0;
2734}
2735
/*
 * QoS renegotiation entry point.  Not implemented: the call succeeds
 * without changing anything.
 */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2741
2742static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2743{
2744 IA_CMDBUF ia_cmds;
2745 IADEV *iadev;
2746 int i, board;
2747 u16 __user *tmps;
2748 IF_EVENT(printk(">ia_ioctl\n");)
2749 if (cmd != IA_CMD) {
2750 if (!dev->phy->ioctl) return -EINVAL;
2751 return dev->phy->ioctl(dev,cmd,arg);
2752 }
2753 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2754 board = ia_cmds.status;
2755 if ((board < 0) || (board > iadev_count))
2756 board = 0;
2757 iadev = ia_dev[board];
2758 switch (ia_cmds.cmd) {
2759 case MEMDUMP:
2760 {
2761 switch (ia_cmds.sub_cmd) {
2762 case MEMDUMP_DEV:
2763 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2764 if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2765 return -EFAULT;
2766 ia_cmds.status = 0;
2767 break;
2768 case MEMDUMP_SEGREG:
2769 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2770 tmps = (u16 __user *)ia_cmds.buf;
2771 for(i=0; i<0x80; i+=2, tmps++)
2772 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2773 ia_cmds.status = 0;
2774 ia_cmds.len = 0x80;
2775 break;
2776 case MEMDUMP_REASSREG:
2777 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2778 tmps = (u16 __user *)ia_cmds.buf;
2779 for(i=0; i<0x80; i+=2, tmps++)
2780 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2781 ia_cmds.status = 0;
2782 ia_cmds.len = 0x80;
2783 break;
2784 case MEMDUMP_FFL:
2785 {
2786 ia_regs_t *regs_local;
2787 ffredn_t *ffL;
2788 rfredn_t *rfL;
2789
2790 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2791 regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2792 if (!regs_local) return -ENOMEM;
2793 ffL = ®s_local->ffredn;
2794 rfL = ®s_local->rfredn;
2795 /* Copy real rfred registers into the local copy */
2796 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2797 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2798 /* Copy real ffred registers into the local copy */
2799 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2800 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2801
2802 if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2803 kfree(regs_local);
2804 return -EFAULT;
2805 }
2806 kfree(regs_local);
2807 printk("Board %d registers dumped\n", board);
2808 ia_cmds.status = 0;
2809 }
2810 break;
2811 case READ_REG:
2812 {
2813 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2814 desc_dbg(iadev);
2815 ia_cmds.status = 0;
2816 }
2817 break;
2818 case 0x6:
2819 {
2820 ia_cmds.status = 0;
2821 printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2822 printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2823 }
2824 break;
2825 case 0x8:
2826 {
2827 struct k_sonet_stats *stats;
2828 stats = &PRIV(_ia_dev[board])->sonet_stats;
2829 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2830 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2831 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2832 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2833 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2834 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2835 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2836 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2837 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2838 }
2839 ia_cmds.status = 0;
2840 break;
2841 case 0x9:
2842 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2843 for (i = 1; i <= iadev->num_rx_desc; i++)
2844 free_desc(_ia_dev[board], i);
2845 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2846 iadev->reass_reg+REASS_MASK_REG);
2847 iadev->rxing = 1;
2848
2849 ia_cmds.status = 0;
2850 break;
2851
2852 case 0xb:
2853 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2854 ia_frontend_intr(iadev);
2855 break;
2856 case 0xa:
2857 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2858 {
2859 ia_cmds.status = 0;
2860 IADebugFlag = ia_cmds.maddr;
2861 printk("New debug option loaded\n");
2862 }
2863 break;
2864 default:
2865 ia_cmds.status = 0;
2866 break;
2867 }
2868 }
2869 break;
2870 default:
2871 break;
2872
2873 }
2874 return 0;
2875}
2876
/*
 * Socket-option read hook.  The driver exposes no socket options, so this
 * always fails with -EINVAL.
 */
static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
	void __user *optval, int optlen)
{
	IF_EVENT(printk(">ia_getsockopt\n");)
	return -EINVAL;
}
2883
/*
 * Socket-option write hook.  The driver exposes no socket options, so this
 * always fails with -EINVAL.
 */
static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
	void __user *optval, unsigned int optlen)
{
	IF_EVENT(printk(">ia_setsockopt\n");)
	return -EINVAL;
}
2890
2891static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2892 IADEV *iadev;
2893 struct dle *wr_ptr;
2894 struct tx_buf_desc __iomem *buf_desc_ptr;
2895 int desc;
2896 int comp_code;
2897 int total_len;
2898 struct cpcs_trailer *trailer;
2899 struct ia_vcc *iavcc;
2900
2901 iadev = INPH_IA_DEV(vcc->dev);
2902 iavcc = INPH_IA_VCC(vcc);
2903 if (!iavcc->txing) {
2904 printk("discard packet on closed VC\n");
2905 if (vcc->pop)
2906 vcc->pop(vcc, skb);
2907 else
2908 dev_kfree_skb_any(skb);
2909 return 0;
2910 }
2911
2912 if (skb->len > iadev->tx_buf_sz - 8) {
2913 printk("Transmit size over tx buffer size\n");
2914 if (vcc->pop)
2915 vcc->pop(vcc, skb);
2916 else
2917 dev_kfree_skb_any(skb);
2918 return 0;
2919 }
2920 if ((unsigned long)skb->data & 3) {
2921 printk("Misaligned SKB\n");
2922 if (vcc->pop)
2923 vcc->pop(vcc, skb);
2924 else
2925 dev_kfree_skb_any(skb);
2926 return 0;
2927 }
2928 /* Get a descriptor number from our free descriptor queue
2929 We get the descr number from the TCQ now, since I am using
2930 the TCQ as a free buffer queue. Initially TCQ will be
2931 initialized with all the descriptors and is hence, full.
2932 */
2933 desc = get_desc (iadev, iavcc);
2934 if (desc == 0xffff)
2935 return 1;
2936 comp_code = desc >> 13;
2937 desc &= 0x1fff;
2938
2939 if ((desc == 0) || (desc > iadev->num_tx_desc))
2940 {
2941 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2942 atomic_inc(&vcc->stats->tx);
2943 if (vcc->pop)
2944 vcc->pop(vcc, skb);
2945 else
2946 dev_kfree_skb_any(skb);
2947 return 0; /* return SUCCESS */
2948 }
2949
2950 if (comp_code)
2951 {
2952 IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2953 desc, comp_code);)
2954 }
2955
2956 /* remember the desc and vcc mapping */
2957 iavcc->vc_desc_cnt++;
2958 iadev->desc_tbl[desc-1].iavcc = iavcc;
2959 iadev->desc_tbl[desc-1].txskb = skb;
2960 IA_SKB_STATE(skb) = 0;
2961
2962 iadev->ffL.tcq_rd += 2;
2963 if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2964 iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
2965 writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2966
2967 /* Put the descriptor number in the packet ready queue
2968 and put the updated write pointer in the DLE field
2969 */
2970 *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2971
2972 iadev->ffL.prq_wr += 2;
2973 if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2974 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2975
2976 /* Figure out the exact length of the packet and padding required to
2977 make it aligned on a 48 byte boundary. */
2978 total_len = skb->len + sizeof(struct cpcs_trailer);
2979 total_len = ((total_len + 47) / 48) * 48;
2980 IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2981
2982 /* Put the packet in a tx buffer */
2983 trailer = iadev->tx_buf[desc-1].cpcs;
2984 IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2985 skb, skb->data, skb->len, desc);)
2986 trailer->control = 0;
2987 /*big endian*/
2988 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2989 trailer->crc32 = 0; /* not needed - dummy bytes */
2990
2991 /* Display the packet */
2992 IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2993 skb->len, tcnter++);
2994 xdump(skb->data, skb->len, "TX: ");
2995 printk("\n");)
2996
2997 /* Build the buffer descriptor */
2998 buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2999 buf_desc_ptr += desc; /* points to the corresponding entry */
3000 buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
3001 /* Huh ? p.115 of users guide describes this as a read-only register */
3002 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3003 buf_desc_ptr->vc_index = vcc->vci;
3004 buf_desc_ptr->bytes = total_len;
3005
3006 if (vcc->qos.txtp.traffic_class == ATM_ABR)
3007 clear_lockup (vcc, iadev);
3008
3009 /* Build the DLE structure */
3010 wr_ptr = iadev->tx_dle_q.write;
3011 memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3012 wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3013 skb->len, PCI_DMA_TODEVICE);
3014 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3015 buf_desc_ptr->buf_start_lo;
3016 /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3017 wr_ptr->bytes = skb->len;
3018
3019 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3020 if ((wr_ptr->bytes >> 2) == 0xb)
3021 wr_ptr->bytes = 0x30;
3022
3023 wr_ptr->mode = TX_DLE_PSI;
3024 wr_ptr->prq_wr_ptr_data = 0;
3025
3026 /* end is not to be used for the DLE q */
3027 if (++wr_ptr == iadev->tx_dle_q.end)
3028 wr_ptr = iadev->tx_dle_q.start;
3029
3030 /* Build trailer dle */
3031 wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3032 wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3033 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3034
3035 wr_ptr->bytes = sizeof(struct cpcs_trailer);
3036 wr_ptr->mode = DMA_INT_ENABLE;
3037 wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3038
3039 /* end is not to be used for the DLE q */
3040 if (++wr_ptr == iadev->tx_dle_q.end)
3041 wr_ptr = iadev->tx_dle_q.start;
3042
3043 iadev->tx_dle_q.write = wr_ptr;
3044 ATM_DESC(skb) = vcc->vci;
3045 skb_queue_tail(&iadev->tx_dma_q, skb);
3046
3047 atomic_inc(&vcc->stats->tx);
3048 iadev->tx_pkt_cnt++;
3049 /* Increment transaction counter */
3050 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3051
3052#if 0
3053 /* add flow control logic */
3054 if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3055 if (iavcc->vc_desc_cnt > 10) {
3056 vcc->tx_quota = vcc->tx_quota * 3 / 4;
3057 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3058 iavcc->flow_inc = -1;
3059 iavcc->saved_tx_quota = vcc->tx_quota;
3060 } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3061 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3062 printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3063 iavcc->flow_inc = 0;
3064 }
3065 }
3066#endif
3067 IF_TX(printk("ia send done\n");)
3068 return 0;
3069}
3070
3071static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3072{
3073 IADEV *iadev;
3074 unsigned long flags;
3075
3076 iadev = INPH_IA_DEV(vcc->dev);
3077 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3078 {
3079 if (!skb)
3080 printk(KERN_CRIT "null skb in ia_send\n");
3081 else dev_kfree_skb_any(skb);
3082 return -EINVAL;
3083 }
3084 spin_lock_irqsave(&iadev->tx_lock, flags);
3085 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3086 dev_kfree_skb_any(skb);
3087 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3088 return -EINVAL;
3089 }
3090 ATM_SKB(skb)->vcc = vcc;
3091
3092 if (skb_peek(&iadev->tx_backlog)) {
3093 skb_queue_tail(&iadev->tx_backlog, skb);
3094 }
3095 else {
3096 if (ia_pkt_tx (vcc, skb)) {
3097 skb_queue_tail(&iadev->tx_backlog, skb);
3098 }
3099 }
3100 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3101 return 0;
3102
3103}
3104
3105static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3106{
3107 int left = *pos, n;
3108 char *tmpPtr;
3109 IADEV *iadev = INPH_IA_DEV(dev);
3110 if(!left--) {
3111 if (iadev->phy_type == FE_25MBIT_PHY) {
3112 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3113 return n;
3114 }
3115 if (iadev->phy_type == FE_DS3_PHY)
3116 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3117 else if (iadev->phy_type == FE_E3_PHY)
3118 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3119 else if (iadev->phy_type == FE_UTP_OPTION)
3120 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3121 else
3122 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3123 tmpPtr = page + n;
3124 if (iadev->pci_map_size == 0x40000)
3125 n += sprintf(tmpPtr, "-1KVC-");
3126 else
3127 n += sprintf(tmpPtr, "-4KVC-");
3128 tmpPtr = page + n;
3129 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3130 n += sprintf(tmpPtr, "1M \n");
3131 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3132 n += sprintf(tmpPtr, "512K\n");
3133 else
3134 n += sprintf(tmpPtr, "128K\n");
3135 return n;
3136 }
3137 if (!left) {
3138 return sprintf(page, " Number of Tx Buffer: %u\n"
3139 " Size of Tx Buffer : %u\n"
3140 " Number of Rx Buffer: %u\n"
3141 " Size of Rx Buffer : %u\n"
3142 " Packets Receiverd : %u\n"
3143 " Packets Transmitted: %u\n"
3144 " Cells Received : %u\n"
3145 " Cells Transmitted : %u\n"
3146 " Board Dropped Cells: %u\n"
3147 " Board Dropped Pkts : %u\n",
3148 iadev->num_tx_desc, iadev->tx_buf_sz,
3149 iadev->num_rx_desc, iadev->rx_buf_sz,
3150 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3151 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3152 iadev->drop_rxcell, iadev->drop_rxpkt);
3153 }
3154 return 0;
3155}
3156
/* Entry points exported to the generic Linux ATM layer for this adapter. */
static const struct atmdev_ops ops = {
	.open = ia_open,
	.close = ia_close,
	.ioctl = ia_ioctl,
	.getsockopt = ia_getsockopt,
	.setsockopt = ia_setsockopt,
	.send = ia_send,
	.phy_put = ia_phy_put,
	.phy_get = ia_phy_get,
	.change_qos = ia_change_qos,
	.proc_read = ia_proc_read,
	.owner = THIS_MODULE,
};
3170
3171static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3172{
3173 struct atm_dev *dev;
3174 IADEV *iadev;
3175 int ret;
3176
3177 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3178 if (!iadev) {
3179 ret = -ENOMEM;
3180 goto err_out;
3181 }
3182
3183 iadev->pci = pdev;
3184
3185 IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3186 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3187 if (pci_enable_device(pdev)) {
3188 ret = -ENODEV;
3189 goto err_out_free_iadev;
3190 }
3191 dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3192 if (!dev) {
3193 ret = -ENOMEM;
3194 goto err_out_disable_dev;
3195 }
3196 dev->dev_data = iadev;
3197 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3198 IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3199 iadev->LineRate);)
3200
3201 pci_set_drvdata(pdev, dev);
3202
3203 ia_dev[iadev_count] = iadev;
3204 _ia_dev[iadev_count] = dev;
3205 iadev_count++;
3206 if (ia_init(dev) || ia_start(dev)) {
3207 IF_INIT(printk("IA register failed!\n");)
3208 iadev_count--;
3209 ia_dev[iadev_count] = NULL;
3210 _ia_dev[iadev_count] = NULL;
3211 ret = -EINVAL;
3212 goto err_out_deregister_dev;
3213 }
3214 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3215
3216 iadev->next_board = ia_boards;
3217 ia_boards = dev;
3218
3219 return 0;
3220
3221err_out_deregister_dev:
3222 atm_dev_deregister(dev);
3223err_out_disable_dev:
3224 pci_disable_device(pdev);
3225err_out_free_iadev:
3226 kfree(iadev);
3227err_out:
3228 return ret;
3229}
3230
/*
 * PCI remove callback: tear down one adapter in the reverse order of
 * ia_init_one()/ia_start().  The sequence (mask PHY interrupts, stop
 * the PHY, free the IRQ, deregister, unmap, free buffers) is
 * order-sensitive — interrupts must be quiesced before the handler's
 * data structures are freed.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	/* NOTE(review): this assumes adapters are removed in LIFO order;
	 * removing a middle board leaves a stale entry in ia_dev[] —
	 * pre-existing behavior, kept as-is. */
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}
3260
/* PCI IDs of supported Interphase (i)Chip adapters (devices 0x0008 and
 * 0x0009); terminated by an all-zero sentinel entry. */
static struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3267
/* PCI driver glue: binds the probe/remove callbacks to the ID table. */
static struct pci_driver ia_driver = {
	.name = DEV_LABEL,
	.id_table = ia_pci_tbl,
	.probe = ia_init_one,
	.remove = ia_remove_one,
};
3274
3275static int __init ia_module_init(void)
3276{
3277 int ret;
3278
3279 ret = pci_register_driver(&ia_driver);
3280 if (ret >= 0) {
3281 ia_timer.expires = jiffies + 3*HZ;
3282 add_timer(&ia_timer);
3283 } else
3284 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3285 return ret;
3286}
3287
3288static void __exit ia_module_exit(void)
3289{
3290 pci_unregister_driver(&ia_driver);
3291
3292 del_timer(&ia_timer);
3293}
3294
3295module_init(ia_module_init);
3296module_exit(ia_module_exit);
1/******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
6 Version: 1.0
7*******************************************************************************
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
13
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
18
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
      supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
      packet memory. The following is the change log and history:
25
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
          around for the hardware anomalies.
31 Add the CBR support.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
37 Add SMP support.
38
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41*******************************************************************************/
42
43#include <linux/module.h>
44#include <linux/kernel.h>
45#include <linux/mm.h>
46#include <linux/pci.h>
47#include <linux/errno.h>
48#include <linux/atm.h>
49#include <linux/atmdev.h>
50#include <linux/sonet.h>
51#include <linux/skbuff.h>
52#include <linux/time.h>
53#include <linux/delay.h>
54#include <linux/uio.h>
55#include <linux/init.h>
56#include <linux/interrupt.h>
57#include <linux/wait.h>
58#include <linux/slab.h>
59#include <asm/io.h>
60#include <linux/atomic.h>
61#include <asm/uaccess.h>
62#include <asm/string.h>
63#include <asm/byteorder.h>
64#include <linux/vmalloc.h>
65#include <linux/jiffies.h>
66#include "iphase.h"
67#include "suni.h"
/* Swap the two bytes of a 16-bit value. Fix: the macro argument is now
 * fully parenthesized — the old expansion of `x & 0xff` mis-grouped
 * arguments containing low-precedence operators (e.g. `a | b`). */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
69
70#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
71
72static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
73static void desc_dbg(IADEV *iadev);
74
75static IADEV *ia_dev[8];
76static struct atm_dev *_ia_dev[8];
77static int iadev_count;
78static void ia_led_timer(unsigned long arg);
79static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
80static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
81static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
82static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
83 |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
84
85module_param(IA_TX_BUF, int, 0);
86module_param(IA_TX_BUF_SZ, int, 0);
87module_param(IA_RX_BUF, int, 0);
88module_param(IA_RX_BUF_SZ, int, 0);
89module_param(IADebugFlag, uint, 0644);
90
91MODULE_LICENSE("GPL");
92
93/**************************** IA_LIB **********************************/
94
95static void ia_init_rtn_q (IARTN_Q *que)
96{
97 que->next = NULL;
98 que->tail = NULL;
99}
100
101static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
102{
103 data->next = NULL;
104 if (que->next == NULL)
105 que->next = que->tail = data;
106 else {
107 data->next = que->next;
108 que->next = data;
109 }
110 return;
111}
112
113static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
114 IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
115 if (!entry)
116 return -ENOMEM;
117 entry->data = data;
118 entry->next = NULL;
119 if (que->next == NULL)
120 que->next = que->tail = entry;
121 else {
122 que->tail->next = entry;
123 que->tail = que->tail->next;
124 }
125 return 1;
126}
127
128static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
129 IARTN_Q *tmpdata;
130 if (que->next == NULL)
131 return NULL;
132 tmpdata = que->next;
133 if ( que->next == que->tail)
134 que->next = que->tail = NULL;
135 else
136 que->next = que->next->next;
137 return tmpdata;
138}
139
140static void ia_hack_tcq(IADEV *dev) {
141
142 u_short desc1;
143 u_short tcq_wr;
144 struct ia_vcc *iavcc_r = NULL;
145
146 tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
147 while (dev->host_tcq_wr != tcq_wr) {
148 desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
149 if (!desc1) ;
150 else if (!dev->desc_tbl[desc1 -1].timestamp) {
151 IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
152 *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
153 }
154 else if (dev->desc_tbl[desc1 -1].timestamp) {
155 if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
156 printk("IA: Fatal err in get_desc\n");
157 continue;
158 }
159 iavcc_r->vc_desc_cnt--;
160 dev->desc_tbl[desc1 -1].timestamp = 0;
161 IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
162 dev->desc_tbl[desc1 -1].txskb, desc1);)
163 if (iavcc_r->pcr < dev->rate_limit) {
164 IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
165 if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
166 printk("ia_hack_tcq: No memory available\n");
167 }
168 dev->desc_tbl[desc1 -1].iavcc = NULL;
169 dev->desc_tbl[desc1 -1].txskb = NULL;
170 }
171 dev->host_tcq_wr += 2;
172 if (dev->host_tcq_wr > dev->ffL.tcq_ed)
173 dev->host_tcq_wr = dev->ffL.tcq_st;
174 }
175} /* ia_hack_tcq */
176
/*
 * Obtain a free transmit descriptor number from the TCQ, which the
 * driver uses as a free-descriptor pool.  Also runs a periodic
 * (~every 50 jiffies, or whenever the pool looks empty) recovery scan
 * that reclaims descriptors whose completion appears lost — a
 * descriptor older than its VC's ltimeout is force-returned to the
 * TCQ.  Returns the 1-based descriptor number, or 0xFFFF if none is
 * available.  Caller must hold the tx lock (not enforced here).
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short desc_num, i;
  struct sk_buff *skb;
  struct ia_vcc *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;  /* jiffies of the last recovery scan */
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
          i++;
          continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* Back the TCQ read pointer up one slot (with wrap) and
            * stuff the reclaimed descriptor number there. */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd = dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  /* Read pointer caught up with the hardware write pointer: pool empty. */
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* Skip zero entries and descriptors that are still in flight
   * (timestamp set); bail out if we wrap all the way around. */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
        dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
234
/*
 * Workaround for an (i)Chip ABR segmentation lockup (hardware
 * anomaly): every 5th call per VC, sample the VC's scheduler state and
 * declare a lockup if it is stuck in ABR_STATE (or its cell-slot /
 * fraction counters have not moved since the previous sample).  On
 * detection the segmentation engine is paused, the VC state forced to
 * idle, the VC re-inserted into the ABR schedule table, and the engine
 * restarted.  No-op for non-ABR VCs.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char foundLockUp;
  vcstatus_t *vcstatus;
  u_short *shd_tbl;
  u_short tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     /* Only probe the hardware every 5th invocation per VC. */
     if( vcstatus->cnt == 0x05 ) {
        abr_vc += vcc->vci;
	eabr_vc += vcc->vci;
	if( eabr_vc->last_desc ) {
	   if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
	      /* Still in ABR_STATE with a pending descriptor: locked up. */
	      if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
		 foundLockUp = 1;
           }
	   else {
	      /* Not in ABR_STATE: compare progress counters against the
	       * values recorded last time; no movement means lockup. */
	      tempCellSlot = abr_vc->last_cell_slot;
              tempFract = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
	        foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
	   }
        } /* last descriptor */
	vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
	/* Pause the segmentation engine while we repair the VC state. */
	writew(0xFFFD, dev->seg_reg+MODE_REG_0);
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
	shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
	/* Find the first free slot in the ABR schedule table. */
	for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
	if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
        vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
294
295/*
296** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
297**
298** +----+----+------------------+-------------------------------+
299** | R | NZ | 5-bit exponent | 9-bit mantissa |
300** +----+----+------------------+-------------------------------+
301**
302** R = reserved (written as 0)
303** NZ = 0 if 0 cells/sec; 1 otherwise
304**
305** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
306*/
/*
 * Convert a 24-bit cell rate (cells/sec) to the adapter's 16-bit
 * floating-point format (see the format diagram above): NZ bit,
 * 5-bit exponent, 9-bit mantissa; value = 1.mmmmmmmmm * 2^eeeee.
 *
 * Fix: the old code looped forever when cr != 0 but its low 24 bits
 * were all zero, because `tmp` (masked to 24 bits) never reached 1.
 * Such values are now encoded as a zero rate.
 */
static u16
cellrate_to_float(u32 cr)
{

#define NZ 0x4000
#define M_BITS 9 /* Number of bits in mantissa */
#define E_BITS 5 /* Number of bits in exponent */
#define M_MASK 0x1ff
#define E_MASK 0x1f
  u16 flot;
  u32 tmp = cr & 0x00ffffff;
  int i = 0;
  /* Zero rate — including cr values whose low 24 bits are all zero. */
  if (tmp == 0)
     return 0;
  /* Find i = position of the leading one bit = floor(log2(tmp)). */
  while (tmp != 1) {
     tmp >>= 1;
     i++;
  }
  /* Pack exponent i and the 9 mantissa bits below the leading one. */
  if (i == M_BITS)
     flot = NZ | (i << M_BITS) | (cr & M_MASK);
  else if (i < M_BITS)
     flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
  else
     flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
  return flot;
}
333
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
/* Inverse of cellrate_to_float(); currently unused, hence compiled out. */
static u32
float_to_cellrate(u16 rate)
{
	u32 exp, mantissa, cps;
	/* NZ bit clear encodes a zero rate. */
	if ((rate & NZ) == 0)
		return 0;
	exp = (rate >> M_BITS) & E_MASK;
	mantissa = rate & M_MASK;
	if (exp == 0)
		return 1;
	/* Restore the implicit leading one, then shift by the exponent. */
	cps = (1 << M_BITS) | mantissa;
	if (exp == M_BITS)
		cps = cps;
	else if (exp > M_BITS)
		cps <<= (exp - M_BITS);
	else
		cps >>= (M_BITS - exp);
	return cps;
}
#endif
358
/* Fill *srv_p with the driver's default ABR service parameters: PCR at
 * full line rate, MCR 0, and fixed defaults for ICR/TBE/FRTT/RIF/RDF/
 * NRM/TRM/CDF/ADTF (presumably tuned for the (i)Chip hardware — see
 * ia_open_abr_vc() for how each field is encoded into registers). */
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
  srv_p->class_type = ATM_ABR;
  srv_p->pcr = dev->LineRate;
  srv_p->mcr = 0;
  srv_p->icr = 0x055cb7;
  srv_p->tbe = 0xffffff;
  srv_p->frtt = 0x3a;
  srv_p->rif = 0xf;
  srv_p->rdf = 0xb;
  srv_p->nrm = 0x4;
  srv_p->trm = 0x7;
  srv_p->cdf = 0x3;
  srv_p->adtf = 50;
}
373
/*
 * Program the adapter's VC tables for an ABR connection.
 *
 * flag == 1: initialize the FFRED (segmentation/forward) main-VC entry
 * from the service parameters in *srv_p — NRM/TRM/CRM counters, PCR,
 * ICR (capped by TBE/FRTT), CDF/ADTF, MCR, and initial status.
 * flag == 0: initialize the RFRED (reassembly/reverse) side — mark the
 * VCI in the reassembly table, set RDF and the additive increase rate,
 * and account the MCR in the device totals.
 *
 * Returns 0 (the compiled-out sanity block documents the error codes
 * that parameter validation would return if enabled).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry *f_abr_vc;
  r_vc_abr_entry *r_abr_vc;
  u32 icr;
  u8 trm, nrm, crm;
  u16 adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */
			      /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR is additionally bounded by TBE cells per fixed round-trip
        * time (both orderings of the division avoid 32-bit overflow). */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
				((srv_p->tbe/srv_p->frtt)*1000000) :
				(1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       /* NOTE(review): the left shift looks suspicious — the additive
        * increase rate is usually PCR scaled *down* by RIF, i.e.
        * pcr >> (15 - rif); confirm against the (i)Chip data sheet. */
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return 0;
}
/*
 * Reserve CBR bandwidth for a VC by placing `entries` copies of its
 * VCI into the hardware CBR schedule table, spread as evenly as
 * possible (ideal spacing plus an accumulated fractional remainder,
 * falling back to the nearest free slot on collisions).  The entry
 * count is PCR rounded to the table granularity (rounded up when the
 * requested rate is more than 1/4 of a granule above the floor).
 * Enables the CBR scheduler on the first active CBR VC.
 * Returns 0, -1 if no PCR was given, or -EBUSY if insufficient
 * schedule-table bandwidth remains.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int idealSlot =0, testSlot, toBeAssigned, inc;
   u32 spacing;
   u16 *SchedTbl, *TstSchedTbl;
   u16 cbrVC, vcIndex;
   u32 fracSlot = 0;
   u32 sp_mod = 0;
   u32 sp_mod2 = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow = entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   /* Round up when the request is closer than 1/4 granule to the
    * next-higher entry count. */
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2 = ((sp_mod + sp_mod2) % entries);   // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          inc++;
          // Probe alternately below and above the ideal slot,
          // widening the search by one slot each iteration.
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
575static void ia_cbrVc_close (struct atm_vcc *vcc) {
576 IADEV *iadev;
577 u16 *SchedTbl, NullVci = 0;
578 u32 i, NumFound;
579
580 iadev = INPH_IA_DEV(vcc->dev);
581 iadev->NumEnabledCBR--;
582 SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
583 if (iadev->NumEnabledCBR == 0) {
584 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
585 IF_CBR (printk("CBR support disabled\n");)
586 }
587 NumFound = 0;
588 for (i=0; i < iadev->CbrTotEntries; i++)
589 {
590 if (*SchedTbl == vcc->vci) {
591 iadev->CbrRemEntries++;
592 *SchedTbl = NullVci;
593 IF_CBR(NumFound++;)
594 }
595 SchedTbl++;
596 }
597 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
598}
599
600static int ia_avail_descs(IADEV *iadev) {
601 int tmp = 0;
602 ia_hack_tcq(iadev);
603 if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
604 tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
605 else
606 tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
607 iadev->ffL.tcq_st) / 2;
608 return tmp;
609}
610
611static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
612
613static int ia_que_tx (IADEV *iadev) {
614 struct sk_buff *skb;
615 int num_desc;
616 struct atm_vcc *vcc;
617 num_desc = ia_avail_descs(iadev);
618
619 while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
620 if (!(vcc = ATM_SKB(skb)->vcc)) {
621 dev_kfree_skb_any(skb);
622 printk("ia_que_tx: Null vcc\n");
623 break;
624 }
625 if (!test_bit(ATM_VF_READY,&vcc->flags)) {
626 dev_kfree_skb_any(skb);
627 printk("Free the SKB on closed vci %d \n", vcc->vci);
628 break;
629 }
630 if (ia_pkt_tx (vcc, skb)) {
631 skb_queue_head(&iadev->tx_backlog, skb);
632 }
633 num_desc--;
634 }
635 return 0;
636}
637
638static void ia_tx_poll (IADEV *iadev) {
639 struct atm_vcc *vcc = NULL;
640 struct sk_buff *skb = NULL, *skb1 = NULL;
641 struct ia_vcc *iavcc;
642 IARTN_Q * rtne;
643
644 ia_hack_tcq(iadev);
645 while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
646 skb = rtne->data.txskb;
647 if (!skb) {
648 printk("ia_tx_poll: skb is null\n");
649 goto out;
650 }
651 vcc = ATM_SKB(skb)->vcc;
652 if (!vcc) {
653 printk("ia_tx_poll: vcc is null\n");
654 dev_kfree_skb_any(skb);
655 goto out;
656 }
657
658 iavcc = INPH_IA_VCC(vcc);
659 if (!iavcc) {
660 printk("ia_tx_poll: iavcc is null\n");
661 dev_kfree_skb_any(skb);
662 goto out;
663 }
664
665 skb1 = skb_dequeue(&iavcc->txing_skb);
666 while (skb1 && (skb1 != skb)) {
667 if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
668 printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
669 }
670 IF_ERR(printk("Release the SKB not match\n");)
671 if ((vcc->pop) && (skb1->len != 0))
672 {
673 vcc->pop(vcc, skb1);
674 IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
675 (long)skb1);)
676 }
677 else
678 dev_kfree_skb_any(skb1);
679 skb1 = skb_dequeue(&iavcc->txing_skb);
680 }
681 if (!skb1) {
682 IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
683 ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
684 break;
685 }
686 if ((vcc->pop) && (skb->len != 0))
687 {
688 vcc->pop(vcc, skb);
689 IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
690 }
691 else
692 dev_kfree_skb_any(skb);
693 kfree(rtne);
694 }
695 ia_que_tx(iadev);
696out:
697 return;
698}
#if 0
/* Write one 16-bit word to the adapter's NOVRAM/EEPROM at `addr`:
 * enable writes, clock out the write command and the 16 data bits
 * MSB-first, poll NVDO for completion, then disable writes again.
 * Currently unused, hence compiled out. */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
	u32	t;
	int	i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
735
/*
 * ia_eeprom_get - read one 16-bit word from the serial NOVRAM.
 * @iadev: adapter private state
 * @addr:  NOVRAM word address
 *
 * Issues the read command, then clocks in 16 data bits (D15 first,
 * assembled MSB-to-LSB) before deselecting the part.  The exact
 * command/clock sequence is dictated by the NOVRAM protocol — do not
 * reorder these macro invocations.
 *
 * Returns the 16-bit word stored at @addr.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short	val;
	u32	t;
	int	i;
	/*
	 * Read the first bit that was clocked with the falling edge of the
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
759
/*
 * ia_hw_type - determine board variant from EEPROM and size the buffers.
 * @iadev: adapter private state
 *
 * Reads EEPROM word 25, which encodes both the packet-memory size and the
 * front-end (PHY) type.  Buffer counts are scaled down for the 512K and
 * smaller memory variants (only when the module parameters were left at
 * their defaults), and LineRate is derived from the detected PHY:
 * 25 Mbit UTP25, DS3, E3, or OC3 (default).  The rate formulas convert
 * the raw line rate to a cell rate ((bits/s / 8) * 26 / (27 * 53)),
 * accounting for framing overhead and the 53-byte cell size.
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      /* Full 1M packet memory: use the configured buffer counts as-is. */
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* Halve the default counts; honor explicit user overrides. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      /* Smallest memory variant: one-eighth of the default counts. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   /* Receive packet RAM starts right after the transmit buffer area. */
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   /* Superseded: PHY type is now taken directly from the FE_MASK bits. */
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
      iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
820
821static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
822{
823 return readl(ia->phy + (reg >> 2));
824}
825
826static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
827{
828 writel(val, ia->phy + (reg >> 2));
829}
830
/*
 * ia_frontend_intr - service a front-end (PHY) interrupt.
 * @iadev: adapter private state
 *
 * Reads the PHY-specific interrupt/status registers to refresh
 * iadev->carrier_detect and logs the new carrier state.  For the DS3 and
 * E3 SUNI variants the first read acknowledges the pending interrupt
 * indication; its value is intentionally discarded — do not remove it.
 */
static void ia_frontend_intr(struct iadev_priv *iadev)
{
	u32 status;

	if (iadev->phy_type & FE_25MBIT_PHY) {
		/* 25 Mbit PHY: good-signal bit lives in the interrupt status reg. */
		status = ia_phy_read32(iadev, MB25_INTR_STATUS);
		iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
	} else if (iadev->phy_type & FE_DS3_PHY) {
		ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT); /* ack interrupt */
		status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
		iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
	} else if (iadev->phy_type & FE_E3_PHY) {
		ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND); /* ack interrupt */
		status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
		iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	} else {
		/* OC3/UTP155: loss-of-signal from the SUNI RSOP block. */
		status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
		iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
	}

	printk(KERN_INFO "IA: SUNI carrier %s\n",
		iadev->carrier_detect ? "detected" : "lost signal");
}
854
/*
 * ia_mb25_init - initialize the 25 Mbit (MB25) front end.
 * @iadev: adapter private state
 *
 * Programs the master control register (discard-on-RX/TX-error bits),
 * clears the diagnostic register, and samples the good-signal bit to seed
 * the cached carrier state.
 */
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
	/* NOTE(review): unlike the #if 0 remnant above, MB25_MC_ENABLED is
	 * not set here — presumably enabled elsewhere; confirm if changing. */
	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

	iadev->carrier_detect =
		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}
866
/* Register/value pair used for table-driven PHY initialization
 * (see ia_phy_write()). */
struct ia_reg {
	u16 reg;	/* PHY register byte offset */
	u16 val;	/* value to program */
};
871
872static void ia_phy_write(struct iadev_priv *iadev,
873 const struct ia_reg *regs, int len)
874{
875 while (len--) {
876 ia_phy_write32(iadev, regs->reg, regs->val);
877 regs++;
878 }
879}
880
/*
 * ia_suni_pm7345_init_ds3 - DS3-specific setup for the SUNI PM7345.
 * @iadev: adapter private state
 *
 * Samples the framer status to seed the cached carrier state (LOSV set
 * means no carrier), then programs the DS3 framer/transmitter registers
 * and clears the splitter/scrambler configuration.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init [] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
898
/*
 * ia_suni_pm7345_init_e3 - E3-specific setup for the SUNI PM7345.
 * @iadev: adapter private state
 *
 * Samples the framing status to seed the cached carrier state (LOS set
 * means no carrier), then programs the E3 framer options, interrupt
 * enables, and enables E3 mode with the corresponding splitter settings.
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init [] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
918
/*
 * ia_suni_pm7345_init - common initialization for the SUNI PM7345 PHY.
 * @iadev: adapter private state
 *
 * Dispatches to the DS3 or E3 variant setup first, then programs the
 * common register set: interrupt enables, counter reset, cell processor
 * idle/cell delineation patterns and masks, and transmit cell processor
 * control.  Finally all loopback bits are cleared so the PHY runs in
 * normal (non-loopback) mode.
 */
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_init [] = {
		/* Enable RSOP loss of signal interrupt. */
		{ SUNI_INTR_ENBL,		0x28 },
		/* Clear error counters. */
		{ SUNI_ID_RESET,		0 },
		/* Clear "PMCTST" in master test register. */
		{ SUNI_MASTER_TEST,		0 },

		{ SUNI_RXCP_CTRL,		0x2c },
		{ SUNI_RXCP_FCTRL,		0x81 },

		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },

		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },

		{ SUNI_RXCP_CELL_PAT_H1,	0 },
		{ SUNI_RXCP_CELL_PAT_H2,	0 },
		{ SUNI_RXCP_CELL_PAT_H3,	0 },
		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },

		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
		{ SUNI_RXCP_CELL_MASK_H4,	0xff },

		{ SUNI_TXCP_CTRL,		0xa4 },
		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
	};

	if (iadev->phy_type & FE_DS3_PHY)
		ia_suni_pm7345_init_ds3(iadev);
	else
		ia_suni_pm7345_init_e3(iadev);

	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

	/* Disable all loopback modes (line, cell, diagnostic, parallel). */
	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
	return;
}
972
973
974/***************************** IA_LIB END *****************************/
975
#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
/*
 * xdump - hex/ASCII dump of a buffer to the kernel log, 16 bytes per line.
 * @cp:     buffer to dump
 * @length: number of bytes to dump
 * @prefix: string printed at the start of every output line
 *
 * Each line shows up to 16 bytes in hex (extra blank between groups of 4)
 * followed by the same bytes as printable ASCII ('.' for non-printables).
 * Short final lines are padded so the ASCII column stays aligned.
 *
 * Fix: the group-separator sprintf in the padding loop previously did not
 * advance pBuf, so its output was immediately overwritten by the next
 * sprintf (a dead write) and short lines lost their group spacing.
 */
static void xdump( u_char* cp, int length, char* prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char* pBuf = prntBuf;
    count = 0;
    while(count < length){
        pBuf += sprintf( pBuf, "%s", prefix );
        /* hex column */
        for(col = 0;count + col < length && col < 16; col++){
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while(col++ < 16){ /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                pBuf += sprintf( pBuf, " " ); /* was non-advancing: dead write */
            pBuf += sprintf( pBuf, "   " );
        }
        pBuf += sprintf( pBuf, "  " );
        /* ASCII column */
        for(col = 0;count + col < length && col < 16; col++){
            if (isprint((int)cp[count + col]))
                pBuf += sprintf( pBuf, "%c", cp[count + col] );
            else
                pBuf += sprintf( pBuf, "." );
        }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

} /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */
1010
1011
/* Head of the list of probed adapter devices. */
static struct atm_dev *ia_boards = NULL;

/* The nominal base addresses below assume the 128K control-memory
 * baseline; scale them by the actually installed control memory size
 * (iadev->mem).  These macros expect an `iadev` variable in scope. */
#define ACTUAL_RAM_BASE \
	RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1020
1021
1022/*-- some utilities and memory allocation stuff will come here -------------*/
1023
/*
 * desc_dbg - dump transmit-complete-queue state for debugging.
 * @iadev: adapter private state
 *
 * Prints the hardware TCQ write pointer, the driver's cached read/write
 * pointers, every descriptor currently sitting in the TCQ window, and the
 * timestamp of each entry in the driver descriptor table.  Diagnostic
 * only; no state is modified.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  /* Walk the whole TCQ window and print each slot's descriptor number. */
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1048
1049
1050/*----------------------------- Receiving side stuff --------------------------*/
1051
/*
 * rx_excp_rcvd - handler for the receive exception-queue interrupt.
 * @dev: ATM device
 *
 * Intentionally a no-op: the draining logic below is compiled out because
 * servicing the exception queue this way caused excessive exception
 * interrupts (see the original comment).  Kept for reference.
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
 	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1080
1081static void free_desc(struct atm_dev *dev, int desc)
1082{
1083 IADEV *iadev;
1084 iadev = INPH_IA_DEV(dev);
1085 writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1086 iadev->rfL.fdq_wr +=2;
1087 if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1088 iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
1089 writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1090}
1091
1092
/*
 * rx_pkt - take one completed PDU off the packet-complete queue and
 * schedule its DMA into a freshly allocated skb.
 * @dev: ATM device
 *
 * Pops the next descriptor from the hardware packet-complete queue,
 * validates it and its VC, allocates an skb charged to the owning VCC,
 * builds a receive DLE pointing at the on-card buffer, and kicks the DMA
 * engine.  The skb is queued on rx_dma_q; rx_dle_intr() completes it.
 *
 * Returns 0 on success (or after dropping a bad packet), negative on
 * queue-empty or validation failure.
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* Nothing to do if the packet-complete queue is empty. */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
	IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
	    iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
	    iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	/* Sanity check: valid descriptor number and in-range VC index. */
	if (!desc || (desc > iadev->num_rx_desc) ||
	              ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
	    free_desc(dev, desc);
	    IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
	    return -1;
	}
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
	    free_desc(dev, desc);
	    printk("IA: null vcc, drop PDU\n");
	    return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
	    atomic_inc(&vcc->stats->rx_err);
	    IF_ERR(printk("IA: bad packet, dropping it");)
	    if (status & RX_CER) {
	        IF_ERR(printk(" cause: packet CRC error\n");)
	    }
	    else if (status & RX_PTE) {
	        IF_ERR(printk(" cause: packet time out\n");)
	    }
	    else {
	        IF_ERR(printk(" cause: buffer overflow\n");)
	    }
	    goto out_free_desc;
	}

	/*
		build DLE.
	*/

	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	/* PDU length = distance the card's DMA pointer advanced into the buffer. */
	len = dma_addr - buf_addr;
	if (len > iadev->rx_buf_sz) {
	    printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
	    atomic_inc(&vcc->stats->rx_err);
	    goto out_free_desc;
	}

	/* atm_alloc_charge() accounts the skb against the VCC's rx quota. */
	if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
	    if (vcc->vci < 32)
	        printk("Drop control packets\n");
	    goto out_free_desc;
	}
	skb_put(skb,len);
	// pwang_test
	ATM_SKB(skb)->vcc = vcc;
	ATM_DESC(skb) = desc;
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      len, DMA_FROM_DEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
	if(++wr_ptr == iadev->rx_dle_q.end)
	     wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
	free_desc(dev, desc);
	goto out;
}
1207
/*
 * rx_intr - service the reassembly (receive side) interrupt.
 * @dev: ATM device
 *
 * Handles: packet received (drain the packet-complete queue via rx_pkt()),
 * free-descriptor queue empty (with a watchdog that forcibly recycles all
 * descriptors if reception has been stalled for ~50 jiffies), exception
 * queue entries, and raw cell reception (currently logged only).
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
	/* do something */
	/* Basically recvd an interrupt for receiving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
	*/
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while(!(state & PCQ_EMPTY))
	{
             rx_pkt(dev);
	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     /* Free queue just went empty: snapshot the packet count so the
      * stall watchdog below can tell whether reception has wedged. */
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        /* No packet completed since the queue went empty: assume the
         * engine stalled and recycle every descriptor to restart it. */
        for (i = 1; i <= iadev->num_rx_desc; i++)
            free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
	/* need to handle the raw incoming cells. This deepnds on
	whether we have programmed to receive the raw cells or not.
	Else ignore. */
	IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
  }
}
1268
1269
/*
 * rx_dle_intr - receive DMA-list-entry completion handler.
 * @dev: ATM device
 *
 * Walks the receive DLE ring from our cached read pointer up to the
 * position the DMA engine has reached, and for each completed entry:
 * dequeues the matching skb from rx_dma_q, returns the on-card descriptor
 * to the free queue, unmaps the DMA buffer, validates the AAL5 trailer
 * length, trims the skb to the real PDU size and pushes it up the stack.
 * Finally, if reception was stalled on an empty free queue, re-enables
 * the masked receive interrupts.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
  /* DMA is done, just get all the receive buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
	  dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

	  /* NOTE(review): unmaps via the ring's *write* pointer rather
	   * than the entry being completed — looks suspicious; confirm
	   * against the mapping done in rx_pkt(). */
	  dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
			   len, DMA_FROM_DEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
	  if (!vcc) {
	      printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
           }
          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
	  length = swap_byte_order(trailer->length);
	  /* Trailer length must fit in both the buffer and the PDU. */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          skb_trim(skb, length);

	  /* Display the packet */
	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

	  IF_RX(printk("rx_dle_intr: skb push");)
	  vcc->push(vcc,skb);
	  atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
    	  dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
		unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1371
1372
/*
 * open_rx - set up the receive side of a VC.
 * @vcc: the ATM VCC being opened
 *
 * Validates that ABR is supported by the PHY, marks the VCI valid in the
 * hardware VC lookup table, programs either the ABR VC table or the
 * reassembly table (UBR/CBR), and records the VCC in rx_open[] so
 * incoming PDUs can be matched back to it.
 *
 * Returns 0 on success, -EINVAL if ABR is requested on a 25 Mbit PHY.
 */
static int open_rx(struct atm_vcc *vcc)
{
	IADEV *iadev;
	u_short __iomem *vc_table;
	u_short __iomem *reass_ptr;
	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
	if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
		if (iadev->phy_type & FE_25MBIT_PHY) {
			printk("IA:  ABR not support\n");
			return -EINVAL;
		}
	}
	/* Make only this VCI in the vc table valid and let all
		others be invalid entries */
	vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
	vc_table += vcc->vci;
	/* mask the last 6 bits and OR it with 3 for 1K VCs */

	*vc_table = vcc->vci << 6;
	/* Also keep a list of open rx vcs so that we can attach them with
		incoming PDUs later. */
	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
                                (vcc->qos.txtp.traffic_class == ATM_ABR))
	{
		/* ABR in either direction needs a full ABR VC table entry. */
		srv_cls_param_t srv_p;
		init_abr_vc(iadev, &srv_p);
		ia_open_abr_vc(iadev, &srv_p, vcc, 0);
	}
	else { /* for UBR  later may need to add CBR logic */
		reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
		reass_ptr += vcc->vci;
		*reass_ptr = NO_AAL5_PKT;
	}

	if (iadev->rx_open[vcc->vci])
		printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
			vcc->dev->number, vcc->vci);
	iadev->rx_open[vcc->vci] = vcc;
	return 0;
}
1416
1417static int rx_init(struct atm_dev *dev)
1418{
1419 IADEV *iadev;
1420 struct rx_buf_desc __iomem *buf_desc_ptr;
1421 unsigned long rx_pkt_start = 0;
1422 void *dle_addr;
1423 struct abr_vc_table *abr_vc_table;
1424 u16 *vc_table;
1425 u16 *reass_table;
1426 int i,j, vcsize_sel;
1427 u_short freeq_st_adr;
1428 u_short *freeq_start;
1429
1430 iadev = INPH_IA_DEV(dev);
1431 // spin_lock_init(&iadev->rx_lock);
1432
1433 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1434 dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1435 &iadev->rx_dle_dma, GFP_KERNEL);
1436 if (!dle_addr) {
1437 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1438 goto err_out;
1439 }
1440 iadev->rx_dle_q.start = (struct dle *)dle_addr;
1441 iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1442 iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1443 iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1444 /* the end of the dle q points to the entry after the last
1445 DLE that can be used. */
1446
1447 /* write the upper 20 bits of the start address to rx list address register */
1448 /* We know this is 32bit bus addressed so the following is safe */
1449 writel(iadev->rx_dle_dma & 0xfffff000,
1450 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1451 IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1452 iadev->dma+IPHASE5575_TX_LIST_ADDR,
1453 readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1454 printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1455 iadev->dma+IPHASE5575_RX_LIST_ADDR,
1456 readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1457
1458 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1459 writew(0, iadev->reass_reg+MODE_REG);
1460 writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1461
1462 /* Receive side control memory map
1463 -------------------------------
1464
1465 Buffer descr 0x0000 (736 - 23K)
1466 VP Table 0x5c00 (256 - 512)
1467 Except q 0x5e00 (128 - 512)
1468 Free buffer q 0x6000 (1K - 2K)
1469 Packet comp q 0x6800 (1K - 2K)
1470 Reass Table 0x7000 (1K - 2K)
1471 VC Table 0x7800 (1K - 2K)
1472 ABR VC Table 0x8000 (1K - 32K)
1473 */
1474
1475 /* Base address for Buffer Descriptor Table */
1476 writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1477 /* Set the buffer size register */
1478 writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1479
1480 /* Initialize each entry in the Buffer Descriptor Table */
1481 iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1482 buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1483 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1484 buf_desc_ptr++;
1485 rx_pkt_start = iadev->rx_pkt_ram;
1486 for(i=1; i<=iadev->num_rx_desc; i++)
1487 {
1488 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1489 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1490 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1491 buf_desc_ptr++;
1492 rx_pkt_start += iadev->rx_buf_sz;
1493 }
1494 IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1495 i = FREE_BUF_DESC_Q*iadev->memSize;
1496 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1497 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1498 writew(i+iadev->num_rx_desc*sizeof(u_short),
1499 iadev->reass_reg+FREEQ_ED_ADR);
1500 writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1501 writew(i+iadev->num_rx_desc*sizeof(u_short),
1502 iadev->reass_reg+FREEQ_WR_PTR);
1503 /* Fill the FREEQ with all the free descriptors. */
1504 freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1505 freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1506 for(i=1; i<=iadev->num_rx_desc; i++)
1507 {
1508 *freeq_start = (u_short)i;
1509 freeq_start++;
1510 }
1511 IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1512 /* Packet Complete Queue */
1513 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1514 writew(i, iadev->reass_reg+PCQ_ST_ADR);
1515 writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1516 writew(i, iadev->reass_reg+PCQ_RD_PTR);
1517 writew(i, iadev->reass_reg+PCQ_WR_PTR);
1518
1519 /* Exception Queue */
1520 i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1521 writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1522 writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1523 iadev->reass_reg+EXCP_Q_ED_ADR);
1524 writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1525 writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1526
1527 /* Load local copy of FREEQ and PCQ ptrs */
1528 iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1529 iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1530 iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1531 iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1532 iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1533 iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1534 iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1535 iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1536
1537 IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1538 iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1539 iadev->rfL.pcq_wr);)
1540 /* just for check - no VP TBL */
1541 /* VP Table */
1542 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1543 /* initialize VP Table for invalid VPIs
1544 - I guess we can write all 1s or 0x000f in the entire memory
1545 space or something similar.
1546 */
1547
1548 /* This seems to work and looks right to me too !!! */
1549 i = REASS_TABLE * iadev->memSize;
1550 writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1551 /* initialize Reassembly table to I don't know what ???? */
1552 reass_table = (u16 *)(iadev->reass_ram+i);
1553 j = REASS_TABLE_SZ * iadev->memSize;
1554 for(i=0; i < j; i++)
1555 *reass_table++ = NO_AAL5_PKT;
1556 i = 8*1024;
1557 vcsize_sel = 0;
1558 while (i != iadev->num_vc) {
1559 i /= 2;
1560 vcsize_sel++;
1561 }
1562 i = RX_VC_TABLE * iadev->memSize;
1563 writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1564 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1565 j = RX_VC_TABLE_SZ * iadev->memSize;
1566 for(i = 0; i < j; i++)
1567 {
1568 /* shift the reassembly pointer by 3 + lower 3 bits of
1569 vc_lkup_base register (=3 for 1K VCs) and the last byte
1570 is those low 3 bits.
1571 Shall program this later.
1572 */
1573 *vc_table = (i << 6) | 15; /* for invalid VCI */
1574 vc_table++;
1575 }
1576 /* ABR VC table */
1577 i = ABR_VC_TABLE * iadev->memSize;
1578 writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1579
1580 i = ABR_VC_TABLE * iadev->memSize;
1581 abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
1582 j = REASS_TABLE_SZ * iadev->memSize;
1583 memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1584 for(i = 0; i < j; i++) {
1585 abr_vc_table->rdf = 0x0003;
1586 abr_vc_table->air = 0x5eb1;
1587 abr_vc_table++;
1588 }
1589
1590 /* Initialize other registers */
1591
1592 /* VP Filter Register set for VC Reassembly only */
1593 writew(0xff00, iadev->reass_reg+VP_FILTER);
1594 writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1595 writew(0x1, iadev->reass_reg+PROTOCOL_ID);
1596
1597 /* Packet Timeout Count related Registers :
1598 Set packet timeout to occur in about 3 seconds
1599 Set Packet Aging Interval count register to overflow in about 4 us
1600 */
1601 writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1602
1603 i = (j >> 6) & 0xFF;
1604 j += 2 * (j - 1);
1605 i |= ((j << 2) & 0xFF00);
1606 writew(i, iadev->reass_reg+TMOUT_RANGE);
1607
1608 /* initiate the desc_tble */
1609 for(i=0; i<iadev->num_tx_desc;i++)
1610 iadev->desc_tbl[i].timestamp = 0;
1611
1612 /* to clear the interrupt status register - read it */
1613 readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1614
1615 /* Mask Register - clear it */
1616 writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1617
1618 skb_queue_head_init(&iadev->rx_dma_q);
1619 iadev->rx_free_desc_qhead = NULL;
1620
1621 iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1622 if (!iadev->rx_open) {
1623 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1624 dev->number);
1625 goto err_free_dle;
1626 }
1627
1628 iadev->rxing = 1;
1629 iadev->rx_pkt_cnt = 0;
1630 /* Mode Register */
1631 writew(R_ONLINE, iadev->reass_reg+MODE_REG);
1632 return 0;
1633
1634err_free_dle:
1635 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1636 iadev->rx_dle_dma);
1637err_out:
1638 return -ENOMEM;
1639}
1640
1641
1642/*
1643 The memory map suggested in appendix A and the coding for it.
1644 Keeping it around just in case we change our mind later.
1645
1646 Buffer descr 0x0000 (128 - 4K)
1647 UBR sched 0x1000 (1K - 4K)
1648 UBR Wait q 0x2000 (1K - 4K)
	Common queues	0x3000	Packet Ready, Transmit comp(0x3100)
1650 (128 - 256) each
1651 extended VC 0x4000 (1K - 8K)
1652 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1653 CBR sched 0x7000 (as needed)
1654 VC table 0x8000 (1K - 32K)
1655*/
1656
/*
 * tx_intr - service the segmentation (transmit side) interrupt.
 * @dev: ATM device
 *
 * On TRANSMIT_DONE, reclaims completed skbs via ia_tx_poll() (under
 * tx_lock), acknowledges the interrupt, and wakes any closer waiting for
 * transmissions to drain.  TCQ_NOT_EMPTY is only logged.
 */
static void tx_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned short status;
	unsigned long flags;

	iadev = INPH_IA_DEV(dev);

	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
	if (status & TRANSMIT_DONE){

	   IF_EVENT(printk("Tansmit Done Intr logic run\n");)
	   spin_lock_irqsave(&iadev->tx_lock, flags);
	   ia_tx_poll(iadev);
	   spin_unlock_irqrestore(&iadev->tx_lock, flags);
	   /* Writing the bit back acknowledges (clears) the interrupt. */
	   writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	   if (iadev->close_pending)
	       wake_up(&iadev->close_wait);
	}
	if (status & TCQ_NOT_EMPTY)
	{
	   IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
	}
}
1681
/*
 * tx_dle_intr - transmit DMA-list-entry completion handler.
 * @dev: ATM device
 *
 * Walks the transmit DLE ring from our cached read pointer up to where
 * the DMA engine has advanced, unmapping and releasing each DMAed skb.
 * Fast VCs (pcr >= rate_limit) release the skb immediately; rate-limited
 * VCs keep it on the per-VC txing_skb list (marked IA_DLED) so
 * ia_tx_poll() can release it when the hardware reports completion —
 * this implements the driver's flow control.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct dle *dle, *cur_dle;
  struct sk_buff *skb;
  struct atm_vcc *vcc;
  struct ia_vcc  *iavcc;
  u_int dle_lp;
  unsigned long flags;

  iadev = INPH_IA_DEV(dev);
  spin_lock_irqsave(&iadev->tx_lock, flags);
  dle = iadev->tx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
  while (dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->tx_dma_q);
      if (!skb) break;

      /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
      if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
          dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
                           DMA_TO_DEVICE);
      }
      vcc = ATM_SKB(skb)->vcc;
      if (!vcc) {
          printk("tx_dle_intr: vcc is null\n");
	  spin_unlock_irqrestore(&iadev->tx_lock, flags);
          dev_kfree_skb_any(skb);

          return;
      }
      iavcc = INPH_IA_VCC(vcc);
      if (!iavcc) {
          printk("tx_dle_intr: iavcc is null\n");
	  spin_unlock_irqrestore(&iadev->tx_lock, flags);
          dev_kfree_skb_any(skb);
          return;
      }
      if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
         if ((vcc->pop) && (skb->len != 0))
         {
             vcc->pop(vcc, skb);
         }
         else {
             dev_kfree_skb_any(skb);
         }
      }
      else { /* Hold the rate-limited skb for flow control */
         IA_SKB_STATE(skb) |= IA_DLED;
         skb_queue_tail(&iavcc->txing_skb, skb);
      }
      IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
      if (++dle == iadev->tx_dle_q.end)
           dle = iadev->tx_dle_q.start;
  }
  iadev->tx_dle_q.read = dle;
  spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1744
1745static int open_tx(struct atm_vcc *vcc)
1746{
1747 struct ia_vcc *ia_vcc;
1748 IADEV *iadev;
1749 struct main_vc *vc;
1750 struct ext_vc *evc;
1751 int ret;
1752 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1753 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1754 iadev = INPH_IA_DEV(vcc->dev);
1755
1756 if (iadev->phy_type & FE_25MBIT_PHY) {
1757 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1758 printk("IA: ABR not support\n");
1759 return -EINVAL;
1760 }
1761 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1762 printk("IA: CBR not support\n");
1763 return -EINVAL;
1764 }
1765 }
1766 ia_vcc = INPH_IA_VCC(vcc);
1767 memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1768 if (vcc->qos.txtp.max_sdu >
1769 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1770 printk("IA: SDU size over (%d) the configured SDU size %d\n",
1771 vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1772 vcc->dev_data = NULL;
1773 kfree(ia_vcc);
1774 return -EINVAL;
1775 }
1776 ia_vcc->vc_desc_cnt = 0;
1777 ia_vcc->txing = 1;
1778
1779 /* find pcr */
1780 if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1781 vcc->qos.txtp.pcr = iadev->LineRate;
1782 else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1783 vcc->qos.txtp.pcr = iadev->LineRate;
1784 else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1785 vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1786 if (vcc->qos.txtp.pcr > iadev->LineRate)
1787 vcc->qos.txtp.pcr = iadev->LineRate;
1788 ia_vcc->pcr = vcc->qos.txtp.pcr;
1789
1790 if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1791 else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1792 else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1793 else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
1794 if (ia_vcc->pcr < iadev->rate_limit)
1795 skb_queue_head_init (&ia_vcc->txing_skb);
1796 if (ia_vcc->pcr < iadev->rate_limit) {
1797 struct sock *sk = sk_atm(vcc);
1798
1799 if (vcc->qos.txtp.max_sdu != 0) {
1800 if (ia_vcc->pcr > 60000)
1801 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1802 else if (ia_vcc->pcr > 2000)
1803 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1804 else
1805 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1806 }
1807 else
1808 sk->sk_sndbuf = 24576;
1809 }
1810
1811 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1812 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1813 vc += vcc->vci;
1814 evc += vcc->vci;
1815 memset((caddr_t)vc, 0, sizeof(*vc));
1816 memset((caddr_t)evc, 0, sizeof(*evc));
1817
1818 /* store the most significant 4 bits of vci as the last 4 bits
1819 of first part of atm header.
1820 store the last 12 bits of vci as first 12 bits of the second
1821 part of the atm header.
1822 */
1823 evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1824 evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1825
1826 /* check the following for different traffic classes */
1827 if (vcc->qos.txtp.traffic_class == ATM_UBR)
1828 {
1829 vc->type = UBR;
1830 vc->status = CRC_APPEND;
1831 vc->acr = cellrate_to_float(iadev->LineRate);
1832 if (vcc->qos.txtp.pcr > 0)
1833 vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1834 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1835 vcc->qos.txtp.max_pcr,vc->acr);)
1836 }
1837 else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1838 { srv_cls_param_t srv_p;
1839 IF_ABR(printk("Tx ABR VCC\n");)
1840 init_abr_vc(iadev, &srv_p);
1841 if (vcc->qos.txtp.pcr > 0)
1842 srv_p.pcr = vcc->qos.txtp.pcr;
1843 if (vcc->qos.txtp.min_pcr > 0) {
1844 int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1845 if (tmpsum > iadev->LineRate)
1846 return -EBUSY;
1847 srv_p.mcr = vcc->qos.txtp.min_pcr;
1848 iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1849 }
1850 else srv_p.mcr = 0;
1851 if (vcc->qos.txtp.icr)
1852 srv_p.icr = vcc->qos.txtp.icr;
1853 if (vcc->qos.txtp.tbe)
1854 srv_p.tbe = vcc->qos.txtp.tbe;
1855 if (vcc->qos.txtp.frtt)
1856 srv_p.frtt = vcc->qos.txtp.frtt;
1857 if (vcc->qos.txtp.rif)
1858 srv_p.rif = vcc->qos.txtp.rif;
1859 if (vcc->qos.txtp.rdf)
1860 srv_p.rdf = vcc->qos.txtp.rdf;
1861 if (vcc->qos.txtp.nrm_pres)
1862 srv_p.nrm = vcc->qos.txtp.nrm;
1863 if (vcc->qos.txtp.trm_pres)
1864 srv_p.trm = vcc->qos.txtp.trm;
1865 if (vcc->qos.txtp.adtf_pres)
1866 srv_p.adtf = vcc->qos.txtp.adtf;
1867 if (vcc->qos.txtp.cdf_pres)
1868 srv_p.cdf = vcc->qos.txtp.cdf;
1869 if (srv_p.icr > srv_p.pcr)
1870 srv_p.icr = srv_p.pcr;
1871 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1872 srv_p.pcr, srv_p.mcr);)
1873 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1874 } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1875 if (iadev->phy_type & FE_25MBIT_PHY) {
1876 printk("IA: CBR not support\n");
1877 return -EINVAL;
1878 }
1879 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1880 IF_CBR(printk("PCR is not available\n");)
1881 return -1;
1882 }
1883 vc->type = CBR;
1884 vc->status = CRC_APPEND;
1885 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1886 return ret;
1887 }
1888 }
1889 else
1890 printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
1891
1892 iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1893 IF_EVENT(printk("ia open_tx returning \n");)
1894 return 0;
1895}
1896
1897
/*
 * tx_init - one-time initialisation of the segmentation (transmit)
 * engine.
 *
 * Allocates the coherent TX DLE ring, lays out the on-board control
 * memory (buffer descriptors, TCQ/PRQ communication queues, CBR/UBR/
 * ABR scheduler tables, main and extended VC tables), allocates the
 * per-descriptor CPCS trailer buffers and the software bookkeeping
 * tables, and programs the segmentation registers.
 *
 * Returns 0 on success or -ENOMEM; the labels at the bottom unwind
 * every allocation made before the failure point, in reverse order.
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
	u_short tmp16;
	u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
	spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
		       readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr) {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	/* mask all segmentation interrupts while initialising, then
	   reset the segmentation engine */
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
	iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
	iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
	iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	 Buffer descr 	0x0000 (128 - 4K)
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	 CBR Table 	0x1800 (as needed) - 6K
	 UBR Table	0x3000 (1K - 4K) - 12K
	 UBR Wait queue	0x4000 (1K - 4K) - 16K
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	 extended VC	0x6000 (1K - 8K) - 24K
	 VC Table	0x8000 (1K - 32K) - 32K

	 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	 and Wait q, which can be allotted later.
	 */

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table */
	/* descriptor 0 is reserved (zeroed, never used for packets) */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	/* host-side bookkeeping for the AAL5 CPCS trailers, one per
	   TX descriptor; each trailer is mapped for DMA up front */
	iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
	if (!iadev->tx_buf) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_dle;
	}
	for (i= 0; i< iadev->num_tx_desc; i++)
	{
		struct cpcs_trailer *cpcs;

		cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
		if(!cpcs) {
			printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
			goto err_free_tx_bufs;
		}
		iadev->tx_buf[i].cpcs = cpcs;
		iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
							   cpcs,
							   sizeof(*cpcs),
							   DMA_TO_DEVICE);
	}
	iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
				  sizeof(struct desc_tbl_t), GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
	i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
	i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
	       iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

	/* Load local copy of PRQ and TCQ ptrs */
	iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0; /* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
	IF_INIT(printk("Start CBR Init\n");)
#if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
	writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
	tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
	IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
	writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

	IF_INIT(printk("value in register = 0x%x\n",
		       readw(iadev->seg_reg+CBR_PTR_BASE));)
	/* CBR table offsets are programmed in units of words (>> 1) */
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
	IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
		       readw(iadev->seg_reg+CBR_TAB_BEG));)
	writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
	tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
	writew(tmp16, iadev->seg_reg+CBR_TAB_END);
	IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
		       iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
	IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
		       readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
		       readw(iadev->seg_reg+CBR_TAB_END+1));)

	/* Initialize the CBR Schedualing Table */
	memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
		  0, iadev->num_vc*6);
	iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
	iadev->CbrEntryPt = 0;
	iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
	iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
	   - SCHEDSZ is 1K (# of entries).
	   - UBR Table size is 4K
	   - UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	 */

	/* vcsize_sel encodes num_vc as log2(8K / num_vc) */
	vcsize_sel = 0;
	i = 8*1024;
	while (i != iadev->num_vc) {
		i /= 2;
		vcsize_sel++;
	}

	i = MAIN_VC_TABLE * iadev->memSize;
	writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
	i = EXT_VC_TABLE * iadev->memSize;
	writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
	i = UBR_SCHED_TABLE * iadev->memSize;
	writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
	i = UBR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
	       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
	   - SCHEDSZ is 1K (# of entries).
	   - ABR Table size is 2K
	   - ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	 */
	i = ABR_SCHED_TABLE * iadev->memSize;
	writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
	i = ABR_WAIT_Q * iadev->memSize;
	writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

	i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	/* per-VC software state used by the rate-limiting logic */
	iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
	if (!iadev->testTable) {
		printk("Get freepage failed\n");
		goto err_free_desc_tbl;
	}
	for(i=0; i<iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
		iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
					      GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
		iadev->testTable[i]->lastTime = 0;
		iadev->testTable[i]->fract = 0;
		iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
	if (iadev->phy_type & FE_25MBIT_PHY) {
		writew(RATE25, iadev->seg_reg+MAXRATE);
		writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	else {
		writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
		writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
	}
	/* Set Idle Header Reigisters to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
	writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

	iadev->close_pending = 0;
	init_waitqueue_head(&iadev->close_wait);
	init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
	skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
	writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	iadev->tx_pkt_cnt = 0;
	iadev->rate_limit = iadev->LineRate / 3;

	return 0;

err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2215
/*
 * ia_int - shared interrupt handler.
 *
 * Loops while any of the low seven bits of the bus status register
 * are set, dispatching each pending source to its sub-handler.  The
 * DLE completion bits are write-1-to-clear here; the other sources
 * are presumably acknowledged inside their sub-handlers (rx_intr,
 * tx_intr, ia_frontend_intr).
 */
static irqreturn_t ia_int(int irq, void *dev_id)
{
	struct atm_dev *dev;
	IADEV *iadev;
	unsigned int status;
	int handled = 0;

	dev = dev_id;
	iadev = INPH_IA_DEV(dev);
	while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
	{
		handled = 1;
		IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
		if (status & STAT_REASSINT)
		{
			/* reassembly (receive) engine interrupt */
			IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
			rx_intr(dev);
		}
		if (status & STAT_DLERINT)
		{
			/* Clear this bit by writing a 1 to it. */
			writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
			rx_dle_intr(dev);
		}
		if (status & STAT_SEGINT)
		{
			/* segmentation (transmit) engine interrupt */
			IF_EVENT(printk("IA: tx_intr \n");)
			tx_intr(dev);
		}
		if (status & STAT_DLETINT)
		{
			writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
			tx_dle_intr(dev);
		}
		if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
		{
			/* only the front-end interrupt gets serviced;
			   ERRINT/MARKINT are ignored */
			if (status & STAT_FEINT)
				ia_frontend_intr(iadev);
		}
	}
	return IRQ_RETVAL(handled);
}
2260
2261
2262
2263/*----------------------------- entries --------------------------------*/
/*
 * get_esi - read the adapter's factory end-system identifier (MAC)
 * from the MAC1/MAC2 registers into dev->esi[], most significant
 * byte first (MAC1 holds the first 4 bytes, MAC2 the last 2).
 * Always returns 0.
 */
static int get_esi(struct atm_dev *dev)
{
	IADEV *iadev;
	int i;
	u32 mac1;
	u16 mac2;

	iadev = INPH_IA_DEV(dev);
	/* NOTE(review): the le*_to_cpu/cpu_to_be* pairing is a no-op on
	 * little-endian hosts, and MAC2 uses a 32-bit readl narrowed to
	 * 16 bits — presumably only the low half is valid; confirm
	 * against the register map before touching this. */
	mac1 = cpu_to_be32(le32_to_cpu(readl(
				iadev->reg+IPHASE5575_MAC1)));
	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
	for (i=0; i<MAC1_LEN; i++)
		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));

	for (i=0; i<MAC2_LEN; i++)
		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
	return 0;
}
2283
2284static int reset_sar(struct atm_dev *dev)
2285{
2286 IADEV *iadev;
2287 int i, error = 1;
2288 unsigned int pci[64];
2289
2290 iadev = INPH_IA_DEV(dev);
2291 for(i=0; i<64; i++)
2292 if ((error = pci_read_config_dword(iadev->pci,
2293 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2294 return error;
2295 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2296 for(i=0; i<64; i++)
2297 if ((error = pci_write_config_dword(iadev->pci,
2298 i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2299 return error;
2300 udelay(5);
2301 return 0;
2302}
2303
2304
/*
 * ia_init - probe-time initialisation.
 *
 * Reads the PCI configuration, identifies the board variant from the
 * BAR0 size (1 MB -> 4K VCs, 256 KB -> 1K VCs), maps the register/RAM
 * window, fills in the iadev register pointers, reads the ESI (MAC)
 * and resets the SAR.  Returns 0 on success, a negative errno on most
 * failures, or 1 when the SAR reset fails.
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
		       dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
		       dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	/* the BAR size distinguishes the 4K-VC from the 1K-VC variant */
	if (iadev->pci_map_size == 0x100000){
		iadev->num_vc = 4096;
		dev->ci_range.vci_bits = NR_VCI_4K_LD;
		iadev->memSize = 4;
	}
	else if (iadev->pci_map_size == 0x40000) {
		iadev->num_vc = 1024;
		iadev->memSize = 1;
	}
	else {
		printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
		return -EINVAL;
	}
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size);

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
		       dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
		       dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
		       iadev->reg,iadev->seg_reg,iadev->reass_reg,
		       iadev->phy, iadev->ram, iadev->seg_ram,
		       iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
		iounmap(iadev->base);
		return error;
	}
	printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
		printk("%s%02X",i ? "-" : "",dev->esi[i]);
	printk("\n");

	/* reset SAR */
	if (reset_sar(dev)) {
		iounmap(iadev->base);
		printk("IA: reset SAR fail, please try again\n");
		return 1;
	}
	return 0;
}
2417
/*
 * ia_update_stats - fold the hardware RX/TX cell and drop counters
 * into the running software totals.  Skipped while carrier is down.
 *
 * NOTE(review): the counter registers appear to be read in a fixed
 * low-half/high-half order; they may latch or clear on read, so do
 * not reorder these reads without checking the hardware manual.
 */
static void ia_update_stats(IADEV *iadev) {
	if (!iadev->carrier_detect)
		return;
	iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
	iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
	iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
	iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
	iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
	iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
	return;
}
2429
/*
 * ia_led_timer - periodic (HZ/4) timer callback.
 *
 * Toggles each adapter's front-panel LED; on the "off" half-cycle it
 * harvests the hardware statistics counters, and on the "on"
 * half-cycle it runs the TX completion poll and wakes pending closes.
 * blinking[] keeps the per-adapter toggle phase across invocations.
 */
static void ia_led_timer(unsigned long arg) {
	unsigned long flags;
	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
	u_char i;
	/* NOTE(review): ctrl_reg is static but is fully re-read before
	 * each use, so the sharing looks harmless — confirm. */
	static u32 ctrl_reg;
	for (i = 0; i < iadev_count; i++) {
		if (ia_dev[i]) {
			ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
			if (blinking[i] == 0) {
				blinking[i]++;
				ctrl_reg &= (~CTRL_LED);
				writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
				ia_update_stats(ia_dev[i]);
			}
			else {
				blinking[i] = 0;
				ctrl_reg |= CTRL_LED;
				writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
				spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
				if (ia_dev[i]->close_pending)
					wake_up(&ia_dev[i]->close_wait);
				ia_tx_poll(ia_dev[i]);
				spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
			}
		}
	}
	mod_timer(&ia_timer, jiffies + HZ / 4);
	return;
}
2459
2460static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2461 unsigned long addr)
2462{
2463 writel(value, INPH_IA_DEV(dev)->phy+addr);
2464}
2465
2466static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2467{
2468 return readl(INPH_IA_DEV(dev)->phy+addr);
2469}
2470
2471static void ia_free_tx(IADEV *iadev)
2472{
2473 int i;
2474
2475 kfree(iadev->desc_tbl);
2476 for (i = 0; i < iadev->num_vc; i++)
2477 kfree(iadev->testTable[i]);
2478 kfree(iadev->testTable);
2479 for (i = 0; i < iadev->num_tx_desc; i++) {
2480 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2481
2482 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2483 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2484 kfree(desc->cpcs);
2485 }
2486 kfree(iadev->tx_buf);
2487 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2488 iadev->tx_dle_dma);
2489}
2490
2491static void ia_free_rx(IADEV *iadev)
2492{
2493 kfree(iadev->rx_open);
2494 dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2495 iadev->rx_dle_dma);
2496}
2497
/*
 * ia_start - bring the adapter online.
 *
 * Requests the (shared) IRQ, enables PCI memory and bus-master
 * access, programs the bus interface control register, runs the TX
 * and RX engine initialisation and finally brings up whichever front
 * end the board carries (25 Mbit, DS3/E3, or SUNI OC3/UTP155).
 * Returns 0 on success or a negative errno; error paths unwind in
 * reverse order via the goto labels at the bottom.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
	if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
		printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
		       dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
	}
	/* enabling memory + master (IRQ is released on failure via
	   err_free_irq below) */
	if ((error = pci_write_config_word(iadev->pci,
					   PCI_COMMAND,
					   PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
		printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
		       "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
	}
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
	   Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	/* keep only the LED and front-end-reset bits, then enable all
	   DMA burst sizes and unmask the interrupt sources */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
			| CTRL_B8
			| CTRL_B16
			| CTRL_B32
			| CTRL_B48
			| CTRL_B64
			| CTRL_B128
			| CTRL_ERRMASK
			| CTRL_DLETMASK		/* shud be removed l8r */
			| CTRL_DLERMASK
			| CTRL_SEGMASK
			| CTRL_REASSMASK
			| CTRL_FEMASK
			| CTRL_CSPREEMPT;

	writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
		printk("Bus status reg after init: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

	ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* release the front end from reset */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
		       readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	phy = 0; /* resolve compiler complaint */
	IF_INIT (
		if ((phy=ia_phy_get(dev,0)) == 0x30)
			printk("IA: pm5346,rev.%d\n",phy&0x0f);
		else
			printk("IA: utopia,rev.%0x\n",phy);)

	/* PHY-specific bring-up */
	if (iadev->phy_type &  FE_25MBIT_PHY)
		ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
		ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2599
/*
 * ia_close - tear down a VC.
 *
 * On the TX side: waits briefly, flushes this VC's skbs out of the
 * device backlog (requeueing everyone else's), then waits for the
 * in-flight descriptor count to drain before undoing the per-class
 * (ABR/CBR) rate accounting.  On the RX side: marks the reassembly
 * and VC table entries invalid and drains pending receive DLEs.
 * Finally frees the ia_vcc and clears the VC's flags.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
	u16 *vc_table;
	IADEV *iadev;
	struct ia_vcc *ia_vcc;
	struct sk_buff *skb = NULL;
	struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
	unsigned long closetime, flags;

	iadev = INPH_IA_DEV(vcc->dev);
	ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

	IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
			ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
	skb_queue_head_init (&tmp_tx_backlog);
	skb_queue_head_init (&tmp_vcc_backlog);
	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		iadev->close_pending++;
		/* give in-flight transmits ~500ms to complete */
		prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(500));
		finish_wait(&iadev->timeout_wait, &wait);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		/* pull our skbs out of the shared backlog; park the
		   rest on a temporary list and put them back after */
		while((skb = skb_dequeue(&iadev->tx_backlog))) {
			if (ATM_SKB(skb)->vcc == vcc){
				if (vcc->pop) vcc->pop(vcc, skb);
				else dev_kfree_skb_any(skb);
			}
			else
				skb_queue_tail(&tmp_tx_backlog, skb);
		}
		while((skb = skb_dequeue(&tmp_tx_backlog)))
			skb_queue_tail(&iadev->tx_backlog, skb);
		IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
		/* drain timeout in jiffies, scaled by the VC's rate */
		closetime = 300000 / ia_vcc->pcr;
		if (closetime == 0)
			closetime = 1;
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
		wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
		spin_lock_irqsave(&iadev->tx_lock, flags);
		iadev->close_pending--;
		/* reset the per-VC software state back to idle UBR */
		iadev->testTable[vcc->vci]->lastTime = 0;
		iadev->testTable[vcc->vci]->fract = 0;
		iadev->testTable[vcc->vci]->vc_status = VC_UBR;
		if (vcc->qos.txtp.traffic_class == ATM_ABR) {
			if (vcc->qos.txtp.min_pcr > 0)
				iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
		}
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			ia_vcc = INPH_IA_VCC(vcc);
			iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
			ia_cbrVc_close (vcc);
		}
		spin_unlock_irqrestore(&iadev->tx_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		// reset reass table
		vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = NO_AAL5_PKT;
		// reset vc table
		vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
		vc_table += vcc->vci;
		*vc_table = (vcc->vci << 6) | 15;
		if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
			struct abr_vc_table __iomem *abr_vc_table =
				(iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
			abr_vc_table +=  vcc->vci;
			abr_vc_table->rdf = 0x0003;
			abr_vc_table->air = 0x5eb1;
		}
		// Drain the packets
		rx_dle_intr(vcc->dev);
		iadev->rx_open[vcc->vci] = NULL;
	}
	kfree(INPH_IA_VCC(vcc));
	ia_vcc = NULL;
	vcc->dev_data = NULL;
	clear_bit(ATM_VF_ADDR,&vcc->flags);
	return;
}
2684
2685static int ia_open(struct atm_vcc *vcc)
2686{
2687 struct ia_vcc *ia_vcc;
2688 int error;
2689 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2690 {
2691 IF_EVENT(printk("ia: not partially allocated resources\n");)
2692 vcc->dev_data = NULL;
2693 }
2694 if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2695 {
2696 IF_EVENT(printk("iphase open: unspec part\n");)
2697 set_bit(ATM_VF_ADDR,&vcc->flags);
2698 }
2699 if (vcc->qos.aal != ATM_AAL5)
2700 return -EINVAL;
2701 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2702 vcc->dev->number, vcc->vpi, vcc->vci);)
2703
2704 /* Device dependent initialization */
2705 ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2706 if (!ia_vcc) return -ENOMEM;
2707 vcc->dev_data = ia_vcc;
2708
2709 if ((error = open_rx(vcc)))
2710 {
2711 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2712 ia_close(vcc);
2713 return error;
2714 }
2715
2716 if ((error = open_tx(vcc)))
2717 {
2718 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2719 ia_close(vcc);
2720 return error;
2721 }
2722
2723 set_bit(ATM_VF_READY,&vcc->flags);
2724
2725#if 0
2726 {
2727 static u8 first = 1;
2728 if (first) {
2729 ia_timer.expires = jiffies + 3*HZ;
2730 add_timer(&ia_timer);
2731 first = 0;
2732 }
2733 }
2734#endif
2735 IF_EVENT(printk("ia open returning\n");)
2736 return 0;
2737}
2738
/*
 * QoS renegotiation hook for the ATM core.  This driver accepts the
 * request but performs no reconfiguration; it only logs and reports
 * success.
 */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2744
2745static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2746{
2747 IA_CMDBUF ia_cmds;
2748 IADEV *iadev;
2749 int i, board;
2750 u16 __user *tmps;
2751 IF_EVENT(printk(">ia_ioctl\n");)
2752 if (cmd != IA_CMD) {
2753 if (!dev->phy->ioctl) return -EINVAL;
2754 return dev->phy->ioctl(dev,cmd,arg);
2755 }
2756 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2757 board = ia_cmds.status;
2758 if ((board < 0) || (board > iadev_count))
2759 board = 0;
2760 iadev = ia_dev[board];
2761 switch (ia_cmds.cmd) {
2762 case MEMDUMP:
2763 {
2764 switch (ia_cmds.sub_cmd) {
2765 case MEMDUMP_DEV:
2766 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2767 if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2768 return -EFAULT;
2769 ia_cmds.status = 0;
2770 break;
2771 case MEMDUMP_SEGREG:
2772 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2773 tmps = (u16 __user *)ia_cmds.buf;
2774 for(i=0; i<0x80; i+=2, tmps++)
2775 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2776 ia_cmds.status = 0;
2777 ia_cmds.len = 0x80;
2778 break;
2779 case MEMDUMP_REASSREG:
2780 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2781 tmps = (u16 __user *)ia_cmds.buf;
2782 for(i=0; i<0x80; i+=2, tmps++)
2783 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2784 ia_cmds.status = 0;
2785 ia_cmds.len = 0x80;
2786 break;
2787 case MEMDUMP_FFL:
2788 {
2789 ia_regs_t *regs_local;
2790 ffredn_t *ffL;
2791 rfredn_t *rfL;
2792
2793 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2794 regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2795 if (!regs_local) return -ENOMEM;
2796 ffL = ®s_local->ffredn;
2797 rfL = ®s_local->rfredn;
2798 /* Copy real rfred registers into the local copy */
2799 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2800 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2801 /* Copy real ffred registers into the local copy */
2802 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2803 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2804
2805 if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2806 kfree(regs_local);
2807 return -EFAULT;
2808 }
2809 kfree(regs_local);
2810 printk("Board %d registers dumped\n", board);
2811 ia_cmds.status = 0;
2812 }
2813 break;
2814 case READ_REG:
2815 {
2816 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2817 desc_dbg(iadev);
2818 ia_cmds.status = 0;
2819 }
2820 break;
2821 case 0x6:
2822 {
2823 ia_cmds.status = 0;
2824 printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2825 printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2826 }
2827 break;
2828 case 0x8:
2829 {
2830 struct k_sonet_stats *stats;
2831 stats = &PRIV(_ia_dev[board])->sonet_stats;
2832 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2833 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2834 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2835 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2836 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2837 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2838 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2839 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2840 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2841 }
2842 ia_cmds.status = 0;
2843 break;
2844 case 0x9:
2845 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2846 for (i = 1; i <= iadev->num_rx_desc; i++)
2847 free_desc(_ia_dev[board], i);
2848 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2849 iadev->reass_reg+REASS_MASK_REG);
2850 iadev->rxing = 1;
2851
2852 ia_cmds.status = 0;
2853 break;
2854
2855 case 0xb:
2856 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2857 ia_frontend_intr(iadev);
2858 break;
2859 case 0xa:
2860 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2861 {
2862 ia_cmds.status = 0;
2863 IADebugFlag = ia_cmds.maddr;
2864 printk("New debug option loaded\n");
2865 }
2866 break;
2867 default:
2868 ia_cmds.status = 0;
2869 break;
2870 }
2871 }
2872 break;
2873 default:
2874 break;
2875
2876 }
2877 return 0;
2878}
2879
/*
 * Socket-option read hook: this driver exposes no per-VC options, so
 * every request is rejected with -EINVAL.
 */
static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
	void __user *optval, int optlen)
{
	IF_EVENT(printk(">ia_getsockopt\n");)
	return -EINVAL;
}
2886
/*
 * Socket-option write hook: this driver exposes no per-VC options, so
 * every request is rejected with -EINVAL.
 */
static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
	void __user *optval, unsigned int optlen)
{
	IF_EVENT(printk(">ia_setsockopt\n");)
	return -EINVAL;
}
2893
/*
 * Hand one AAL5 packet to the segmentation hardware.
 *
 * Called from ia_send() with iadev->tx_lock held.  Returns 0 when the
 * skb was queued to the hardware OR deliberately dropped (closed VC,
 * oversized, misaligned data, bad descriptor); returns 1 only when no
 * transmit descriptor is free, in which case ia_send() places the skb
 * on the tx backlog for a later retry.
 */
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
	IADEV *iadev;
	struct dle *wr_ptr;
	struct tx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	int comp_code;
	int total_len;
	struct cpcs_trailer *trailer;
	struct ia_vcc *iavcc;

	iadev = INPH_IA_DEV(vcc->dev);
	iavcc = INPH_IA_VCC(vcc);
	/* VC no longer transmitting: drop the packet (still "success"). */
	if (!iavcc->txing) {
		printk("discard packet on closed VC\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}

	/* Packet must fit in one tx buffer; the 8 reserves room for the
	 * trailer (presumably sizeof(struct cpcs_trailer) - confirm). */
	if (skb->len > iadev->tx_buf_sz - 8) {
		printk("Transmit size over tx buffer size\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}
	/* Reject buffers whose data is not 4-byte aligned. */
	if ((unsigned long)skb->data & 3) {
		printk("Misaligned SKB\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;
	}
	/* Get a descriptor number from our free descriptor queue
	   We get the descr number from the TCQ now, since I am using
	   the TCQ as a free buffer queue. Initially TCQ will be
	   initialized with all the descriptors and is hence, full.
	*/
	desc = get_desc (iadev, iavcc);
	if (desc == 0xffff)
		return 1;	/* no free descriptor - caller requeues */
	comp_code = desc >> 13;	/* top 3 bits: completion code */
	desc &= 0x1fff;		/* low 13 bits: descriptor number */

	/* Descriptor numbers are 1-based; 0 or out-of-range means the
	 * hardware handed us garbage - drop the skb but report success. */
	if ((desc == 0) || (desc > iadev->num_tx_desc))
	{
		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
		atomic_inc(&vcc->stats->tx);
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		return 0;   /* return SUCCESS */
	}

	if (comp_code)
	{
		IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
			desc, comp_code);)
	}

	/* remember the desc and vcc mapping */
	iavcc->vc_desc_cnt++;
	iadev->desc_tbl[desc-1].iavcc = iavcc;
	iadev->desc_tbl[desc-1].txskb = skb;
	IA_SKB_STATE(skb) = 0;

	/* Advance the TCQ read pointer, wrapping back to the start past
	 * the end, and tell the hardware. */
	iadev->ffL.tcq_rd += 2;
	if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
		iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);

	/* Put the descriptor number in the packet ready queue
	   and put the updated write pointer in the DLE field
	*/
	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;

	/* Advance the PRQ write pointer with the same wrap rule. */
	iadev->ffL.prq_wr += 2;
	if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
		iadev->ffL.prq_wr = iadev->ffL.prq_st;

	/* Figure out the exact length of the packet and padding required to
	   make it aligned on a 48 byte boundary.  */
	total_len = skb->len + sizeof(struct cpcs_trailer);
	total_len = ((total_len + 47) / 48) * 48;
	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)

	/* Put the packet in a tx buffer */
	trailer = iadev->tx_buf[desc-1].cpcs;
	IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
		skb, skb->data, skb->len, desc);)
	trailer->control = 0;
	/*big endian*/
	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
	trailer->crc32 = 0;	/* not needed - dummy bytes */

	/* Display the packet */
	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
		skb->len, tcnter++);
		xdump(skb->data, skb->len, "TX: ");
		printk("\n");)

	/* Build the buffer descriptor */
	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
	buf_desc_ptr += desc;	/* points to the corresponding entry */
	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
	/* Huh ? p.115 of users guide describes this as a read-only register */
	writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	buf_desc_ptr->vc_index = vcc->vci;
	buf_desc_ptr->bytes = total_len;

	if (vcc->qos.txtp.traffic_class == ATM_ABR)
		clear_lockup (vcc, iadev);

	/* Build the DLE structure: first DLE DMAs the payload from the
	 * skb into the on-board buffer. */
	wr_ptr = iadev->tx_dle_q.write;
	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      skb->len, DMA_TO_DEVICE);
	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
				 buf_desc_ptr->buf_start_lo;
	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
	wr_ptr->bytes = skb->len;

	/* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
	if ((wr_ptr->bytes >> 2) == 0xb)
		wr_ptr->bytes = 0x30;

	wr_ptr->mode = TX_DLE_PSI;
	wr_ptr->prq_wr_ptr_data = 0;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

	/* Build trailer dle: second DLE DMAs the CPCS trailer to the tail
	 * of the padded buffer and raises the completion interrupt. */
	wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
	wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
	  buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);

	wr_ptr->bytes = sizeof(struct cpcs_trailer);
	wr_ptr->mode = DMA_INT_ENABLE;
	wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

	iadev->tx_dle_q.write = wr_ptr;
	ATM_DESC(skb) = vcc->vci;
	skb_queue_tail(&iadev->tx_dma_q, skb);

	atomic_inc(&vcc->stats->tx);
	iadev->tx_pkt_cnt++;
	/* Increment transaction counter */
	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

#if 0
	/* add flow control logic */
	if (atomic_read(&vcc->stats->tx) % 20 == 0) {
		if (iavcc->vc_desc_cnt > 10) {
			vcc->tx_quota = vcc->tx_quota * 3 / 4;
			printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
			iavcc->flow_inc = -1;
			iavcc->saved_tx_quota = vcc->tx_quota;
		} else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
			// vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
			printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
			iavcc->flow_inc = 0;
		}
	}
#endif
	IF_TX(printk("ia send done\n");)
	return 0;
}
3073
3074static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3075{
3076 IADEV *iadev;
3077 unsigned long flags;
3078
3079 iadev = INPH_IA_DEV(vcc->dev);
3080 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3081 {
3082 if (!skb)
3083 printk(KERN_CRIT "null skb in ia_send\n");
3084 else dev_kfree_skb_any(skb);
3085 return -EINVAL;
3086 }
3087 spin_lock_irqsave(&iadev->tx_lock, flags);
3088 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3089 dev_kfree_skb_any(skb);
3090 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3091 return -EINVAL;
3092 }
3093 ATM_SKB(skb)->vcc = vcc;
3094
3095 if (skb_peek(&iadev->tx_backlog)) {
3096 skb_queue_tail(&iadev->tx_backlog, skb);
3097 }
3098 else {
3099 if (ia_pkt_tx (vcc, skb)) {
3100 skb_queue_tail(&iadev->tx_backlog, skb);
3101 }
3102 }
3103 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3104 return 0;
3105
3106}
3107
3108static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3109{
3110 int left = *pos, n;
3111 char *tmpPtr;
3112 IADEV *iadev = INPH_IA_DEV(dev);
3113 if(!left--) {
3114 if (iadev->phy_type == FE_25MBIT_PHY) {
3115 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3116 return n;
3117 }
3118 if (iadev->phy_type == FE_DS3_PHY)
3119 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3120 else if (iadev->phy_type == FE_E3_PHY)
3121 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3122 else if (iadev->phy_type == FE_UTP_OPTION)
3123 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3124 else
3125 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3126 tmpPtr = page + n;
3127 if (iadev->pci_map_size == 0x40000)
3128 n += sprintf(tmpPtr, "-1KVC-");
3129 else
3130 n += sprintf(tmpPtr, "-4KVC-");
3131 tmpPtr = page + n;
3132 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3133 n += sprintf(tmpPtr, "1M \n");
3134 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3135 n += sprintf(tmpPtr, "512K\n");
3136 else
3137 n += sprintf(tmpPtr, "128K\n");
3138 return n;
3139 }
3140 if (!left) {
3141 return sprintf(page, " Number of Tx Buffer: %u\n"
3142 " Size of Tx Buffer : %u\n"
3143 " Number of Rx Buffer: %u\n"
3144 " Size of Rx Buffer : %u\n"
3145 " Packets Receiverd : %u\n"
3146 " Packets Transmitted: %u\n"
3147 " Cells Received : %u\n"
3148 " Cells Transmitted : %u\n"
3149 " Board Dropped Cells: %u\n"
3150 " Board Dropped Pkts : %u\n",
3151 iadev->num_tx_desc, iadev->tx_buf_sz,
3152 iadev->num_rx_desc, iadev->rx_buf_sz,
3153 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3154 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3155 iadev->drop_rxcell, iadev->drop_rxpkt);
3156 }
3157 return 0;
3158}
3159
/* Operations exported to the ATM core for every registered adapter
 * (passed to atm_dev_register() in ia_init_one()). */
static const struct atmdev_ops ops = {
	.open = ia_open,
	.close = ia_close,
	.ioctl = ia_ioctl,
	.getsockopt = ia_getsockopt,
	.setsockopt = ia_setsockopt,
	.send = ia_send,
	.phy_put = ia_phy_put,
	.phy_get = ia_phy_get,
	.change_qos = ia_change_qos,
	.proc_read = ia_proc_read,
	.owner = THIS_MODULE,
};
3173
/*
 * PCI probe callback: allocate the per-board IADEV, enable the PCI
 * device, register with the ATM core, publish the board in the global
 * ia_dev[]/_ia_dev[] tables and bring the hardware up via
 * ia_init()/ia_start().  Errors unwind in reverse order through the
 * err_out_* labels.
 */
static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
		iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	/* Publish the board in the global tables before starting it; the
	 * debug ioctl (ia_ioctl) looks boards up through ia_dev[]. */
	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		/* Roll back the table entry added above. */
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	/* Link the new device into the driver's singly-linked board list. */
	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
3233
/*
 * PCI remove callback: quiesce the PHY, release the IRQ, drop the
 * board from the global tables, deregister from the ATM core and free
 * all adapter resources.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
	free_irq(iadev->irq, dev);
	/* NOTE(review): clearing slot iadev_count-1 assumes boards are
	 * removed in LIFO order; removing a middle board would leave a
	 * stale ia_dev[] entry - confirm against hotplug expectations. */
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

	kfree(iadev);
}
3263
3264static struct pci_device_id ia_pci_tbl[] = {
3265 { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3266 { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3267 { 0,}
3268};
3269MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3270
/* PCI driver glue binding the ID table to the probe/remove hooks. */
static struct pci_driver ia_driver = {
	.name = DEV_LABEL,
	.id_table = ia_pci_tbl,
	.probe = ia_init_one,
	.remove = ia_remove_one,
};
3277
3278static int __init ia_module_init(void)
3279{
3280 int ret;
3281
3282 ret = pci_register_driver(&ia_driver);
3283 if (ret >= 0) {
3284 ia_timer.expires = jiffies + 3*HZ;
3285 add_timer(&ia_timer);
3286 } else
3287 printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3288 return ret;
3289}
3290
3291static void __exit ia_module_exit(void)
3292{
3293 pci_unregister_driver(&ia_driver);
3294
3295 del_timer(&ia_timer);
3296}
3297
/* Module entry/exit hooks registered with the kernel. */
module_init(ia_module_init);
module_exit(ia_module_exit);