1 /******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
7 *******************************************************************************
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
21 supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The following is the change log and history:
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
30 around for the hardware anomalies.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
41 *******************************************************************************/
46 #include <linux/version.h>
47 #include <linux/module.h>
48 #include <linux/kernel.h>
50 #include <linux/pci.h>
51 #include <linux/errno.h>
52 #include <linux/atm.h>
53 #include <linux/atmdev.h>
54 #include <linux/sonet.h>
55 #include <linux/skbuff.h>
56 #include <linux/time.h>
57 #include <linux/sched.h> /* for xtime */
58 #include <linux/delay.h>
59 #include <linux/uio.h>
60 #include <linux/init.h>
61 #include <asm/system.h>
63 #include <asm/atomic.h>
64 #include <asm/uaccess.h>
65 #include <asm/string.h>
66 #include <asm/byteorder.h>
67 #include <linux/vmalloc.h>
/*
 * Byte-swap the low 16 bits of x (endianness helper for 16-bit words).
 * The argument is fully parenthesized: the original expansion broke for
 * argument expressions with operators of lower precedence than '&'
 * (e.g. swap(a | b)), because 'a | b & 0xff' groups as 'a | (b & 0xff)'.
 */
#define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
72 struct k_sonet_stats sonet_stats; /* link diagnostics */
73 unsigned char loop_mode; /* loopback mode */
74 struct atm_dev *dev; /* device back-pointer */
75 struct suni_priv *next; /* next SUNI */
/* Per-PHY private data hung off the generic ATM device; 'dev' is
 * parenthesized so expression arguments expand correctly. */
#define PRIV(dev) ((struct suni_priv *) (dev)->phy_data)
79 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
81 static IADEV *ia_dev[8];
82 static struct atm_dev *_ia_dev[8];
83 static int iadev_count;
84 static void ia_led_timer(unsigned long arg);
85 static struct timer_list ia_timer = { function: ia_led_timer };
86 struct atm_vcc *vcc_close_que[100];
87 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
88 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
89 static u32 IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
90 |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
93 MODULE_PARM(IA_TX_BUF, "i");
94 MODULE_PARM(IA_TX_BUF_SZ, "i");
95 MODULE_PARM(IA_RX_BUF, "i");
96 MODULE_PARM(IA_RX_BUF_SZ, "i");
97 MODULE_PARM(IADebugFlag, "i");
100 MODULE_LICENSE("GPL");
102 #if BITS_PER_LONG != 32
103 # error FIXME: this driver only works on 32-bit platforms
106 /**************************** IA_LIB **********************************/
/* Initialize a TX-descriptor return queue (body elided from this listing). */
108 static void ia_init_rtn_q (IARTN_Q *que)
/* Push 'data' at the head of return queue 'que'; if the queue is empty,
 * head and tail both point at the new node.
 * NOTE(review): the non-empty-case tail handling is elided from this view. */
114 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
117    if (que->next == NULL)
118       que->next = que->tail = data;
120       data->next = que->next;
/* Append a copy of descriptor-table entry 'data' at the tail of 'que'.
 * Allocates the queue node atomically (may run from IRQ context);
 * returns -1 if the allocation fails, otherwise success. */
126 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
127    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
128    if (!entry) return -1;
131    if (que->next == NULL)
132       que->next = que->tail = entry;
134       que->tail->next = entry;
135       que->tail = que->tail->next;
/* Pop and return the head node of 'que'; when the last node is removed,
 * head and tail are both reset to NULL.
 * NOTE(review): return statements are elided from this listing. */
140 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
142    if (que->next == NULL)
145    if ( que->next == que->tail)
146       que->next = que->tail = NULL;
148       que->next = que->next->next;
/* Drain the transmit-complete queue (TCQ): walk from the host's cached
 * write pointer to the hardware write pointer, retiring each completed
 * descriptor -- clear its timestamp, drop the owning VCC's outstanding
 * descriptor count, and (for rate-limited VCs) move the entry onto the
 * tx_return_q for later skb release by ia_tx_poll(). */
152 static void ia_hack_tcq(IADEV *dev) {
156   struct ia_vcc *iavcc_r = NULL;
157   extern void desc_dbg(IADEV *iadev);
159   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
160   while (dev->host_tcq_wr != tcq_wr) {
161      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
163      else if (!dev->desc_tbl[desc1 -1].timestamp) {
164         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
165         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
167      else if (dev->desc_tbl[desc1 -1].timestamp) {
168         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
169            printk("IA: Fatal err in get_desc\n");
172         iavcc_r->vc_desc_cnt--;
173         dev->desc_tbl[desc1 -1].timestamp = 0;
174         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n",
175 (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
176         if (iavcc_r->pcr < dev->rate_limit) {
177            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
178            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
179               printk("ia_hack_tcq: No memory available\n");
181         dev->desc_tbl[desc1 -1].iavcc = NULL;
182         dev->desc_tbl[desc1 -1].txskb = NULL;
/* TCQ entries are 16-bit words; advance by 2 bytes and wrap at the end. */
184      dev->host_tcq_wr += 2;
185      if (dev->host_tcq_wr > dev->ffL.tcq_ed)
186         dev->host_tcq_wr = dev->ffL.tcq_st;
/* Allocate a free TX descriptor number for 'iavcc' from the TCQ.
 * Roughly every 50 jiffies (or when the TCQ looks empty) it also runs a
 * recovery scan: descriptors whose timestamp has exceeded the owning
 * VC's ltimeout are forcibly reclaimed and pushed back into the TCQ --
 * a workaround for descriptors the hardware failed to return. */
190 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
193   struct ia_vcc *iavcc_r = NULL;
195   static unsigned long timer = 0;
197   extern void desc_dbg(IADEV *iadev);
200   if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){
203      while (i < dev->num_tx_desc) {
204         if (!dev->desc_tbl[i].timestamp) {
208         ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
209         delta = jiffies - dev->desc_tbl[i].timestamp;
210         if (delta >= ltimeout) {
211            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
/* Back the TCQ read pointer up one slot (with wrap) and stuff the
 * recovered descriptor number there so it will be handed out again. */
212            if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
213               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
215               dev->ffL.tcq_rd -= 2;
216            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
217            if (!(skb = dev->desc_tbl[i].txskb) ||
218                          !(iavcc_r = dev->desc_tbl[i].iavcc))
219               printk("Fatal err, desc table vcc or skb is NULL\n");
221               iavcc_r->vc_desc_cnt--;
222            dev->desc_tbl[i].timestamp = 0;
223            dev->desc_tbl[i].iavcc = NULL;
224            dev->desc_tbl[i].txskb = NULL;
229   if (dev->ffL.tcq_rd == dev->host_tcq_wr)
232   /* Get the next available descriptor number from TCQ */
233   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
/* Skip zero or still-in-flight descriptors, wrapping the read pointer;
 * give up if we come all the way around to the write pointer. */
235   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
236      dev->ffL.tcq_rd += 2;
237      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
238 	dev->ffL.tcq_rd = dev->ffL.tcq_st;
239      if (dev->ffL.tcq_rd == dev->host_tcq_wr)
241      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
244   /* get system time */
245   dev->desc_tbl[desc_num -1].timestamp = jiffies;
/* ABR-segmentation lockup workaround: detect a stuck ABR VC by watching
 * its scheduler state (cell slot / fraction unchanged across polls) and,
 * once a lockup is confirmed, force the VC back to the idle state and
 * re-insert its VCI into the ABR schedule table. */
249 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
251   vcstatus_t *vcstatus;
253   u_short tempCellSlot, tempFract;
254   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
255   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
258   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
259      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
/* Only probe after several consecutive polls (cnt reaches 5). */
262      if( vcstatus->cnt == 0x05 ) {
265         if( eabr_vc->last_desc ) {
266 	   if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
267               /* Wait for 10 Micro sec */
269               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
/* Lockup is declared when the scheduler position has not moved
 * since the previous sample. */
273            tempCellSlot = abr_vc->last_cell_slot;
274            tempFract    = abr_vc->fraction;
275            if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
276                         && (tempFract == dev->testTable[vcc->vci]->fract))
278            dev->testTable[vcc->vci]->lastTime = tempCellSlot;
279            dev->testTable[vcc->vci]->fract = tempFract;
281         } /* last descriptor */
283      } /* vcstatus->cnt */
/* Recovery: quiesce the segmentation engine, mark the VC idle, put the
 * VCI back into a free schedule-table slot, then bring the engine back
 * online and re-arm/ack the transmit-done interrupt. */
286      IF_ABR(printk("LOCK UP found\n");)
287      writew(0xFFFD, dev->seg_reg+MODE_REG_0);
288      /* Wait for 10 Micro sec */
290      abr_vc->status &= 0xFFF8;
291      abr_vc->status |= 0x0001;  /* state is idle */
292      shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
293      for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
295         shd_tbl[i] = vcc->vci;
297        IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
298      writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
299      writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
300      writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
310 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
312 ** +----+----+------------------+-------------------------------+
313 ** | R | NZ | 5-bit exponent | 9-bit mantissa |
314 ** +----+----+------------------+-------------------------------+
316 ** R = reserved (written as 0)
317 ** NZ = 0 if 0 cells/sec; 1 otherwise
319 ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
/* Convert a 24-bit cell rate (cells/sec) to the hardware's 16-bit
 * floating-point format described in the comment block above:
 * NZ flag, 5-bit exponent 'i', 9-bit mantissa. The three cases align
 * the mantissa depending on whether the MSB position 'i' is equal to,
 * below, or above M_BITS. */
322 cellrate_to_float(u32 cr)
326 #define	M_BITS	9		/* Number of bits in mantissa */
327 #define	E_BITS	5		/* Number of bits in exponent */
331 	u32	tmp = cr & 0x00ffffff;
340 		flot = NZ | (i << M_BITS) | (cr & M_MASK);
342 		flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
344 		flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
/*
350 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
/* Inverse of cellrate_to_float(): rebuild cells/sec from the NZ flag,
 * 5-bit exponent and 9-bit mantissa (implicit leading 1). Returns 0
 * when NZ is clear. */
353 float_to_cellrate(u16 rate)
355 	u32	exp, mantissa, cps;
356 	if ((rate & NZ) == 0)
358 	exp = (rate >> M_BITS) & E_MASK;
359 	mantissa = rate & M_MASK;
/* 1.mmmmmmmmm -> integer: set the implicit leading bit, then shift by
 * the exponent relative to the mantissa width. */
362 	cps = (1 << M_BITS) | mantissa;
365 	else if (exp > M_BITS)
366 		cps <<= (exp - M_BITS);
368 		cps >>= (M_BITS - exp);
/* Fill 'srv_p' with default ABR service parameters: class ABR, PCR set
 * to the adapter's line rate, plus fixed ICR/TBE defaults. */
373 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
374   srv_p->class_type = ATM_ABR;
375   srv_p->pcr  = dev->LineRate;
377   srv_p->icr  = 0x055cb7;
378   srv_p->tbe  = 0xffffff;
/* Program an ABR VC into the adapter's VC tables.
 * flag == 1: segmentation side (FFRED) -- validate/clamp the service
 *   parameters against the line rate, then encode NRM/TRM/CRM, PCR,
 *   ICR, ADTF/CDF and MCR into the forward ABR VC entry.
 * flag == 0: reassembly side (RFRED) -- set up the reassembly table
 *   entry and the reverse ABR VC entry (RDF, additive increase rate),
 *   mark the VC active and account its MCR. */
389 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
390                 struct atm_vcc *vcc, u8 flag)
392    f_vc_abr_entry  *f_abr_vc;
393    r_vc_abr_entry  *r_abr_vc;
396    u16  adtf, air, *ptr16;
397    f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
398    f_abr_vc += vcc->vci;
400       case 1: /* FFRED initialization */
401 #if 0  /* sanity check */
404        if (srv_p->pcr > dev->LineRate)
405           srv_p->pcr = dev->LineRate;
406        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
407 	  return MCR_UNAVAILABLE;
408        if (srv_p->mcr > srv_p->pcr)
411           srv_p->icr = srv_p->pcr;
412        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
414        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
416        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
418        if (srv_p->nrm > MAX_NRM)
420        if (srv_p->trm > MAX_TRM)
422        if (srv_p->adtf > MAX_ADTF)
424        else if (srv_p->adtf == 0)
426        if (srv_p->cdf > MAX_CDF)
428        if (srv_p->rif > MAX_RIF)
430        if (srv_p->rdf > MAX_RDF)
433        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
434        f_abr_vc->f_vc_type = ABR;
435        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
436 			          /* i.e 2**n = 2 << (n-1) */
437        f_abr_vc->f_nrm = nrm << 8 | nrm;
438        trm = 100000/(2 << (16 - srv_p->trm));
439        if ( trm == 0) trm = 1;
440        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
441        crm = srv_p->tbe / nrm;
442        if (crm == 0) crm = 1;
443        f_abr_vc->f_crm = crm & 0xff;
444        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
/* ICR is capped by what TBE cells can cover in one FRTT. */
445        icr = MIN( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
446 				((srv_p->tbe/srv_p->frtt)*1000000) :
447 				(1000000/(srv_p->frtt/srv_p->tbe)));
448        f_abr_vc->f_icr = cellrate_to_float(icr);
449        adtf = (10000 * srv_p->adtf)/8192;
450        if (adtf == 0) adtf = 1;
451        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
452        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
453        f_abr_vc->f_acr = f_abr_vc->f_icr;
454        f_abr_vc->f_status = 0x0042;
456       case 0: /* RFRED initialization */
457        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
458        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
459        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
460        r_abr_vc += vcc->vci;
461        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
462        air = srv_p->pcr << (15 - srv_p->rif);
463        if (air == 0) air = 1;
464        r_abr_vc->r_air = cellrate_to_float(air);
465        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
466        dev->sum_mcr	   += srv_p->mcr;
/* Reserve CBR bandwidth for 'vcc' and spread its schedule-table entries
 * as evenly as possible across the CBR schedule table.
 * Steps: convert PCR to a number of table entries (rounding against the
 * table granularity), check remaining CBR capacity, then walk the table
 * from a rotating entry point, probing outward (+/- inc) from each
 * ideal slot until a free slot is found for every entry. Enables the
 * CBR scheduler in STPARMS when the first CBR VC is installed. */
474 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
475    u32 rateLow=0, rateHigh, rate;
477    struct ia_vcc *ia_vcc;
479    int idealSlot =0, testSlot, toBeAssigned, inc;
481    u16 *SchedTbl, *TstSchedTbl;
487    /* IpAdjustTrafficParams */
488    if (vcc->qos.txtp.max_pcr <= 0) {
489       IF_ERR(printk("PCR for CBR not defined\n");)
492    rate = vcc->qos.txtp.max_pcr;
493    entries = rate / dev->Granularity;
494    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
495                                 entries, rate, dev->Granularity);)
497       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
/* Round the entry count: bias toward the higher rate when the request
 * is much closer to it (3:1 weighting). */
498    rateLow  =  entries * dev->Granularity;
499    rateHigh = (entries + 1) * dev->Granularity;
500    if (3*(rate - rateLow) > (rateHigh - rate))
502    if (entries > dev->CbrRemEntries) {
503       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
504       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
505                                          entries, dev->CbrRemEntries);)
509    ia_vcc = INPH_IA_VCC(vcc);
510    ia_vcc->NumCbrEntry = entries;
511    dev->sum_mcr += entries * dev->Granularity;
512    /* IaFFrednInsertCbrSched */
513    // Starting at an arbitrary location, place the entries into the table
514    // as smoothly as possible
516    spacing = dev->CbrTotEntries / entries;
517    sp_mod  =  dev->CbrTotEntries % entries; // get modulo
518    toBeAssigned = entries;
521    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
524       // If this is the first time, start the table loading for this connection
525       // as close to entryPoint as possible.
526       if (toBeAssigned == entries)
528          idealSlot = dev->CbrEntryPt;
529          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
530          if (dev->CbrEntryPt >= dev->CbrTotEntries)
531             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
533          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
534          // in the table that would be  smoothest
535          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
536          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
538       if (idealSlot >= (int)dev->CbrTotEntries)
539          idealSlot -= dev->CbrTotEntries;
540       // Continuously check around this ideal value until a null
541       // location is encountered.
542       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
544       testSlot = idealSlot;
545       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
546       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
547                 testSlot, (u32)TstSchedTbl,toBeAssigned);)
548       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
549       while (cbrVC)  // If another VC at this location, we have to keep looking
/* Probe alternately below and above the ideal slot, wrapping at the
 * table boundaries, until an empty (zero) slot is read. */
552           testSlot = idealSlot - inc;
553           if (testSlot < 0) { // Wrap if necessary
554              testSlot += dev->CbrTotEntries;
555              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
556                                                        (u32)SchedTbl,testSlot);)
558           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
559           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
562              testSlot = idealSlot + inc;
563              if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
564                 testSlot -= dev->CbrTotEntries;
565                 IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
566                 IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
567                                             testSlot, toBeAssigned);)
569              // set table index and read in value
570              TstSchedTbl = (u16*)(SchedTbl + testSlot);
571              IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
572                             (u32)TstSchedTbl,cbrVC,inc);)
573              memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
575       // Move this VCI number into this location of the CBR Sched table.
576       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(TstSchedTbl));
577       dev->CbrRemEntries--;
581    /* IaFFrednCbrEnable */
582    dev->NumEnabledCBR++;
583    if (dev->NumEnabledCBR == 1) {
584        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
585        IF_CBR(printk("CBR is enabled\n");)
/* Tear down a CBR VC: scan the whole CBR schedule table, release every
 * slot holding this VCI back to the free pool, and disable the CBR
 * scheduler in STPARMS when the last CBR VC goes away. */
589 static void ia_cbrVc_close (struct atm_vcc *vcc) {
591    u16 *SchedTbl, NullVci = 0;
594    iadev = INPH_IA_DEV(vcc->dev);
595    iadev->NumEnabledCBR--;
596    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
597    if (iadev->NumEnabledCBR == 0) {
598       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
599       IF_CBR (printk("CBR support disabled\n");)
602    for (i=0; i < iadev->CbrTotEntries; i++)
604       if (*SchedTbl == vcc->vci) {
605          iadev->CbrRemEntries++;
611    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
/* Number of free TX descriptors: distance between the host TCQ write
 * pointer and read pointer in 16-bit slots, accounting for wrap-around
 * of the circular TCQ. */
614 static int ia_avail_descs(IADEV *iadev) {
617    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
618       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
620       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
621                    iadev->ffL.tcq_st) / 2;
/* Flush the software TX backlog: while descriptors are available, pop
 * queued skbs and hand them to ia_pkt_tx(). Skbs whose VCC is gone or
 * no longer READY are dropped; on ia_pkt_tx() failure the skb is put
 * back at the head of the backlog. */
625 static int ia_que_tx (IADEV *iadev) {
629    struct ia_vcc *iavcc;
630    static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
631    num_desc = ia_avail_descs(iadev);
632    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
633       if (!(vcc = ATM_SKB(skb)->vcc)) {
634          dev_kfree_skb_any(skb);
635          printk("ia_que_tx: Null vcc\n");
638       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
639          dev_kfree_skb_any(skb);
640          printk("Free the SKB on closed vci %d \n", vcc->vci);
643       iavcc = INPH_IA_VCC(vcc);
644       if (ia_pkt_tx (vcc, skb)) {
645          skb_queue_head(&iadev->tx_backlog, skb);
/* Complete transmitted packets: drain the tx_return_q filled by
 * ia_hack_tcq(). For each returned skb, pop skbs from the VC's
 * txing_skb queue up to the matching one, releasing each (via the
 * vcc->pop callback when present, else dev_kfree_skb_any). If the
 * matching skb is not found, the return-queue entry is requeued. */
651 void ia_tx_poll (IADEV *iadev) {
652    struct atm_vcc *vcc = NULL;
653    struct sk_buff *skb = NULL, *skb1 = NULL;
654    struct ia_vcc *iavcc;
658    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
659        skb = rtne->data.txskb;
661           printk("ia_tx_poll: skb is null\n");
664        vcc = ATM_SKB(skb)->vcc;
666           printk("ia_tx_poll: vcc is null\n");
667           dev_kfree_skb_any(skb);
671        iavcc = INPH_IA_VCC(vcc);
673           printk("ia_tx_poll: iavcc is null\n");
674           dev_kfree_skb_any(skb);
678        skb1 = skb_dequeue(&iavcc->txing_skb);
679        while (skb1 && (skb1 != skb)) {
680           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
681              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
683           IF_ERR(printk("Release the SKB not match\n");)
684           if ((vcc->pop) && (skb1->len != 0))
687              IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
691              dev_kfree_skb_any(skb1);
692           skb1 = skb_dequeue(&iavcc->txing_skb);
695           IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
696           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
699        if ((vcc->pop) && (skb->len != 0))
702           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
705           dev_kfree_skb_any(skb);
/* Write 16-bit 'val' to NOVRAM/EEPROM word 'addr': enable writes
 * (EWEN), issue the write command, clock the data out MSB first,
 * then disable writes again (EWDS). */
713 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
718 	 * Issue a command to enable writes to the NOVRAM
720 	NVRAM_CMD (EXTEND + EWEN);
723 	 * issue the write command
725 	NVRAM_CMD(IAWRITE + addr);
727 	 * Send the data, starting with D15, then D14, and so on for 16 bits
729 	for (i=15; i>=0; i--) {
730 		NVRAM_CLKOUT (val & 0x8000);
735 		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
737 		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
741 	 * disable writes again
743 	NVRAM_CMD(EXTEND + EWDS)
/* Read and return the 16-bit NOVRAM/EEPROM word at 'addr', clocking
 * the bits in MSB (D15) first. */
749 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
755 	 * Read the first bit that was clocked with the falling edge of
756 	 * the last command data clock
758 	NVRAM_CMD(IAREAD + addr);
760 	 * Now read the rest of the bits, the next bit read is D14, then D13,
764 	for (i=15; i>=0; i--) {
/* Probe the adapter variant from EEPROM word 25 (memType): size the TX/RX
 * descriptor counts and buffer sizes for 1M / 512K / smaller packet
 * memory (scaling down the defaults unless the user overrode the module
 * parameters), then decode the front-end (PHY) type and derive the ATM
 * payload line rate for 25M / DS3 / E3 / OC3 PHYs. */
773 static void ia_hw_type(IADEV *iadev) {
774    u_short memType = ia_eeprom_get(iadev, 25);
775    iadev->memType = memType;
776    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
777       iadev->num_tx_desc = IA_TX_BUF;
778       iadev->tx_buf_sz = IA_TX_BUF_SZ;
779       iadev->num_rx_desc = IA_RX_BUF;
780       iadev->rx_buf_sz = IA_RX_BUF_SZ;
781    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
782       if (IA_TX_BUF == DFL_TX_BUFFERS)
783         iadev->num_tx_desc = IA_TX_BUF / 2;
785         iadev->num_tx_desc = IA_TX_BUF;
786       iadev->tx_buf_sz = IA_TX_BUF_SZ;
787       if (IA_RX_BUF == DFL_RX_BUFFERS)
788         iadev->num_rx_desc = IA_RX_BUF / 2;
790         iadev->num_rx_desc = IA_RX_BUF;
791       iadev->rx_buf_sz = IA_RX_BUF_SZ;
794       if (IA_TX_BUF == DFL_TX_BUFFERS)
795         iadev->num_tx_desc = IA_TX_BUF / 8;
797         iadev->num_tx_desc = IA_TX_BUF;
798       iadev->tx_buf_sz = IA_TX_BUF_SZ;
799       if (IA_RX_BUF == DFL_RX_BUFFERS)
800         iadev->num_rx_desc = IA_RX_BUF / 8;
802         iadev->num_rx_desc = IA_RX_BUF;
803       iadev->rx_buf_sz = IA_RX_BUF_SZ;
/* RX packet RAM starts right after the TX packet buffers. */
805    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
806    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
807          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
808          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
811    if ((memType & FE_MASK) == FE_SINGLE_MODE) {
812       iadev->phy_type = PHY_OC3C_S;
813    else if ((memType & FE_MASK) == FE_UTP_OPTION)
814       iadev->phy_type = PHY_UTP155;
816      iadev->phy_type = PHY_OC3C_M;
819    iadev->phy_type = memType & FE_MASK;
820    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
821                                          memType,iadev->phy_type);)
/* Line rates are payload cells/sec: (bit rate / 8 bytes-per-octet
 * grouping) scaled by the 26/27 cell-overhead factor over 53-byte cells. */
822    if (iadev->phy_type == FE_25MBIT_PHY)
823      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
824    else if (iadev->phy_type == FE_DS3_PHY)
825      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
826    else if (iadev->phy_type == FE_E3_PHY)
827      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
829       iadev->LineRate = (u32)(ATM_OC3_PCR);
830    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
/* Front-end (PHY) interrupt handler: read and clear the PHY interrupt
 * status for whichever PHY is fitted (MB25 25Mbit, PM7345 DS3/E3, or
 * SUNI OC3) and update iadev->carrier_detect from the loss-of-signal
 * indication. */
834 static void IaFrontEndIntr(IADEV *iadev) {
835   volatile IA_SUNI *suni;
836   volatile ia_mb25_t *mb25;
837   volatile suni_pm7345_t *suni_pm7345;
841   if(iadev->phy_type & FE_25MBIT_PHY) {
842      mb25 = (ia_mb25_t*)iadev->phy;
843      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
844   } else if (iadev->phy_type & FE_DS3_PHY) {
845      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
846      /* clear FRMR interrupts */
847      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat;
848      iadev->carrier_detect =
849            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
850   } else if (iadev->phy_type & FE_E3_PHY ) {
851      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
852      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
853      iadev->carrier_detect =
854            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
857      suni = (IA_SUNI *)iadev->phy;
858      intr_status = suni->suni_rsop_status & 0xff;
859      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
861   if (iadev->carrier_detect)
862     printk("IA: SUNI carrier detected\n");
864     printk("IA: SUNI carrier lost signal\n");
/* Initialize the MB25 25Mbit PHY: program the master control register,
 * clear diagnostics, and seed carrier_detect from the interrupt-status
 * good-signal bit. */
868 void ia_mb25_init (IADEV *iadev)
870    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
872    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
874    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
875    mb25->mb25_diag_control = 0;
877     * Initialize carrier detect state
879    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
/* Initialize the PM7345 SUNI PHY for DS3 or E3 framing: set the framer
 * configuration and interrupt enables per mode, seed carrier_detect,
 * clear error counters and the master-test register, and program the
 * RX/TX cell-processor idle/cell pattern matching and loopback bits.
 * NOTE(review): register values are magic constants from the PM7345
 * datasheet -- verify against the datasheet before changing. */
883 void ia_suni_pm7345_init (IADEV *iadev)
885    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
886    if (iadev->phy_type & FE_DS3_PHY)
888       iadev->carrier_detect =
889           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
890       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
891       suni_pm7345->suni_ds3_frm_cfg = 1;
892       suni_pm7345->suni_ds3_tran_cfg = 1;
893       suni_pm7345->suni_config = 0;
894       suni_pm7345->suni_splr_cfg = 0;
895       suni_pm7345->suni_splt_cfg = 0;
899       iadev->carrier_detect =
900           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
901       suni_pm7345->suni_e3_frm_fram_options = 0x4;
902       suni_pm7345->suni_e3_frm_maint_options = 0x20;
903       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
904       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
905       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
906       suni_pm7345->suni_e3_tran_fram_options = 0x1;
907       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
908       suni_pm7345->suni_splr_cfg = 0x41;
909       suni_pm7345->suni_splt_cfg = 0x41;
912     * Enable RSOP loss of signal interrupt.
914    suni_pm7345->suni_intr_enbl = 0x28;
917     * Clear error counters
919    suni_pm7345->suni_id_reset = 0;
922     * Clear "PMCTST" in master test register.
924    suni_pm7345->suni_master_test = 0;
926    suni_pm7345->suni_rxcp_ctrl = 0x2c;
927    suni_pm7345->suni_rxcp_fctrl = 0x81;
929    suni_pm7345->suni_rxcp_idle_pat_h1 =
930    suni_pm7345->suni_rxcp_idle_pat_h2 =
931    suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
932    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
934    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
935    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
936    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
937    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
939    suni_pm7345->suni_rxcp_cell_pat_h1 =
940    suni_pm7345->suni_rxcp_cell_pat_h2 =
941    suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
942    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
944    suni_pm7345->suni_rxcp_cell_mask_h1 =
945    suni_pm7345->suni_rxcp_cell_mask_h2 =
946    suni_pm7345->suni_rxcp_cell_mask_h3 =
947    suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
949    suni_pm7345->suni_txcp_ctrl = 0xa4;
950    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
951    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
953    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
958    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
959 #endif /* __SNMP__ */
964 /***************************** IA_LIB END *****************************/
966 /* pwang_test debug utility */
967 int tcnter = 0, rcnter = 0;
/* Debug hex dump: print 'length' bytes at 'cp' in 16-byte rows, each
 * prefixed with 'prefix' -- hex bytes (grouped by 4, short rows padded
 * with blanks) followed by the printable-ASCII rendering ('.' for
 * non-printable bytes). */
968 void xdump( u_char*  cp, int  length, char*  prefix )
972     u_char*    pBuf = prntBuf;
974     while(count < length){
975         pBuf += sprintf( pBuf, "%s", prefix );
976         for(col = 0;count + col < length && col < 16; col++){
977             if (col != 0 && (col % 4) == 0)
978                 pBuf += sprintf( pBuf, " " );
979             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
981         while(col++ < 16){      /* pad end of buffer with blanks */
983                 sprintf( pBuf, " " );
984             pBuf += sprintf( pBuf, "   " );
986         pBuf += sprintf( pBuf, "  " );
987         for(col = 0;count + col < length && col < 16; col++){
988             if (isprint((int)cp[count + col]))
989                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
991                 pBuf += sprintf( pBuf, "." );
993         sprintf( pBuf, "\n" );
1000 }  /* close xdump(... */
1003 static struct atm_dev *ia_boards = NULL;
/*
 * Control-memory base addresses scale with the amount of on-board
 * memory: each base is multiplied by (iadev->mem / 128K).
 * Expansions are parenthesized so the macros compose safely in larger
 * expressions. NOTE(review): these macros implicitly reference a local
 * variable named 'iadev' and can only be used where one is in scope.
 */
#define ACTUAL_RAM_BASE \
	(RAM_BASE * ((iadev->mem) / (128 * 1024)))
#define ACTUAL_SEG_RAM_BASE \
	(IPHASE5575_FRAG_CONTROL_RAM_BASE * ((iadev->mem) / (128 * 1024)))
#define ACTUAL_REASS_RAM_BASE \
	(IPHASE5575_REASS_CONTROL_RAM_BASE * ((iadev->mem) / (128 * 1024)))
1013 /*-- some utilities and memory allocation stuff will come here -------------*/
/* Debug helper: dump the TCQ state -- hardware write pointer and the
 * descriptors around it, the host-side pointers, the TCQ start/end
 * registers, every queued slot, and each descriptor-table timestamp. */
1015 void desc_dbg(IADEV *iadev) {
1017   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1019   // regval = readl((u32)ia_cmds->maddr);
1020   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1021   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1022                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1023                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1024   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
1026   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1027   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1028   printk("tcq_st_ptr = 0x%x  tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1030   while (tcq_st_ptr != tcq_ed_ptr) {
1031       tmp = iadev->seg_ram+tcq_st_ptr;
1032       printk("TCQ slot %d desc = %d  Addr = 0x%x\n", i++, readw(tmp), tmp);
1035   for(i=0; i <iadev->num_tx_desc; i++)
1036       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1040 /*----------------------------- Receiving side stuff --------------------------*/
/* Reassembly exception-queue handler. The draining logic is compiled
 * out (#if 0) because servicing the queue this way caused excessive
 * exception interrupts; kept for reference. */
1042 static void rx_excp_rcvd(struct atm_dev *dev)
1044 #if 0 /* closing the receiving size will cause too many excp int */
1047   u_short excpq_rd_ptr;
1050   iadev = INPH_IA_DEV(dev);
1051   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1052   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
1053   { printk("state = %x \n", state);
1054         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
1055  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
1056         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1057             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1058         // TODO: update exception stat
1059 	vci = readw(iadev->reass_ram+excpq_rd_ptr);
1060 	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
1063         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
1064             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1065         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
1066         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
/* Return RX descriptor 'desc' to the hardware free-descriptor queue:
 * write the descriptor number at the FDQ write pointer, advance the
 * pointer by one 16-bit slot (wrapping at the end), and publish the new
 * write pointer to the reassembly engine. */
1071 static void free_desc(struct atm_dev *dev, int desc)
1074 	iadev = INPH_IA_DEV(dev);
1075 	writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1076 	iadev->rfL.fdq_wr +=2;
1077 	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1078 		iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
1079 	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
/* Pull one completed packet off the reassembly packet-complete queue
 * (PCQ): read the descriptor number at the PCQ read pointer, advance
 * and publish the pointer, validate the buffer descriptor and owning
 * VCC, check error status bits, size-check the SDU, allocate/charge an
 * skb, and queue a DMA-list entry to move the packet from board memory
 * into the skb. On any error path the descriptor goes back to the free
 * queue via free_desc(). */
1083 static int rx_pkt(struct atm_dev *dev)
1086 	struct atm_vcc *vcc;
1087 	unsigned short status;
1088 	struct rx_buf_desc *buf_desc_ptr;
1092 	struct sk_buff *skb;
1093 	u_int buf_addr, dma_addr;
1094 	iadev = INPH_IA_DEV(dev);
1095 	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
1097 	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
1100 	/* mask 1st 3 bits to get the actual descno. */
1101 	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
1102         IF_RX(printk("reass_ram = 0x%x iadev->rfL.pcq_rd = 0x%x desc = %d\n",
1103                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1104               printk(" pcq_wr_ptr = 0x%x\n",
1105                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1106 	/* update the read pointer  - maybe we shud do this in the end*/
1107 	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
1108 	    iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
1110 	    iadev->rfL.pcq_rd += 2;
1111 	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);
1113 	/* get the buffer desc entry.
1114 		update stuff. - doesn't seem to be any update necessary
1116 	buf_desc_ptr = (struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1117 	/* make the ptr point to the corresponding buffer desc entry */
1118 	buf_desc_ptr += desc;
/* Sanity-check descriptor and VC index before dereferencing rx_open. */
1119         if (!desc || (desc > iadev->num_rx_desc) ||
1120                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
1121             free_desc(dev, desc);
1122             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1125 	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
1128 	    free_desc(dev, desc);
1129 	    printk("IA: null vcc, drop PDU\n");
1134 	/* might want to check the status bits for errors */
1135 	status = (u_short) (buf_desc_ptr->desc_mode);
1136 	if (status & (RX_CER | RX_PTE | RX_OFL))
1138 	    atomic_inc(&vcc->stats->rx_err);
1139 	    IF_ERR(printk("IA: bad packet, dropping it");)
1140 	    if (status & RX_CER) {
1141 	        IF_ERR(printk(" cause: packet CRC error\n");)
1143 	    else if (status & RX_PTE) {
1144 	        IF_ERR(printk(" cause: packet time out\n");)
1147 	        IF_ERR(printk(" cause: buffer over flow\n");)
/* SDU length is the DMA end address minus the buffer start address. */
1156 	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
1157 	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
1158 	len = dma_addr - buf_addr;
1159         if (len > iadev->rx_buf_sz) {
1160            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1161            atomic_inc(&vcc->stats->rx_err);
1165 #if LINUX_VERSION_CODE >= 0x20312
1166         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1168         if (atm_charge(vcc, atm_pdu2truesize(len))) {
1169 	   /* lets allocate an skb for now */
1170 	   skb = alloc_skb(len, GFP_ATOMIC);
1173 	      IF_ERR(printk("can't allocate memory for recv, drop pkt!\n");)
1174 	      atomic_inc(&vcc->stats->rx_drop);
1175 	      atm_return(vcc, atm_pdu2truesize(len));
1180             IF_EVENT(printk("IA: Rx over the rx_quota %ld\n", vcc->rx_quota);)
1183            printk("Drop control packets\n");
1188 	ATM_SKB(skb)->vcc = vcc;
1189 	ATM_SKB(skb)->iovcnt = 0;
1190         ATM_DESC(skb) = desc;
1191 	skb_queue_tail(&iadev->rx_dma_q, skb);
1193 	/* Build the DLE structure */
1194 	wr_ptr = iadev->rx_dle_q.write;
1195 	wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1196 		len, PCI_DMA_FROMDEVICE);
1197 	wr_ptr->local_pkt_addr = buf_addr;
1198 	wr_ptr->bytes = len;	/* We don't know this do we ?? */
1199 	wr_ptr->mode = DMA_INT_ENABLE;
1201 	/* shud take care of wrap around here too. */
1202 	if(++wr_ptr == iadev->rx_dle_q.end)
1203 	     wr_ptr = iadev->rx_dle_q.start;
1204 	iadev->rx_dle_q.write = wr_ptr;
1206 	/* Increment transaction counter */
1207 	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
1210       free_desc(dev, desc);
/*
 * rx_intr() - reassembly-side (receive) interrupt dispatcher.
 *
 * Reads the 16-bit reassembly interrupt status and handles each cause:
 *  - RX_PKT_RCVD:   drain the packet-complete queue until STATE_REG says
 *                   PCQ_EMPTY (per-packet work happens in elided lines);
 *  - RX_FREEQ_EMPT: free-buffer queue ran dry; snapshot pkt count/jiffies,
 *                   and if nothing moved for >50 jiffies run the recovery
 *                   ("test") logic and re-mask FREEQ/EXCP interrupts;
 *  - RX_EXCP_RCVD:  exception-queue entry (currently only logged);
 *  - RX_RAW_RCVD:   raw cell received (currently only logged).
 * NOTE(review): intermediate lines are elided in this view; branch bodies
 * and braces between the visible statements are not shown.
 */
1214 static void rx_intr(struct atm_dev *dev)
1220 iadev = INPH_IA_DEV(dev);
/* Read (and thereby sample) the reassembly interrupt status bits. */
1221 status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
1222 IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1223 if (status & RX_PKT_RCVD)
1226 /* Basically recvd an interrupt for receving a packet.
1227 A descriptor would have been written to the packet complete
1228 queue. Get all the descriptors and set up dma to move the
1229 packets till the packet complete queue is empty..
1231 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1232 IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
/* Re-poll STATE_REG each iteration until the packet-complete queue drains. */
1233 while(!(state & PCQ_EMPTY))
1236 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1240 if (status & RX_FREEQ_EMPT)
/* Remember where we stalled so the watchdog branch below can detect
   "no packets received since the free queue emptied". */
1243 iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1244 iadev->rx_tmp_jif = jiffies;
/* Stuck for more than 50 jiffies with zero progress: walk every rx
   descriptor (recovery loop body elided) and unmask the interrupts. */
1247 else if (((jiffies - iadev->rx_tmp_jif) > 50) &&
1248 ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1249 for (i = 1; i <= iadev->num_rx_desc; i++)
1251 printk("Test logic RUN!!!!\n");
1252 writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1255 IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
1258 if (status & RX_EXCP_RCVD)
1260 /* probably need to handle the exception queue also. */
1261 IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
1266 if (status & RX_RAW_RCVD)
1268 /* need to handle the raw incoming cells. This deepnds on
1269 whether we have programmed to receive the raw cells or not.
1271 IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
/*
 * rx_dle_intr() - receive DMA-list-entry completion handler.
 *
 * Walks the RX DLE ring from our software read pointer up to the
 * hardware's current position (derived from IPHASE5575_RX_LIST_ADDR).
 * For each completed DMA: dequeue the matching skb from rx_dma_q, free
 * its hardware descriptor, unmap the DMA buffer, validate the AAL5
 * CPCS trailer length, trim the skb to the real PDU length and push it
 * up the stack (push itself is in elided lines). Finally, if reception
 * was stalled (!iadev->rxing) and the free queue has buffers again,
 * unmask the RX interrupts to resume.
 * NOTE(review): interior lines (braces, continue/return paths) are
 * elided in this view.
 */
1276 static void rx_dle_intr(struct atm_dev *dev)
1279 struct atm_vcc *vcc;
1280 struct sk_buff *skb;
1283 struct dle *dle, *cur_dle;
1286 iadev = INPH_IA_DEV(dev);
1288 /* free all the dles done, that is just update our own dle read pointer
1289 - do we really need to do this. Think not. */
1290 /* DMA is done, just get all the recevie buffers from the rx dma queue
1291 and push them up to the higher layer protocol. Also free the desc
1292 associated with the buffer. */
1293 dle = iadev->rx_dle_q.read;
/* Low bits of the hardware list address give the byte offset of the DLE
   the chip is currently at; >>4 converts bytes to DLE index (16B DLEs). */
1294 dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
1295 cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
1296 while(dle != cur_dle)
1298 /* free the DMAed skb */
1299 skb = skb_dequeue(&iadev->rx_dma_q);
/* Descriptor index was stashed in the skb control block at submit time. */
1302 desc = ATM_DESC(skb);
1303 free_desc(dev, desc);
1305 if (!(len = skb->len))
1307 printk("rx_dle_intr: skb len 0\n");
1308 dev_kfree_skb_any(skb);
1312 struct cpcs_trailer *trailer;
1314 struct ia_vcc *ia_vcc;
1316 pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1317 len, PCI_DMA_FROMDEVICE);
1318 /* no VCC related housekeeping done as yet. lets see */
1319 vcc = ATM_SKB(skb)->vcc;
1321 printk("IA: null vcc\n");
1322 dev_kfree_skb_any(skb);
1325 ia_vcc = INPH_IA_VCC(vcc);
/* Error path: count it and return the buffer accounting to the ATM core
   (API name differs across the kernel versions this driver supports). */
1328 atomic_inc(&vcc->stats->rx_err);
1329 dev_kfree_skb_any(skb);
1330 #if LINUX_VERSION_CODE >= 0x20312
1331 atm_return(vcc, atm_guess_pdu2truesize(len));
1333 atm_return(vcc, atm_pdu2truesize(len));
1337 // get real pkt length pwang_test
/* AAL5 CPCS trailer sits at the very end of the received buffer; its
   length field gives the true PDU size (byte-swapped by swap()). */
1338 trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1339 skb->len - sizeof(*trailer));
1340 length = swap(trailer->length);
/* Sanity-check the trailer: length must fit the buffer and the skb. */
1341 if ((length > iadev->rx_buf_sz) || (length >
1342 (skb->len - sizeof(struct cpcs_trailer))))
1344 atomic_inc(&vcc->stats->rx_err);
1345 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
1347 dev_kfree_skb_any(skb);
1348 #if LINUX_VERSION_CODE >= 0x20312
1349 atm_return(vcc, atm_guess_pdu2truesize(len));
1351 atm_return(vcc, atm_pdu2truesize(len));
/* Good packet: cut the skb down to the real payload length. */
1355 skb_trim(skb, length);
1357 /* Display the packet */
1358 IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
1359 xdump(skb->data, skb->len, "RX: ");
1362 IF_RX(printk("rx_dle_intr: skb push");)
1364 atomic_inc(&vcc->stats->rx);
1365 iadev->rx_pkt_cnt++;
/* Advance around the ring, wrapping at the end sentinel. */
1368 if (++dle == iadev->rx_dle_q.end)
1369 dle = iadev->rx_dle_q.start;
1371 iadev->rx_dle_q.read = dle;
1373 /* if the interrupts are masked because there were no free desc available,
1375 if (!iadev->rxing) {
1376 state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1377 if (!(state & FREEQ_EMPTY)) {
/* Free buffers are back: clear the mask bits to resume RX interrupts. */
1378 state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1379 writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1380 iadev->reass_reg+REASS_MASK_REG);
/*
 * open_rx() - enable reception on a VC.
 *
 * Returns 0 immediately for ATM_NONE rx traffic class. Rejects ABR on
 * 25-Mbit PHYs. Otherwise marks this VCI valid in the on-card RX VC
 * table, initializes the reassembly-table entry (ABR VCs go through
 * init_abr_vc()/ia_open_abr_vc(); UBR gets NO_AAL5_PKT), and records
 * the vcc in iadev->rx_open[] so incoming PDUs can be matched to it.
 * NOTE(review): error-return lines and some braces are elided here.
 */
1387 static int open_rx(struct atm_vcc *vcc)
1392 IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1394 if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1395 iadev = INPH_IA_DEV(vcc->dev);
/* ABR is not supported by the 25-Mbit front end. */
1396 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1397 if (iadev->phy_type & FE_25MBIT_PHY) {
1398 printk("IA: ABR not support\n");
1402 /* Make only this VCI in the vc table valid and let all
1403 others be invalid entries */
1404 vc_table = (u_short *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1405 vc_table += vcc->vci;
1406 /* mask the last 6 bits and OR it with 3 for 1K VCs */
/* VCI goes in the upper bits; low bits left zero marks the entry valid. */
1408 *vc_table = vcc->vci << 6;
1409 /* Also keep a list of open rx vcs so that we can attach them with
1410 incoming PDUs later. */
/* ABR in either direction needs the per-VC ABR parameter block set up. */
1411 if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1412 (vcc->qos.txtp.traffic_class == ATM_ABR))
1414 srv_cls_param_t srv_p;
1415 init_abr_vc(iadev, &srv_p);
1416 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1418 else { /* for UBR later may need to add CBR logic */
1419 reass_ptr = (u_short *)
1420 (iadev->reass_ram+REASS_TABLE*iadev->memSize);
1421 reass_ptr += vcc->vci;
1422 *reass_ptr = NO_AAL5_PKT;
/* Warn (but still overwrite) if this VCI already had an rx vcc. */
1425 if (iadev->rx_open[vcc->vci])
1426 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
1427 vcc->dev->number, vcc->vci);
1428 iadev->rx_open[vcc->vci] = vcc;
/*
 * rx_init() - one-time initialization of the reassembly (receive) side.
 *
 * Allocates the RX DLE ring via pci_alloc_consistent(), programs the
 * hardware list-address register, resets the reassembly engine, then
 * lays out and initializes the receive-side control memory: buffer
 * descriptor table, free-buffer queue, packet-complete queue, exception
 * queue, reassembly table, VC lookup table and ABR VC table. Finally it
 * programs the filter/timeout registers, allocates the rx_open[] vcc
 * array, clears pending interrupts and brings the reassembler online
 * (R_ONLINE). The pci_free_consistent() at the end is the error-unwind
 * path (labels elided in this view).
 * NOTE(review): many interior lines (error checks, braces, pointer
 * increments) are elided here.
 */
1432 static int rx_init(struct atm_dev *dev)
1435 struct rx_buf_desc *buf_desc_ptr;
1436 unsigned long rx_pkt_start = 0;
1438 struct abr_vc_table *abr_vc_table;
1442 int i,j, vcsize_sel;
1443 u_short freeq_st_adr;
1444 u_short *freeq_start;
1446 iadev = INPH_IA_DEV(dev);
1447 // spin_lock_init(&iadev->rx_lock);
1449 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1450 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1451 &iadev->rx_dle_dma);
1453 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
/* Software ring pointers: read == write == start means "empty". */
1456 iadev->rx_dle_q.start = (struct dle*)dle_addr;
1457 iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1458 iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1459 iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1460 /* the end of the dle q points to the entry after the last
1461 DLE that can be used. */
1463 /* write the upper 20 bits of the start address to rx list address register */
1464 writel(iadev->rx_dle_dma & 0xfffff000,
1465 iadev->dma + IPHASE5575_RX_LIST_ADDR);
1466 IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n",
1467 (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR),
1468 *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
1469 printk("Rx Dle list addr: 0x%08x value: 0x%0x\n",
1470 (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR),
1471 *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
/* Mask everything, take the reassembler offline, then reset it before
   touching its control memory. */
1473 writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1474 writew(0, iadev->reass_reg+MODE_REG);
1475 writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1477 /* Receive side control memory map
1478 -------------------------------
1480 Buffer descr 0x0000 (736 - 23K)
1481 VP Table 0x5c00 (256 - 512)
1482 Except q 0x5e00 (128 - 512)
1483 Free buffer q 0x6000 (1K - 2K)
1484 Packet comp q 0x6800 (1K - 2K)
1485 Reass Table 0x7000 (1K - 2K)
1486 VC Table 0x7800 (1K - 2K)
1487 ABR VC Table 0x8000 (1K - 32K)
1490 /* Base address for Buffer Descriptor Table */
1491 writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1492 /* Set the buffer size register */
1493 writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1495 /* Initialize each entry in the Buffer Descriptor Table */
1496 iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1497 buf_desc_ptr =(struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1498 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1500 rx_pkt_start = iadev->rx_pkt_ram;
/* Descriptor 0 is reserved; real descriptors are numbered 1..num_rx_desc,
   each pointing at a rx_buf_sz slice of packet RAM. */
1501 for(i=1; i<=iadev->num_rx_desc; i++)
1503 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1504 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1505 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1507 rx_pkt_start += iadev->rx_buf_sz;
1509 IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)
/* Free-buffer queue: start/end plus read/write pointers; writing the
   write pointer at the end address marks the queue initially full. */
1510 i = FREE_BUF_DESC_Q*iadev->memSize;
1511 writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
1512 writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1513 writew(i+iadev->num_rx_desc*sizeof(u_short),
1514 iadev->reass_reg+FREEQ_ED_ADR);
1515 writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1516 writew(i+iadev->num_rx_desc*sizeof(u_short),
1517 iadev->reass_reg+FREEQ_WR_PTR);
1518 /* Fill the FREEQ with all the free descriptors. */
1519 freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1520 freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1521 for(i=1; i<=iadev->num_rx_desc; i++)
1523 *freeq_start = (u_short)i;
1526 IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)
1527 /* Packet Complete Queue */
1528 i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1529 writew(i, iadev->reass_reg+PCQ_ST_ADR);
1530 writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1531 writew(i, iadev->reass_reg+PCQ_RD_PTR);
1532 writew(i, iadev->reass_reg+PCQ_WR_PTR);
1534 /* Exception Queue */
1535 i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1536 writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1537 writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1538 iadev->reass_reg+EXCP_Q_ED_ADR);
1539 writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1540 writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1542 /* Load local copy of FREEQ and PCQ ptrs */
1543 iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1544 iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1545 iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1546 iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1547 iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1548 iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1549 iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1550 iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1552 IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1553 iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1554 iadev->rfL.pcq_wr);)
1555 /* just for check - no VP TBL */
1557 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1558 /* initialize VP Table for invalid VPIs
1559 - I guess we can write all 1s or 0x000f in the entire memory
1560 space or something similar.
1563 /* This seems to work and looks right to me too !!! */
1564 i = REASS_TABLE * iadev->memSize;
1565 writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1566 /* initialize Reassembly table to I don't know what ???? */
1567 reass_table = (u16 *)(iadev->reass_ram+i);
1568 j = REASS_TABLE_SZ * iadev->memSize;
1569 for(i=0; i < j; i++)
1570 *reass_table++ = NO_AAL5_PKT;
/* Derive vcsize_sel from num_vc (selection loop partially elided). */
1573 while (i != iadev->num_vc) {
1577 i = RX_VC_TABLE * iadev->memSize;
1578 writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1579 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1580 j = RX_VC_TABLE_SZ * iadev->memSize;
1581 for(i = 0; i < j; i++)
1583 /* shift the reassembly pointer by 3 + lower 3 bits of
1584 vc_lkup_base register (=3 for 1K VCs) and the last byte
1585 is those low 3 bits.
1586 Shall program this later.
1588 *vc_table = (i << 6) | 15; /* for invalid VCI */
1592 i = ABR_VC_TABLE * iadev->memSize;
1593 writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1595 i = ABR_VC_TABLE * iadev->memSize;
1596 abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
1597 j = REASS_TABLE_SZ * iadev->memSize;
1598 memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
/* Default ABR per-VC parameters (rdf/air values; meaning per i)Chip doc). */
1599 for(i = 0; i < j; i++) {
1600 abr_vc_table->rdf = 0x0003;
1601 abr_vc_table->air = 0x5eb1;
1605 /* Initialize other registers */
1607 /* VP Filter Register set for VC Reassembly only */
1608 writew(0xff00, iadev->reass_reg+VP_FILTER);
1609 writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1610 writew(0x1, iadev->reass_reg+PROTOCOL_ID);
1612 /* Packet Timeout Count related Registers :
1613 Set packet timeout to occur in about 3 seconds
1614 Set Packet Aging Interval count register to overflow in about 4 us
1616 writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1618 i = ((u32)ptr16 >> 6) & 0xff;
1620 i |=(((u32)ptr16 << 2) & 0xff00);
1621 writew(i, iadev->reass_reg+TMOUT_RANGE);
1622 /* initiate the desc_tble */
1623 for(i=0; i<iadev->num_tx_desc;i++)
1624 iadev->desc_tbl[i].timestamp = 0;
1626 /* to clear the interrupt status register - read it */
1627 readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1629 /* Mask Register - clear it */
1630 writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1632 skb_queue_head_init(&iadev->rx_dma_q);
1633 iadev->rx_free_desc_qhead = NULL;
/* One pointer slot per VC (4 bytes each on this 32-bit-era target). */
1634 iadev->rx_open = kmalloc(4*iadev->num_vc,GFP_KERNEL);
1635 if (!iadev->rx_open)
1637 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1641 memset(iadev->rx_open, 0, 4*iadev->num_vc);
1643 iadev->rx_pkt_cnt = 0;
/* Everything is set up: bring the reassembler online. */
1645 writew(R_ONLINE, iadev->reass_reg+MODE_REG);
/* Error-unwind path: release the DLE ring (labels elided in this view). */
1649 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1657 The memory map suggested in appendix A and the coding for it.
1658 Keeping it around just in case we change our mind later.
1660 Buffer descr 0x0000 (128 - 4K)
1661 UBR sched 0x1000 (1K - 4K)
1662 UBR Wait q 0x2000 (1K - 4K)
1663 Commn queues 0x3000 Packet Ready, Transmit comp(0x3100)
1665 extended VC 0x4000 (1K - 8K)
1666 ABR sched 0x6000 and ABR wait queue (1K - 2K) each
1667 CBR sched 0x7000 (as needed)
1668 VC table 0x8000 (1K - 32K)
/*
 * tx_intr() - segmentation-side (transmit) interrupt handler.
 *
 * Reads the segmentation interrupt status. On TRANSMIT_DONE it does the
 * completion work under tx_lock (body elided), acks the bit by writing
 * it back to the status register, and wakes any close() waiting in
 * close_wait. TCQ_NOT_EMPTY is currently only logged.
 */
1671 static void tx_intr(struct atm_dev *dev)
1674 unsigned short status;
1675 unsigned long flags;
1677 iadev = INPH_IA_DEV(dev);
1679 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1680 if (status & TRANSMIT_DONE){
1682 IF_EVENT(printk("Tansmit Done Intr logic run\n");)
/* Completion processing between lock/unlock is elided in this view. */
1683 spin_lock_irqsave(&iadev->tx_lock, flags);
1685 spin_unlock_irqrestore(&iadev->tx_lock, flags);
/* Write-one-to-clear acknowledge of the TRANSMIT_DONE condition. */
1686 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1687 if (iadev->close_pending)
1688 wake_up(&iadev->close_wait);
1690 if (status & TCQ_NOT_EMPTY)
1692 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
/*
 * tx_dle_intr() - transmit DMA-list-entry completion handler.
 *
 * Under tx_lock, walks the TX DLE ring from the software read pointer
 * to the hardware's current position. Each packet used two DLEs (data +
 * CPCS trailer), so the skb unmap happens only on even DLE boundaries.
 * Completed skbs are either released (vcc->pop or kfree) when the VC is
 * above the rate limit, or held on iavcc->txing_skb marked IA_DLED for
 * the flow-control path. Unlock-before-free happens on the error paths.
 * NOTE(review): interior lines (null checks, braces) are elided here.
 */
1696 static void tx_dle_intr(struct atm_dev *dev)
1699 struct dle *dle, *cur_dle;
1700 struct sk_buff *skb;
1701 struct atm_vcc *vcc;
1702 struct ia_vcc *iavcc;
1704 unsigned long flags;
1706 iadev = INPH_IA_DEV(dev);
1707 spin_lock_irqsave(&iadev->tx_lock, flags);
1708 dle = iadev->tx_dle_q.read;
/* Hardware list address low bits -> byte offset of the current DLE. */
1709 dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
1710 (sizeof(struct dle)*DLE_ENTRIES - 1);
1711 cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1712 while (dle != cur_dle)
1714 /* free the DMAed skb */
1715 skb = skb_dequeue(&iadev->tx_dma_q);
1718 /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1719 if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1720 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1723 vcc = ATM_SKB(skb)->vcc;
1725 printk("tx_dle_intr: vcc is null");
1726 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1727 dev_kfree_skb_any(skb);
1731 iavcc = INPH_IA_VCC(vcc);
1733 printk("tx_dle_intr: iavcc is null\n");
1734 spin_unlock_irqrestore(&iadev->tx_lock, flags);
1735 dev_kfree_skb_any(skb);
/* Fast VCs: release the skb right away (via pop callback if present). */
1738 if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1739 if ((vcc->pop) && (skb->len != 0))
1744 dev_kfree_skb_any(skb);
1747 else { /* Hold the rate-limited skb for flow control */
1748 IA_SKB_STATE(skb) |= IA_DLED;
1749 skb_queue_tail(&iavcc->txing_skb, skb);
1751 IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
/* Advance around the ring with wrap-around. */
1752 if (++dle == iadev->tx_dle_q.end)
1753 dle = iadev->tx_dle_q.start;
1755 iadev->tx_dle_q.read = dle;
1756 spin_unlock_irqrestore(&iadev->tx_lock, flags);
/*
 * open_tx() - set up transmission on a VC.
 *
 * Returns 0 for ATM_NONE tx traffic class. Rejects ABR and CBR on
 * 25-Mbit PHYs and over-large max_sdu. Normalizes the requested PCR
 * against the line rate, derives the flow-control timeout (ltimeout)
 * and socket send-buffer sizing for rate-limited VCs, then programs
 * the on-card main/extended VC table entries according to the traffic
 * class: UBR (CRC_APPEND + float-encoded ACR), ABR (full srv_cls_param
 * setup then ia_open_abr_vc()), or CBR (ia_cbr_setup()). Other classes
 * are rejected. Finally marks the VC active in testTable.
 * NOTE(review): return statements and many braces are elided here.
 */
1759 static int open_tx(struct atm_vcc *vcc)
1761 struct ia_vcc *ia_vcc;
1766 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1767 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1768 iadev = INPH_IA_DEV(vcc->dev);
/* The 25-Mbit front end supports neither ABR nor CBR shaping. */
1770 if (iadev->phy_type & FE_25MBIT_PHY) {
1771 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1772 printk("IA: ABR not support\n");
1775 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1776 printk("IA: CBR not support\n");
1780 ia_vcc = INPH_IA_VCC(vcc);
1781 memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
/* max_sdu must leave room for the AAL5 CPCS trailer in the tx buffer. */
1782 if (vcc->qos.txtp.max_sdu >
1783 (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1784 printk("IA: SDU size over (%d) the configured SDU size %d\n",
1785 vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1786 INPH_IA_VCC(vcc) = NULL;
1790 ia_vcc->vc_desc_cnt = 0;
/* Normalize pcr: unlimited/unspecified -> line rate; clamp to max_pcr
   when given; never exceed the physical line rate. */
1794 if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1795 vcc->qos.txtp.pcr = iadev->LineRate;
1796 else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1797 vcc->qos.txtp.pcr = iadev->LineRate;
1798 else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1799 vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1800 if (vcc->qos.txtp.pcr > iadev->LineRate)
1801 vcc->qos.txtp.pcr = iadev->LineRate;
1802 ia_vcc->pcr = vcc->qos.txtp.pcr;
/* Flow-control timeout scales inversely with the VC's cell rate. */
1804 if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1805 else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1806 else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1807 else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
1808 if (ia_vcc->pcr < iadev->rate_limit)
1809 skb_queue_head_init (&ia_vcc->txing_skb);
/* Slow VCs get a send buffer sized from max_sdu so the socket layer
   throttles the sender instead of flooding the card. */
1810 if (ia_vcc->pcr < iadev->rate_limit) {
1811 if (vcc->qos.txtp.max_sdu != 0) {
1812 if (ia_vcc->pcr > 60000)
1813 vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 5;
1814 else if (ia_vcc->pcr > 2000)
1815 vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 4;
1817 vcc->sk->sndbuf = 3*vcc->qos.txtp.max_sdu;
1820 vcc->sk->sndbuf = 24576;
/* Point at this VC's entries in the on-card main/extended VC tables. */
1823 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1824 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1827 memset((caddr_t)vc, 0, sizeof(*vc));
1828 memset((caddr_t)evc, 0, sizeof(*evc));
1830 /* store the most significant 4 bits of vci as the last 4 bits
1831 of first part of atm header.
1832 store the last 12 bits of vci as first 12 bits of the second
1833 part of the atm header.
1835 evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1836 evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1838 /* check the following for different traffic classes */
1839 if (vcc->qos.txtp.traffic_class == ATM_UBR)
1842 vc->status = CRC_APPEND;
/* ACR is stored in the card's floating-point cell-rate encoding. */
1843 vc->acr = cellrate_to_float(iadev->LineRate);
1844 if (vcc->qos.txtp.pcr > 0)
1845 vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1846 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1847 vcc->qos.txtp.max_pcr,vc->acr);)
1849 else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1850 { srv_cls_param_t srv_p;
1851 IF_ABR(printk("Tx ABR VCC\n");)
1852 init_abr_vc(iadev, &srv_p);
1853 if (vcc->qos.txtp.pcr > 0)
1854 srv_p.pcr = vcc->qos.txtp.pcr;
/* MCR is only honored if the sum of committed rates fits the line. */
1855 if (vcc->qos.txtp.min_pcr > 0) {
1856 int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1857 if (tmpsum > iadev->LineRate)
1859 srv_p.mcr = vcc->qos.txtp.min_pcr;
1860 iadev->sum_mcr += vcc->qos.txtp.min_pcr;
/* Copy across every ABR parameter the caller actually supplied. */
1863 if (vcc->qos.txtp.icr)
1864 srv_p.icr = vcc->qos.txtp.icr;
1865 if (vcc->qos.txtp.tbe)
1866 srv_p.tbe = vcc->qos.txtp.tbe;
1867 if (vcc->qos.txtp.frtt)
1868 srv_p.frtt = vcc->qos.txtp.frtt;
1869 if (vcc->qos.txtp.rif)
1870 srv_p.rif = vcc->qos.txtp.rif;
1871 if (vcc->qos.txtp.rdf)
1872 srv_p.rdf = vcc->qos.txtp.rdf;
1873 if (vcc->qos.txtp.nrm_pres)
1874 srv_p.nrm = vcc->qos.txtp.nrm;
1875 if (vcc->qos.txtp.trm_pres)
1876 srv_p.trm = vcc->qos.txtp.trm;
1877 if (vcc->qos.txtp.adtf_pres)
1878 srv_p.adtf = vcc->qos.txtp.adtf;
1879 if (vcc->qos.txtp.cdf_pres)
1880 srv_p.cdf = vcc->qos.txtp.cdf;
/* Initial cell rate can never exceed the peak cell rate. */
1881 if (srv_p.icr > srv_p.pcr)
1882 srv_p.icr = srv_p.pcr;
1883 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1884 srv_p.pcr, srv_p.mcr);)
1885 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1886 } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1887 if (iadev->phy_type & FE_25MBIT_PHY) {
1888 printk("IA: CBR not support\n");
1891 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1892 IF_CBR(printk("PCR is not availble\n");)
1896 vc->status = CRC_APPEND;
1897 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1902 printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
1904 iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1905 IF_EVENT(printk("ia open_tx returning \n");)
/*
 * tx_init() - one-time initialization of the segmentation (transmit) side.
 *
 * Mirrors rx_init() for the transmit path: allocates the TX DLE ring,
 * resets the segmentation engine, lays out the transmit control memory
 * (buffer descriptor table, transmit-complete queue, packet-ready queue,
 * CBR/UBR/ABR schedule tables and wait queues, main/extended VC tables),
 * allocates per-descriptor CPCS-trailer buffers (DMA-mapped) and the
 * desc_tbl/testTable bookkeeping arrays, programs the rate registers,
 * and brings the segmenter online (T_ONLINE). The trailing labels
 * implement goto-style unwind of all allocations on failure.
 * NOTE(review): many interior lines (error checks, braces, increments)
 * are elided in this view.
 */
1910 static int tx_init(struct atm_dev *dev)
1913 struct tx_buf_desc *buf_desc_ptr;
1914 unsigned int tx_pkt_start;
1926 iadev = INPH_IA_DEV(dev);
1927 spin_lock_init(&iadev->tx_lock);
1929 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1930 readw(iadev->seg_reg+SEG_MASK_REG));)
1932 /* Allocate 4k (boundary aligned) bytes */
1933 dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1934 &iadev->tx_dle_dma);
1936 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
/* Empty ring: read == write == start; end is one-past-last sentinel. */
1939 iadev->tx_dle_q.start = (struct dle*)dle_addr;
1940 iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1941 iadev->tx_dle_q.write = iadev->tx_dle_q.start;
1942 iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1944 /* write the upper 20 bits of the start address to tx list address register */
1945 writel(iadev->tx_dle_dma & 0xfffff000,
1946 iadev->dma + IPHASE5575_TX_LIST_ADDR);
/* Mask all tx interrupts and reset the segmentation engine before
   touching its control memory. */
1947 writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1948 writew(0, iadev->seg_reg+MODE_REG_0);
1949 writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1950 iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1951 iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1952 iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1955 Transmit side control memory map
1956 --------------------------------
1957 Buffer descr 0x0000 (128 - 4K)
1958 Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
1961 CBR Table 0x1800 (as needed) - 6K
1962 UBR Table 0x3000 (1K - 4K) - 12K
1963 UBR Wait queue 0x4000 (1K - 4K) - 16K
1964 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1965 ABR Tbl - 20K, ABR Wq - 22K
1966 extended VC 0x6000 (1K - 8K) - 24K
1967 VC Table 0x8000 (1K - 32K) - 32K
1969 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1970 and Wait q, which can be allotted later.
1973 /* Buffer Descriptor Table Base address */
1974 writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1976 /* initialize each entry in the buffer descriptor table */
1977 buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1978 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1980 tx_pkt_start = TX_PACKET_RAM;
/* Descriptor 0 reserved; descriptors 1..num_tx_desc each own a
   tx_buf_sz slice of packet RAM, mode AAL5. */
1981 for(i=1; i<=iadev->num_tx_desc; i++)
1983 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1984 buf_desc_ptr->desc_mode = AAL5;
1985 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1986 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1988 tx_pkt_start += iadev->tx_buf_sz;
/* One CPCS-trailer buffer per tx descriptor, pre-DMA-mapped so the
   second DLE of each packet can point at it. */
1990 iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1991 if (!iadev->tx_buf) {
1992 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1995 for (i= 0; i< iadev->num_tx_desc; i++)
1997 struct cpcs_trailer *cpcs;
1999 cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
2001 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
2002 goto err_free_tx_bufs;
2004 iadev->tx_buf[i].cpcs = cpcs;
2005 iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
2006 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
2008 iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
2009 sizeof(struct desc_tbl_t), GFP_KERNEL);
2010 if(!iadev->desc_tbl)
2011 goto err_free_all_tx_bufs;
2013 /* Communication Queues base address */
2014 i = TX_COMP_Q * iadev->memSize;
2015 writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
2017 /* Transmit Complete Queue */
2018 writew(i, iadev->seg_reg+TCQ_ST_ADR);
2019 writew(i, iadev->seg_reg+TCQ_RD_PTR);
/* Write pointer at start+N entries marks the TCQ initially full of
   free descriptors; host_tcq_wr shadows it for the driver. */
2020 writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
2021 iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2022 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2023 iadev->seg_reg+TCQ_ED_ADR);
2024 /* Fill the TCQ with all the free descriptors. */
2025 tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
2026 tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
2027 for(i=1; i<=iadev->num_tx_desc; i++)
2029 *tcq_start = (u_short)i;
2033 /* Packet Ready Queue */
2034 i = PKT_RDY_Q * iadev->memSize;
2035 writew(i, iadev->seg_reg+PRQ_ST_ADR);
2036 writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2037 iadev->seg_reg+PRQ_ED_ADR);
2038 writew(i, iadev->seg_reg+PRQ_RD_PTR);
2039 writew(i, iadev->seg_reg+PRQ_WR_PTR);
2041 /* Load local copy of PRQ and TCQ ptrs */
2042 iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2043 iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2044 iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2046 iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2047 iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2048 iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2050 /* Just for safety initializing the queue to have desc 1 always */
2051 /* Fill the PRQ with all the free descriptors. */
2052 prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2053 prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2054 for(i=1; i<=iadev->num_tx_desc; i++)
2056 *prq_start = (u_short)0; /* desc 1 in all entries */
2060 IF_INIT(printk("Start CBR Init\n");)
2061 #if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2062 writew(0,iadev->seg_reg+CBR_PTR_BASE);
2063 #else /* Charlie's logic is wrong ? */
2064 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2065 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2066 writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2069 IF_INIT(printk("value in register = 0x%x\n",
2070 readw(iadev->seg_reg+CBR_PTR_BASE));)
/* CBR schedule table begin/end/current-pointer registers (word units). */
2071 tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2072 writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2073 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2074 readw(iadev->seg_reg+CBR_TAB_BEG));)
2075 writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2076 tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2077 writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2078 IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2079 (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2080 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2081 readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2082 readw(iadev->seg_reg+CBR_TAB_END+1));)
2083 tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
2085 /* Initialize the CBR Schedualing Table */
2086 memset((caddr_t)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize),
2087 0, iadev->num_vc*6);
2088 iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2089 iadev->CbrEntryPt = 0;
/* Granularity: smallest CBR rate step representable in the table. */
2090 iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2091 iadev->NumEnabledCBR = 0;
2093 /* UBR scheduling Table and wait queue */
2094 /* initialize all bytes of UBR scheduler table and wait queue to 0
2095 - SCHEDSZ is 1K (# of entries).
2096 - UBR Table size is 4K
2097 - UBR wait queue is 4K
2098 since the table and wait queues are contiguous, all the bytes
2099 can be intialized by one memeset.
/* Derive vcsize_sel from num_vc (selection loop partially elided). */
2104 while (i != iadev->num_vc) {
2109 i = MAIN_VC_TABLE * iadev->memSize;
2110 writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2111 i = EXT_VC_TABLE * iadev->memSize;
2112 writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2113 i = UBR_SCHED_TABLE * iadev->memSize;
2114 writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
2115 i = UBR_WAIT_Q * iadev->memSize;
2116 writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
2117 memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2118 0, iadev->num_vc*8);
2119 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2120 /* initialize all bytes of ABR scheduler table and wait queue to 0
2121 - SCHEDSZ is 1K (# of entries).
2122 - ABR Table size is 2K
2123 - ABR wait queue is 2K
2124 since the table and wait queues are contiguous, all the bytes
2125 can be intialized by one memeset.
2127 i = ABR_SCHED_TABLE * iadev->memSize;
2128 writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2129 i = ABR_WAIT_Q * iadev->memSize;
2130 writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2132 i = ABR_SCHED_TABLE*iadev->memSize;
2133 memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
2134 vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2135 evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
/* Per-VC driver bookkeeping (rate test state), one entry per VC. */
2136 iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
2137 if (!iadev->testTable) {
2138 printk("Get freepage failed\n");
2139 goto err_free_desc_tbl;
2141 for(i=0; i<iadev->num_vc; i++)
2143 memset((caddr_t)vc, 0, sizeof(*vc));
2144 memset((caddr_t)evc, 0, sizeof(*evc));
2145 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2147 if (!iadev->testTable[i])
2148 goto err_free_test_tables;
2149 iadev->testTable[i]->lastTime = 0;
2150 iadev->testTable[i]->fract = 0;
2151 iadev->testTable[i]->vc_status = VC_UBR;
2156 /* Other Initialization */
2158 /* Max Rate Register */
2159 if (iadev->phy_type & FE_25MBIT_PHY) {
2160 writew(RATE25, iadev->seg_reg+MAXRATE);
2161 writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2164 writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2165 writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2167 /* Set Idle Header Reigisters to be sure */
2168 writew(0, iadev->seg_reg+IDLEHEADHI);
2169 writew(0, iadev->seg_reg+IDLEHEADLO);
2171 /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
2172 writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2174 iadev->close_pending = 0;
/* Wait-queue API changed in 2.3.x; support both kernel generations. */
2175 #if LINUX_VERSION_CODE >= 0x20303
2176 init_waitqueue_head(&iadev->close_wait);
2177 init_waitqueue_head(&iadev->timeout_wait);
2179 iadev->close_wait = NULL;
2180 iadev->timeout_wait = NULL;
2182 skb_queue_head_init(&iadev->tx_dma_q);
2183 ia_init_rtn_q(&iadev->tx_return_q);
2185 /* RM Cell Protocol ID and Message Type */
2186 writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2187 skb_queue_head_init (&iadev->tx_backlog);
2189 /* Mode Register 1 */
2190 writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2192 /* Mode Register 0 */
2193 writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2195 /* Interrupt Status Register - read to clear */
2196 readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2198 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2199 writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2200 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2201 iadev->tx_pkt_cnt = 0;
/* Threshold below which VCs are treated as rate-limited (see open_tx). */
2202 iadev->rate_limit = iadev->LineRate / 3;
/* ---- error unwind: free everything allocated above, innermost first. */
2206 err_free_test_tables:
2208 kfree(iadev->testTable[i]);
2209 kfree(iadev->testTable);
2211 kfree(iadev->desc_tbl);
2212 err_free_all_tx_bufs:
2213 i = iadev->num_tx_desc;
2216 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2218 pci_unmap_single(iadev->pci, desc->dma_addr,
2219 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2222 kfree(iadev->tx_buf);
2224 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
/*
 * ia_int() - top-level PCI interrupt handler.
 *
 * Loops while any of the low 7 bits of the bus status register are set,
 * dispatching each cause: reassembly interrupt (rx_intr path), RX DLE
 * done, segmentation interrupt (tx_intr path), TX DLE done, and
 * front-end/error/mark conditions (IaFrontEndIntr for STAT_FEINT).
 * The STAT_DLE*INT bits are write-one-to-clear.
 * NOTE(review): the handler-call lines inside each branch are elided.
 */
2230 static void ia_int(int irq, void *dev_id, struct pt_regs *regs)
2232 struct atm_dev *dev;
2234 unsigned int status;
2237 iadev = INPH_IA_DEV(dev);
2238 while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2240 IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2241 if (status & STAT_REASSINT)
2244 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2247 if (status & STAT_DLERINT)
2249 /* Clear this bit by writing a 1 to it. */
2250 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2253 if (status & STAT_SEGINT)
2256 IF_EVENT(printk("IA: tx_intr \n");)
2259 if (status & STAT_DLETINT)
2261 *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
2264 if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2266 if (status & STAT_FEINT)
2267 IaFrontEndIntr(iadev);
2274 /*----------------------------- entries --------------------------------*/
2275 static int get_esi(struct atm_dev *dev)
2282 iadev = INPH_IA_DEV(dev);
2283 mac1 = cpu_to_be32(le32_to_cpu(readl(
2284 iadev->reg+IPHASE5575_MAC1)));
2285 mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2286 IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2287 for (i=0; i<MAC1_LEN; i++)
2288 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2290 for (i=0; i<MAC2_LEN; i++)
2291 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2295 static int reset_sar(struct atm_dev *dev)
2299 unsigned int pci[64];
2301 iadev = INPH_IA_DEV(dev);
2303 if ((error = pci_read_config_dword(iadev->pci,
2304 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2306 writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2308 if ((error = pci_write_config_dword(iadev->pci,
2309 i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2316 #if LINUX_VERSION_CODE >= 0x20312
2317 static int __init ia_init(struct atm_dev *dev)
2319 __initfunc(static int ia_init(struct atm_dev *dev))
2323 unsigned long real_base, base;
2324 unsigned short command;
2325 unsigned char revision;
2328 /* The device has been identified and registered. Now we read
2329 necessary configuration info like memory base address,
2330 interrupt number etc */
2332 IF_INIT(printk(">ia_init\n");)
2333 dev->ci_range.vpi_bits = 0;
2334 dev->ci_range.vci_bits = NR_VCI_LD;
2336 iadev = INPH_IA_DEV(dev);
2337 real_base = pci_resource_start (iadev->pci, 0);
2338 iadev->irq = iadev->pci->irq;
2340 if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND,&command))
2341 || (error = pci_read_config_byte(iadev->pci,
2342 PCI_REVISION_ID,&revision)))
2344 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
2348 IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
2349 dev->number, revision, real_base, iadev->irq);)
2351 /* find mapping size of board */
2353 iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2355 if (iadev->pci_map_size == 0x100000){
2356 iadev->num_vc = 4096;
2357 dev->ci_range.vci_bits = NR_VCI_4K_LD;
2360 else if (iadev->pci_map_size == 0x40000) {
2361 iadev->num_vc = 1024;
2365 printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2368 IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
2370 /* enable bus mastering */
2371 pci_set_master(iadev->pci);
2374 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
2378 /* mapping the physical address to a virtual address in address space */
2379 base=(unsigned long)ioremap((unsigned long)real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */
2383 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2387 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=0x%lx,irq=%d\n",
2388 dev->number, revision, base, iadev->irq);)
2390 /* filling the iphase dev structure */
2391 iadev->mem = iadev->pci_map_size /2;
2392 iadev->base_diff = real_base - base;
2393 iadev->real_base = real_base;
2396 /* Bus Interface Control Registers */
2397 iadev->reg = (u32 *) (base + REG_BASE);
2398 /* Segmentation Control Registers */
2399 iadev->seg_reg = (u32 *) (base + SEG_BASE);
2400 /* Reassembly Control Registers */
2401 iadev->reass_reg = (u32 *) (base + REASS_BASE);
2402 /* Front end/ DMA control registers */
2403 iadev->phy = (u32 *) (base + PHY_BASE);
2404 iadev->dma = (u32 *) (base + PHY_BASE);
2405 /* RAM - Segmentation RAm and Reassembly RAM */
2406 iadev->ram = (u32 *) (base + ACTUAL_RAM_BASE);
2407 iadev->seg_ram = (base + ACTUAL_SEG_RAM_BASE);
2408 iadev->reass_ram = (base + ACTUAL_REASS_RAM_BASE);
2410 /* lets print out the above */
2411 IF_INIT(printk("Base addrs: %08x %08x %08x \n %08x %08x %08x %08x\n",
2412 (u32)iadev->reg,(u32)iadev->seg_reg,(u32)iadev->reass_reg,
2413 (u32)iadev->phy, (u32)iadev->ram, (u32)iadev->seg_ram,
2414 (u32)iadev->reass_ram);)
2416 /* lets try reading the MAC address */
2417 error = get_esi(dev);
2419 iounmap((void *) iadev->base);
2423 for (i=0; i < ESI_LEN; i++)
2424 printk("%s%02X",i ? "-" : "",dev->esi[i]);
2428 if (reset_sar(dev)) {
2429 iounmap((void *) iadev->base);
2430 printk("IA: reset SAR fail, please try again\n");
2436 static void ia_update_stats(IADEV *iadev) {
2437 if (!iadev->carrier_detect)
2439 iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2440 iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2441 iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2442 iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2443 iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2444 iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2448 static void ia_led_timer(unsigned long arg) {
2449 unsigned long flags;
2450 static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2452 static u32 ctrl_reg;
2453 for (i = 0; i < iadev_count; i++) {
2455 ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2456 if (blinking[i] == 0) {
2458 ctrl_reg &= (~CTRL_LED);
2459 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2460 ia_update_stats(ia_dev[i]);
2464 ctrl_reg |= CTRL_LED;
2465 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2466 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2467 if (ia_dev[i]->close_pending)
2468 wake_up(&ia_dev[i]->close_wait);
2469 ia_tx_poll(ia_dev[i]);
2470 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2474 mod_timer(&ia_timer, jiffies + HZ / 4);
2478 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2481 writel(value, INPH_IA_DEV(dev)->phy+addr);
2484 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2486 return readl(INPH_IA_DEV(dev)->phy+addr);
2489 static void ia_free_tx(IADEV *iadev)
2493 kfree(iadev->desc_tbl);
2494 for (i = 0; i < iadev->num_vc; i++)
2495 kfree(iadev->testTable[i]);
2496 kfree(iadev->testTable);
2497 for (i = 0; i < iadev->num_tx_desc; i++) {
2498 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2500 pci_unmap_single(iadev->pci, desc->dma_addr,
2501 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2504 kfree(iadev->tx_buf);
2505 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2509 static void ia_free_rx(IADEV *iadev)
2511 kfree(iadev->rx_open);
2512 pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2516 #if LINUX_VERSION_CODE >= 0x20312
2517 static int __init ia_start(struct atm_dev *dev)
2519 __initfunc(static int ia_start(struct atm_dev *dev))
2526 IF_EVENT(printk(">ia_start\n");)
2527 iadev = INPH_IA_DEV(dev);
2528 if (request_irq(iadev->irq, &ia_int, SA_SHIRQ, DEV_LABEL, dev)) {
2529 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
2530 dev->number, iadev->irq);
2534 /* @@@ should release IRQ on error */
2535 /* enabling memory + master */
2536 if ((error = pci_write_config_word(iadev->pci,
2538 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
2540 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
2541 "master (0x%x)\n",dev->number, error);
2547 /* Maybe we should reset the front end, initialize Bus Interface Control
2548 Registers and see. */
2550 IF_INIT(printk("Bus ctrl reg: %08x\n",
2551 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2552 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2553 ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
2561 | CTRL_DLETMASK /* shud be removed l8r */
2568 writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2570 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2571 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2572 printk("Bus status reg after init: %08x\n",
2573 readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2576 error = tx_init(dev);
2579 error = rx_init(dev);
2583 ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2584 writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2585 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2586 readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2587 phy = 0; /* resolve compiler complaint */
2589 if ((phy=ia_phy_get(dev,0)) == 0x30)
2590 printk("IA: pm5346,rev.%d\n",phy&0x0f);
2592 printk("IA: utopia,rev.%0x\n",phy);)
2594 if (iadev->phy_type & FE_25MBIT_PHY) {
2595 ia_mb25_init(iadev);
2596 } else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY)) {
2597 ia_suni_pm7345_init(iadev);
2599 error = suni_init(dev);
2603 * Enable interrupt on loss of signal
2604 * SUNI_RSOP_CIE - 0x10
2605 * SUNI_RSOP_CIE_LOSE - 0x04
2607 ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2609 error = dev->phy->start(dev);
2613 /* Get iadev->carrier_detect status */
2614 IaFrontEndIntr(iadev);
2623 free_irq(iadev->irq, dev);
2628 static void ia_close(struct atm_vcc *vcc)
2632 struct ia_vcc *ia_vcc;
2633 struct sk_buff *skb = NULL;
2634 struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2635 unsigned long closetime, flags;
2638 iadev = INPH_IA_DEV(vcc->dev);
2639 ia_vcc = INPH_IA_VCC(vcc);
2640 if (!ia_vcc) return;
2642 IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
2643 ia_vcc->vc_desc_cnt,vcc->vci);)
2644 clear_bit(ATM_VF_READY,&vcc->flags);
2645 skb_queue_head_init (&tmp_tx_backlog);
2646 skb_queue_head_init (&tmp_vcc_backlog);
2647 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2648 iadev->close_pending++;
2649 sleep_on_timeout(&iadev->timeout_wait, 50);
2650 spin_lock_irqsave(&iadev->tx_lock, flags);
2651 while((skb = skb_dequeue(&iadev->tx_backlog))) {
2652 if (ATM_SKB(skb)->vcc == vcc){
2653 if (vcc->pop) vcc->pop(vcc, skb);
2654 else dev_kfree_skb_any(skb);
2657 skb_queue_tail(&tmp_tx_backlog, skb);
2659 while((skb = skb_dequeue(&tmp_tx_backlog)))
2660 skb_queue_tail(&iadev->tx_backlog, skb);
2661 IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
2662 closetime = jiffies;
2663 ctimeout = 300000 / ia_vcc->pcr;
2666 while (ia_vcc->vc_desc_cnt > 0){
2667 if ((jiffies - closetime) >= ctimeout)
2669 spin_unlock_irqrestore(&iadev->tx_lock, flags);
2670 sleep_on(&iadev->close_wait);
2671 spin_lock_irqsave(&iadev->tx_lock, flags);
2673 iadev->close_pending--;
2674 iadev->testTable[vcc->vci]->lastTime = 0;
2675 iadev->testTable[vcc->vci]->fract = 0;
2676 iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2677 if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2678 if (vcc->qos.txtp.min_pcr > 0)
2679 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2681 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2682 ia_vcc = INPH_IA_VCC(vcc);
2683 iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2684 ia_cbrVc_close (vcc);
2686 spin_unlock_irqrestore(&iadev->tx_lock, flags);
2689 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2690 // reset reass table
2691 vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2692 vc_table += vcc->vci;
2693 *vc_table = NO_AAL5_PKT;
2695 vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2696 vc_table += vcc->vci;
2697 *vc_table = (vcc->vci << 6) | 15;
2698 if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2699 struct abr_vc_table *abr_vc_table = (struct abr_vc_table *)
2700 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2701 abr_vc_table += vcc->vci;
2702 abr_vc_table->rdf = 0x0003;
2703 abr_vc_table->air = 0x5eb1;
2705 // Drain the packets
2706 rx_dle_intr(vcc->dev);
2707 iadev->rx_open[vcc->vci] = 0;
2709 kfree(INPH_IA_VCC(vcc));
2711 INPH_IA_VCC(vcc) = NULL;
2712 clear_bit(ATM_VF_ADDR,&vcc->flags);
2716 static int ia_open(struct atm_vcc *vcc, short vpi, int vci)
2719 struct ia_vcc *ia_vcc;
2721 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2723 IF_EVENT(printk("ia: not partially allocated resources\n");)
2724 INPH_IA_VCC(vcc) = NULL;
2726 iadev = INPH_IA_DEV(vcc->dev);
2727 error = atm_find_ci(vcc, &vpi, &vci);
2730 printk("iadev: atm_find_ci returned error %d\n", error);
2735 if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
2737 IF_EVENT(printk("iphase open: unspec part\n");)
2738 set_bit(ATM_VF_ADDR,&vcc->flags);
2740 if (vcc->qos.aal != ATM_AAL5)
2742 IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2743 vcc->dev->number, vcc->vpi, vcc->vci);)
2745 /* Device dependent initialization */
2746 ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2747 if (!ia_vcc) return -ENOMEM;
2748 INPH_IA_VCC(vcc) = ia_vcc;
2750 if ((error = open_rx(vcc)))
2752 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2757 if ((error = open_tx(vcc)))
2759 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2764 set_bit(ATM_VF_READY,&vcc->flags);
2768 static u8 first = 1;
2770 ia_timer.expires = jiffies + 3*HZ;
2771 add_timer(&ia_timer);
2776 IF_EVENT(printk("ia open returning\n");)
2780 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
2782 IF_EVENT(printk(">ia_change_qos\n");)
2786 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
2792 IF_EVENT(printk(">ia_ioctl\n");)
2793 if (cmd != IA_CMD) {
2794 if (!dev->phy->ioctl) return -EINVAL;
2795 return dev->phy->ioctl(dev,cmd,arg);
2797 if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2798 board = ia_cmds.status;
2799 if ((board < 0) || (board > iadev_count))
2801 iadev = ia_dev[board];
2802 switch (ia_cmds.cmd) {
2805 switch (ia_cmds.sub_cmd) {
2807 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2808 if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2812 case MEMDUMP_SEGREG:
2813 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2814 tmps = (u16 *)ia_cmds.buf;
2815 for(i=0; i<0x80; i+=2, tmps++)
2816 if(put_user(*(u16*)(iadev->seg_reg+i), tmps)) return -EFAULT;
2820 case MEMDUMP_REASSREG:
2821 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2822 tmps = (u16 *)ia_cmds.buf;
2823 for(i=0; i<0x80; i+=2, tmps++)
2824 if(put_user(*(u16*)(iadev->reass_reg+i), tmps)) return -EFAULT;
2830 ia_regs_t regs_local;
2831 ffredn_t *ffL = ®s_local.ffredn;
2832 rfredn_t *rfL = ®s_local.rfredn;
2834 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2835 /* Copy real rfred registers into the local copy */
2836 for (i=0; i<(sizeof (rfredn_t))/4; i++)
2837 ((u_int *)rfL)[i] = ((u_int *)iadev->reass_reg)[i] & 0xffff;
2838 /* Copy real ffred registers into the local copy */
2839 for (i=0; i<(sizeof (ffredn_t))/4; i++)
2840 ((u_int *)ffL)[i] = ((u_int *)iadev->seg_reg)[i] & 0xffff;
2842 if (copy_to_user(ia_cmds.buf, ®s_local,sizeof(ia_regs_t)))
2844 printk("Board %d registers dumped\n", board);
2850 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2858 printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2859 printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2864 struct k_sonet_stats *stats;
2865 stats = &PRIV(_ia_dev[board])->sonet_stats;
2866 printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2867 printk("line_bip : %d\n", atomic_read(&stats->line_bip));
2868 printk("path_bip : %d\n", atomic_read(&stats->path_bip));
2869 printk("line_febe : %d\n", atomic_read(&stats->line_febe));
2870 printk("path_febe : %d\n", atomic_read(&stats->path_febe));
2871 printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
2872 printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2873 printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
2874 printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
2879 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2880 for (i = 1; i <= iadev->num_rx_desc; i++)
2881 free_desc(_ia_dev[board], i);
2882 writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2883 iadev->reass_reg+REASS_MASK_REG);
2890 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2891 IaFrontEndIntr(iadev);
2894 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2897 IADebugFlag = ia_cmds.maddr;
2898 printk("New debug option loaded\n");
2914 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2915 void *optval, int optlen)
2917 IF_EVENT(printk(">ia_getsockopt\n");)
2921 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2922 void *optval, int optlen)
2924 IF_EVENT(printk(">ia_setsockopt\n");)
2928 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2931 struct tx_buf_desc *buf_desc_ptr;
2934 int total_len, pad, last;
2935 struct cpcs_trailer *trailer;
2936 struct ia_vcc *iavcc;
2938 iadev = INPH_IA_DEV(vcc->dev);
2939 iavcc = INPH_IA_VCC(vcc);
2940 if (!iavcc->txing) {
2941 printk("discard packet on closed VC\n");
2945 dev_kfree_skb_any(skb);
2949 if (skb->len > iadev->tx_buf_sz - 8) {
2950 printk("Transmit size over tx buffer size\n");
2954 dev_kfree_skb_any(skb);
2957 if ((u32)skb->data & 3) {
2958 printk("Misaligned SKB\n");
2962 dev_kfree_skb_any(skb);
2965 /* Get a descriptor number from our free descriptor queue
2966 We get the descr number from the TCQ now, since I am using
2967 the TCQ as a free buffer queue. Initially TCQ will be
2968 initialized with all the descriptors and is hence, full.
2970 desc = get_desc (iadev, iavcc);
2973 comp_code = desc >> 13;
2976 if ((desc == 0) || (desc > iadev->num_tx_desc))
2978 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2979 atomic_inc(&vcc->stats->tx);
2983 dev_kfree_skb_any(skb);
2984 return 0; /* return SUCCESS */
2989 IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2993 /* remember the desc and vcc mapping */
2994 iavcc->vc_desc_cnt++;
2995 iadev->desc_tbl[desc-1].iavcc = iavcc;
2996 iadev->desc_tbl[desc-1].txskb = skb;
2997 IA_SKB_STATE(skb) = 0;
2999 iadev->ffL.tcq_rd += 2;
3000 if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
3001 iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
3002 writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
3004 /* Put the descriptor number in the packet ready queue
3005 and put the updated write pointer in the DLE field
3007 *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
3009 iadev->ffL.prq_wr += 2;
3010 if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
3011 iadev->ffL.prq_wr = iadev->ffL.prq_st;
3013 /* Figure out the exact length of the packet and padding required to
3014 make it aligned on a 48 byte boundary. */
3015 total_len = skb->len + sizeof(struct cpcs_trailer);
3016 last = total_len - (total_len/48)*48;
3018 total_len = pad + total_len;
3019 IF_TX(printk("ia packet len:%d padding:%d\n", total_len, pad);)
3021 /* Put the packet in a tx buffer */
3022 trailer = iadev->tx_buf[desc-1].cpcs;
3023 IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
3024 (u32)skb, (u32)skb->data, skb->len, desc);)
3025 trailer->control = 0;
3027 trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
3028 trailer->crc32 = 0; /* not needed - dummy bytes */
3030 /* Display the packet */
3031 IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
3032 skb->len, tcnter++);
3033 xdump(skb->data, skb->len, "TX: ");
3036 /* Build the buffer descriptor */
3037 buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
3038 buf_desc_ptr += desc; /* points to the corresponding entry */
3039 buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
3040 /* Huh ? p.115 of users guide describes this as a read-only register */
3041 writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3042 buf_desc_ptr->vc_index = vcc->vci;
3043 buf_desc_ptr->bytes = total_len;
3045 if (vcc->qos.txtp.traffic_class == ATM_ABR)
3046 clear_lockup (vcc, iadev);
3048 /* Build the DLE structure */
3049 wr_ptr = iadev->tx_dle_q.write;
3050 memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3051 wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3052 skb->len, PCI_DMA_TODEVICE);
3053 wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3054 buf_desc_ptr->buf_start_lo;
3055 /* wr_ptr->bytes = swap(total_len); didn't seem to affect ?? */
3056 wr_ptr->bytes = skb->len;
3058 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3059 if ((wr_ptr->bytes >> 2) == 0xb)
3060 wr_ptr->bytes = 0x30;
3062 wr_ptr->mode = TX_DLE_PSI;
3063 wr_ptr->prq_wr_ptr_data = 0;
3065 /* end is not to be used for the DLE q */
3066 if (++wr_ptr == iadev->tx_dle_q.end)
3067 wr_ptr = iadev->tx_dle_q.start;
3069 /* Build trailer dle */
3070 wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3071 wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3072 buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3074 wr_ptr->bytes = sizeof(struct cpcs_trailer);
3075 wr_ptr->mode = DMA_INT_ENABLE;
3076 wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3078 /* end is not to be used for the DLE q */
3079 if (++wr_ptr == iadev->tx_dle_q.end)
3080 wr_ptr = iadev->tx_dle_q.start;
3082 iadev->tx_dle_q.write = wr_ptr;
3083 ATM_DESC(skb) = vcc->vci;
3084 skb_queue_tail(&iadev->tx_dma_q, skb);
3086 atomic_inc(&vcc->stats->tx);
3087 iadev->tx_pkt_cnt++;
3088 /* Increment transaction counter */
3089 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3092 /* add flow control logic */
3093 if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3094 if (iavcc->vc_desc_cnt > 10) {
3095 vcc->tx_quota = vcc->tx_quota * 3 / 4;
3096 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3097 iavcc->flow_inc = -1;
3098 iavcc->saved_tx_quota = vcc->tx_quota;
3099 } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3100 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3101 printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3102 iavcc->flow_inc = 0;
3106 IF_TX(printk("ia send done\n");)
3110 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3113 struct ia_vcc *iavcc;
3114 unsigned long flags;
3116 iadev = INPH_IA_DEV(vcc->dev);
3117 iavcc = INPH_IA_VCC(vcc);
3118 if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3121 printk(KERN_CRIT "null skb in ia_send\n");
3122 else dev_kfree_skb_any(skb);
3125 spin_lock_irqsave(&iadev->tx_lock, flags);
3126 if (!test_bit(ATM_VF_READY,&vcc->flags)){
3127 dev_kfree_skb_any(skb);
3128 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3131 ATM_SKB(skb)->vcc = vcc;
3133 if (skb_peek(&iadev->tx_backlog)) {
3134 skb_queue_tail(&iadev->tx_backlog, skb);
3137 if (ia_pkt_tx (vcc, skb)) {
3138 skb_queue_tail(&iadev->tx_backlog, skb);
3141 spin_unlock_irqrestore(&iadev->tx_lock, flags);
3146 static int ia_sg_send(struct atm_vcc *vcc, unsigned long start,
3149 IF_EVENT(printk(">ia_sg_send\n");)
3154 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3158 IADEV *iadev = INPH_IA_DEV(dev);
3160 if (iadev->phy_type == FE_25MBIT_PHY) {
3161 n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
3164 if (iadev->phy_type == FE_DS3_PHY)
3165 n = sprintf(page, " Board Type : Iphase-ATM-DS3");
3166 else if (iadev->phy_type == FE_E3_PHY)
3167 n = sprintf(page, " Board Type : Iphase-ATM-E3");
3168 else if (iadev->phy_type == FE_UTP_OPTION)
3169 n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
3171 n = sprintf(page, " Board Type : Iphase-ATM-OC3");
3173 if (iadev->pci_map_size == 0x40000)
3174 n += sprintf(tmpPtr, "-1KVC-");
3176 n += sprintf(tmpPtr, "-4KVC-");
3178 if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3179 n += sprintf(tmpPtr, "1M \n");
3180 else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3181 n += sprintf(tmpPtr, "512K\n");
3183 n += sprintf(tmpPtr, "128K\n");
3187 return sprintf(page, " Number of Tx Buffer: %u\n"
3188 " Size of Tx Buffer : %u\n"
3189 " Number of Rx Buffer: %u\n"
3190 " Size of Rx Buffer : %u\n"
3191 " Packets Receiverd : %u\n"
3192 " Packets Transmitted: %u\n"
3193 " Cells Received : %u\n"
3194 " Cells Transmitted : %u\n"
3195 " Board Dropped Cells: %u\n"
3196 " Board Dropped Pkts : %u\n",
3197 iadev->num_tx_desc, iadev->tx_buf_sz,
3198 iadev->num_rx_desc, iadev->rx_buf_sz,
3199 iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
3200 iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3201 iadev->drop_rxcell, iadev->drop_rxpkt);
3206 static const struct atmdev_ops ops = {
3210 getsockopt: ia_getsockopt,
3211 setsockopt: ia_setsockopt,
3213 sg_send: ia_sg_send,
3214 phy_put: ia_phy_put,
3215 phy_get: ia_phy_get,
3216 change_qos: ia_change_qos,
3217 proc_read: ia_proc_read,
3222 static int __devinit ia_init_one(struct pci_dev *pdev,
3223 const struct pci_device_id *ent)
3225 struct atm_dev *dev;
3227 unsigned long flags;
3230 iadev = kmalloc(sizeof(*iadev), GFP_KERNEL);
3235 memset(iadev, 0, sizeof(*iadev));
3238 IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3239 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3240 if (pci_enable_device(pdev)) {
3242 goto err_out_free_iadev;
3244 dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3247 goto err_out_disable_dev;
3249 INPH_IA_DEV(dev) = iadev;
3250 IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3251 IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
3254 ia_dev[iadev_count] = iadev;
3255 _ia_dev[iadev_count] = dev;
3257 spin_lock_init(&iadev->misc_lock);
3258 /* First fixes first. I don't want to think about this now. */
3259 spin_lock_irqsave(&iadev->misc_lock, flags);
3260 if (ia_init(dev) || ia_start(dev)) {
3261 IF_INIT(printk("IA register failed!\n");)
3263 ia_dev[iadev_count] = NULL;
3264 _ia_dev[iadev_count] = NULL;
3265 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3267 goto err_out_deregister_dev;
3269 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3270 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3272 iadev->next_board = ia_boards;
3275 pci_set_drvdata(pdev, dev);
3279 err_out_deregister_dev:
3280 atm_dev_deregister(dev);
3281 err_out_disable_dev:
3282 pci_disable_device(pdev);
3289 static void __devexit ia_remove_one(struct pci_dev *pdev)
3291 struct atm_dev *dev = pci_get_drvdata(pdev);
3292 IADEV *iadev = INPH_IA_DEV(dev);
3294 ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10);
3297 /* De-register device */
3298 free_irq(iadev->irq, dev);
3300 ia_dev[iadev_count] = NULL;
3301 _ia_dev[iadev_count] = NULL;
3302 atm_dev_deregister(dev);
3303 IF_EVENT(printk("iav deregistered at (itf:%d)\n", dev->number);)
3305 iounmap((void *) iadev->base);
3306 pci_disable_device(pdev);
3314 static struct pci_device_id ia_pci_tbl[] __devinitdata = {
3315 { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3316 { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3319 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3321 static struct pci_driver ia_driver = {
3323 .id_table = ia_pci_tbl,
3324 .probe = ia_init_one,
3325 .remove = ia_remove_one,
3328 static int __init ia_init_module(void)
3332 ret = pci_module_init(&ia_driver);
3334 ia_timer.expires = jiffies + 3*HZ;
3335 add_timer(&ia_timer);
3340 static void __exit ia_cleanup_module(void)
3342 pci_unregister_driver(&ia_driver);
3344 del_timer(&ia_timer);
3347 module_init(ia_init_module);
3348 module_exit(ia_cleanup_module);