1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for the Interphase 5575 1KVC 1M card
20       originally written by Monalisa Agrawal at UNH. This driver now supports
21       a variety of variants of the Interphase ATM PCI (i)Chip adapter card
22       family (see www.iphase.com/products/ClassSheet.cfm?ClassID=ATM), which
23       differ in PHY type, control memory size and packet memory size.
24       The following is the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and DMA logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add CBR support.
32           Add flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to boards with 512K control memory.
34           Add support for all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #ifdef IA_MODULE
44 #define MODULE
45 #endif
46 #include <linux/version.h>
47 #include <linux/module.h>  
48 #include <linux/kernel.h>  
49 #include <linux/mm.h>  
50 #include <linux/pci.h>  
51 #include <linux/errno.h>  
52 #include <linux/atm.h>  
53 #include <linux/atmdev.h>  
54 #include <linux/sonet.h>  
55 #include <linux/skbuff.h>  
56 #include <linux/time.h>  
57 #include <linux/sched.h> /* for xtime */  
58 #include <linux/delay.h>  
59 #include <linux/uio.h>  
60 #include <linux/init.h>  
61 #include <asm/system.h>  
62 #include <asm/io.h>  
63 #include <asm/atomic.h>  
64 #include <asm/uaccess.h>  
65 #include <asm/string.h>  
66 #include <asm/byteorder.h>  
67 #include <linux/vmalloc.h>  
68 #include "iphase.h"               
69 #include "suni.h"                 
70 #define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))  /* 16-bit byte swap */
71 struct suni_priv {
72         struct k_sonet_stats sonet_stats; /* link diagnostics */
73         unsigned char loop_mode;        /* loopback mode */
74         struct atm_dev *dev;            /* device back-pointer */
75         struct suni_priv *next;         /* next SUNI */
76 }; 
77 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
78
79 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
80
81 static IADEV *ia_dev[8];
82 static struct atm_dev *_ia_dev[8];
83 static int iadev_count;
84 static void ia_led_timer(unsigned long arg);
85 static struct timer_list ia_timer = { function: ia_led_timer };
86 struct atm_vcc *vcc_close_que[100];
87 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
88 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
89 static u32 IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
90             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
91
92 #ifdef MODULE
93 MODULE_PARM(IA_TX_BUF, "i");
94 MODULE_PARM(IA_TX_BUF_SZ, "i");
95 MODULE_PARM(IA_RX_BUF, "i");
96 MODULE_PARM(IA_RX_BUF_SZ, "i");
97 MODULE_PARM(IADebugFlag, "i");
98 #endif
99
100 MODULE_LICENSE("GPL");
101
102 #if BITS_PER_LONG != 32
103 #  error FIXME: this driver only works on 32-bit platforms
104 #endif
105
106 /**************************** IA_LIB **********************************/
107
108 static void ia_init_rtn_q (IARTN_Q *que) 
109 {
110    que->next = NULL; 
111    que->tail = NULL; 
112 }
113
114 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
115 {
116    data->next = NULL;
117    if (que->next == NULL) 
118       que->next = que->tail = data;
119    else {
120       data->next = que->next;
121       que->next = data;
122    } 
123    return;
124 }
125
126 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
127    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
128    if (!entry) return -1;
129    entry->data = data;
130    entry->next = NULL;
131    if (que->next == NULL) 
132       que->next = que->tail = entry;
133    else {
134       que->tail->next = entry;
135       que->tail = que->tail->next;
136    }      
137    return 1;
138 }
139
140 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
141    IARTN_Q *tmpdata;
142    if (que->next == NULL)
143       return NULL;
144    tmpdata = que->next;
145    if ( que->next == que->tail)  
146       que->next = que->tail = NULL;
147    else 
148       que->next = que->next->next;
149    return tmpdata;
150 }
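
/*
 * The helpers above implement the simple singly linked tx_return_q used
 * further down: ia_hack_tcq() enqueues completed transmit descriptors with
 * ia_enque_rtn_q(), while ia_tx_poll() drains the queue with
 * ia_deque_rtn_q() and pushes an entry back with ia_enque_head_rtn_q()
 * when the matching skb has not been found yet.
 */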
151
152 static void ia_hack_tcq(IADEV *dev) {
153
154   u_short               desc1;
155   u_short               tcq_wr;
156   struct ia_vcc         *iavcc_r = NULL; 
157   extern void desc_dbg(IADEV *iadev);
158
159   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
160   while (dev->host_tcq_wr != tcq_wr) {
161      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
162      if (!desc1) ;
163      else if (!dev->desc_tbl[desc1 -1].timestamp) {
164         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
165         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
166      }                                 
167      else if (dev->desc_tbl[desc1 -1].timestamp) {
168         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
169            printk("IA: Fatal err in get_desc\n");
170            continue;
171         }
172         iavcc_r->vc_desc_cnt--;
173         dev->desc_tbl[desc1 -1].timestamp = 0;
174         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n", 
175                                    (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
176         if (iavcc_r->pcr < dev->rate_limit) {
177            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
178            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
179               printk("ia_hack_tcq: No memory available\n");
180         } 
181         dev->desc_tbl[desc1 -1].iavcc = NULL;
182         dev->desc_tbl[desc1 -1].txskb = NULL;
183      }
184      dev->host_tcq_wr += 2;
185      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
186         dev->host_tcq_wr = dev->ffL.tcq_st;
187   }
188 } /* ia_hack_tcq */
189
190 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
191   u_short               desc_num, i;
192   struct sk_buff        *skb;
193   struct ia_vcc         *iavcc_r = NULL; 
194   unsigned long delta;
195   static unsigned long timer = 0;
196   int ltimeout;
197   extern void desc_dbg(IADEV *iadev);
198
199   ia_hack_tcq (dev);
200   if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){      
201      timer = jiffies; 
202      i=0;
203      while (i < dev->num_tx_desc) {
204         if (!dev->desc_tbl[i].timestamp) {
205            i++;
206            continue;
207         }
208         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
209         delta = jiffies - dev->desc_tbl[i].timestamp;
210         if (delta >= ltimeout) {
211            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld,  time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
212            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
213               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
214            else 
215               dev->ffL.tcq_rd -= 2;
216            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
217            if (!(skb = dev->desc_tbl[i].txskb) || 
218                           !(iavcc_r = dev->desc_tbl[i].iavcc))
219               printk("Fatal err, desc table vcc or skb is NULL\n");
220            else 
221               iavcc_r->vc_desc_cnt--;
222            dev->desc_tbl[i].timestamp = 0;
223            dev->desc_tbl[i].iavcc = NULL;
224            dev->desc_tbl[i].txskb = NULL;
225         }
226         i++;
227      } /* while */
228   }
229   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
230      return 0xFFFF;
231     
232   /* Get the next available descriptor number from TCQ */
233   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
234
235   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
236      dev->ffL.tcq_rd += 2;
237      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
238         dev->ffL.tcq_rd = dev->ffL.tcq_st;
239      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
240         return 0xFFFF; 
241      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
242   }
243
244   /* get system time */
245   dev->desc_tbl[desc_num -1].timestamp = jiffies;
246   return desc_num;
247 }
248
249 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
250   u_char                foundLockUp;
251   vcstatus_t            *vcstatus;
252   u_short               *shd_tbl;
253   u_short               tempCellSlot, tempFract;
254   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
255   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
256   u_int  i;
257
258   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
259      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
260      vcstatus->cnt++;
261      foundLockUp = 0;
262      if( vcstatus->cnt == 0x05 ) {
263         abr_vc += vcc->vci;
264         eabr_vc += vcc->vci;
265         if( eabr_vc->last_desc ) {
266            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
267               /* Wait for 10 Micro sec */
268               udelay(10);
269               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
270                  foundLockUp = 1;
271            }
272            else {
273               tempCellSlot = abr_vc->last_cell_slot;
274               tempFract    = abr_vc->fraction;
275               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
276                          && (tempFract == dev->testTable[vcc->vci]->fract))
277                  foundLockUp = 1;                   
278               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
279               dev->testTable[vcc->vci]->fract = tempFract; 
280            }        
281         } /* last descriptor */            
282         vcstatus->cnt = 0;      
283      } /* vcstatus->cnt */
284         
285      if (foundLockUp) {
286         IF_ABR(printk("LOCK UP found\n");) 
287         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
288         /* Wait for 10 Micro sec */
289         udelay(10); 
290         abr_vc->status &= 0xFFF8;
291         abr_vc->status |= 0x0001;  /* state is idle */
292         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
293         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
294         if (i < dev->num_vc)
295            shd_tbl[i] = vcc->vci;
296         else
297            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
298         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
299         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
300         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
301         vcstatus->cnt = 0;
302      } /* foundLockUp */
303
304   } /* if an ABR VC */
305
306
307 }
308  
309 /*
310 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
311 **
312 **  +----+----+------------------+-------------------------------+
313 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
314 **  +----+----+------------------+-------------------------------+
315 ** 
316 **    R = reserved (written as 0)
317 **    NZ = 0 if 0 cells/sec; 1 otherwise
318 **
319 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
320 */
321 static u16
322 cellrate_to_float(u32 cr)
323 {
324
325 #define NZ              0x4000
326 #define M_BITS          9               /* Number of bits in mantissa */
327 #define E_BITS          5               /* Number of bits in exponent */
328 #define M_MASK          0x1ff           
329 #define E_MASK          0x1f
330   u16   flot;
331   u32   tmp = cr & 0x00ffffff;
332   int   i   = 0;
333   if (cr == 0)
334      return 0;
335   while (tmp != 1) {
336      tmp >>= 1;
337      i++;
338   }
339   if (i == M_BITS)
340      flot = NZ | (i << M_BITS) | (cr & M_MASK);
341   else if (i < M_BITS)
342      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
343   else
344      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
345   return flot;
346 }
347
348 #if 0
349 /*
350 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
351 */
352 static u32
353 float_to_cellrate(u16 rate)
354 {
355   u32   exp, mantissa, cps;
356   if ((rate & NZ) == 0)
357      return 0;
358   exp = (rate >> M_BITS) & E_MASK;
359   mantissa = rate & M_MASK;
360   if (exp == 0)
361      return 1;
362   cps = (1 << M_BITS) | mantissa;
363   if (exp == M_BITS)
364      cps = cps;
365   else if (exp > M_BITS)
366      cps <<= (exp - M_BITS);
367   else
368      cps >>= (M_BITS - exp);
369   return cps;
370 }
371 #endif 
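
/*
 * Worked example (illustration only, not part of the driver): encoding
 * 100,000 cells/sec with cellrate_to_float() above.  100000 = 0x186a0, so
 * the highest set bit is bit 16 (i = 16 > M_BITS) and the mantissa is the
 * next nine bits, (100000 >> (16 - 9)) & M_MASK = 0x10d.  The result is
 *
 *     flot = NZ | (16 << M_BITS) | 0x10d = 0x610d
 *
 * which float_to_cellrate() would decode back to
 * (0x200 | 0x10d) << (16 - 9) = 99968 cells/sec, the nearest representable
 * rate just below the requested one.
 */
#if 0  /* sketch of a self-check based on the numbers above; the function
          name is made up for illustration and is never compiled in */
static void cellrate_float_example(void)
{
  u16 f = cellrate_to_float(100000);
  /* decode by hand: implied leading one plus mantissa, scaled by
     2^(exp - M_BITS); this simple decode only covers exp >= M_BITS,
     which holds for this example */
  u32 back = ((1 << M_BITS) | (f & M_MASK)) << (((f >> M_BITS) & E_MASK) - M_BITS);
  if (f != 0x610d || back != 99968)
     printk("cellrate_to_float example mismatch: 0x%x %u\n", f, back);
}
#endif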
372
373 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
374   srv_p->class_type = ATM_ABR;
375   srv_p->pcr        = dev->LineRate;
376   srv_p->mcr        = 0;
377   srv_p->icr        = 0x055cb7;
378   srv_p->tbe        = 0xffffff;
379   srv_p->frtt       = 0x3a;
380   srv_p->rif        = 0xf;
381   srv_p->rdf        = 0xb;
382   srv_p->nrm        = 0x4;
383   srv_p->trm        = 0x7;
384   srv_p->cdf        = 0x3;
385   srv_p->adtf       = 50;
386 }
387
388 static int
389 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
390                                                 struct atm_vcc *vcc, u8 flag)
391 {
392   f_vc_abr_entry  *f_abr_vc;
393   r_vc_abr_entry  *r_abr_vc;
394   u32           icr;
395   u8            trm, nrm, crm;
396   u16           adtf, air, *ptr16;      
397   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
398   f_abr_vc += vcc->vci;       
399   switch (flag) {
400      case 1: /* FFRED initialization */
401 #if 0  /* sanity check */
402        if (srv_p->pcr == 0)
403           return INVALID_PCR;
404        if (srv_p->pcr > dev->LineRate)
405           srv_p->pcr = dev->LineRate;
406        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
407           return MCR_UNAVAILABLE;
408        if (srv_p->mcr > srv_p->pcr)
409           return INVALID_MCR;
410        if (!(srv_p->icr))
411           srv_p->icr = srv_p->pcr;
412        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
413           return INVALID_ICR;
414        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
415           return INVALID_TBE;
416        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
417           return INVALID_FRTT;
418        if (srv_p->nrm > MAX_NRM)
419           return INVALID_NRM;
420        if (srv_p->trm > MAX_TRM)
421           return INVALID_TRM;
422        if (srv_p->adtf > MAX_ADTF)
423           return INVALID_ADTF;
424        else if (srv_p->adtf == 0)
425           srv_p->adtf = 1;
426        if (srv_p->cdf > MAX_CDF)
427           return INVALID_CDF;
428        if (srv_p->rif > MAX_RIF)
429           return INVALID_RIF;
430        if (srv_p->rdf > MAX_RDF)
431           return INVALID_RDF;
432 #endif
433        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
434        f_abr_vc->f_vc_type = ABR;
435        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
436                                   /* i.e 2**n = 2 << (n-1) */
437        f_abr_vc->f_nrm = nrm << 8 | nrm;
438        trm = 100000/(2 << (16 - srv_p->trm));
439        if ( trm == 0) trm = 1;
440        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
441        crm = srv_p->tbe / nrm;
442        if (crm == 0) crm = 1;
443        f_abr_vc->f_crm = crm & 0xff;
444        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
445        icr = MIN( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
446                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
447                                 (1000000/(srv_p->frtt/srv_p->tbe)));
448        f_abr_vc->f_icr = cellrate_to_float(icr);
449        adtf = (10000 * srv_p->adtf)/8192;
450        if (adtf == 0) adtf = 1; 
451        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
452        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
453        f_abr_vc->f_acr = f_abr_vc->f_icr;
454        f_abr_vc->f_status = 0x0042;
455        break;
456     case 0: /* RFRED initialization */  
457        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
458        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
459        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
460        r_abr_vc += vcc->vci;
461        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
462        air = srv_p->pcr << (15 - srv_p->rif);
463        if (air == 0) air = 1;
464        r_abr_vc->r_air = cellrate_to_float(air);
465        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
466        dev->sum_mcr        += srv_p->mcr;
467        dev->n_abr++;
468        break;
469     default:
470        break;
471   }
472   return        0;
473 }
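
/*
 * Worked example (illustration only): with the defaults that init_abr_vc()
 * loads above, the FFRED conversions come out as
 *
 *     nrm  = 2 << 4              = 32
 *     trm  = 100000 / (2 << 9)   = 97
 *     crm  = 0xffffff / 32       = 524287 (only the low byte, 0xff,
 *                                  fits into f_crm)
 *     adtf = (10000 * 50) / 8192 = 61
 *     icr  = MIN(0x055cb7, ...)  = 351415 cells/sec, i.e. the configured
 *            ICR, since tbe/frtt * 1000000 is far larger here.
 */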
474 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
475    u32 rateLow=0, rateHigh, rate;
476    int entries;
477    struct ia_vcc *ia_vcc;
478
479    int   idealSlot =0, testSlot, toBeAssigned, inc;
480    u32   spacing;
481    u16  *SchedTbl, *TstSchedTbl;
482    u16  cbrVC, vcIndex;
483    u32   fracSlot    = 0;
484    u32   sp_mod      = 0;
485    u32   sp_mod2     = 0;
486
487    /* IpAdjustTrafficParams */
488    if (vcc->qos.txtp.max_pcr <= 0) {
489       IF_ERR(printk("PCR for CBR not defined\n");)
490       return -1;
491    }
492    rate = vcc->qos.txtp.max_pcr;
493    entries = rate / dev->Granularity;
494    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
495                                 entries, rate, dev->Granularity);)
496    if (entries < 1)
497       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
498    rateLow  =  entries * dev->Granularity;
499    rateHigh = (entries + 1) * dev->Granularity;
500    if (3*(rate - rateLow) > (rateHigh - rate))
501       entries++;
502    if (entries > dev->CbrRemEntries) {
503       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
504       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
505                                        entries, dev->CbrRemEntries);)
506       return -EBUSY;
507    }   
508
509    ia_vcc = INPH_IA_VCC(vcc);
510    ia_vcc->NumCbrEntry = entries; 
511    dev->sum_mcr += entries * dev->Granularity; 
512    /* IaFFrednInsertCbrSched */
513    // Starting at an arbitrary location, place the entries into the table
514    // as smoothly as possible
515    cbrVC   = 0;
516    spacing = dev->CbrTotEntries / entries;
517    sp_mod  = dev->CbrTotEntries % entries; // get modulo
518    toBeAssigned = entries;
519    fracSlot = 0;
520    vcIndex  = vcc->vci;
521    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
522    while (toBeAssigned)
523    {
524       // If this is the first time, start the table loading for this connection
525       // as close to entryPoint as possible.
526       if (toBeAssigned == entries)
527       {
528          idealSlot = dev->CbrEntryPt;
529          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
530          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
531             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
532       } else {
533          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
534          // in the table that would be  smoothest
535          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
536          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
537       }
538       if (idealSlot >= (int)dev->CbrTotEntries) 
539          idealSlot -= dev->CbrTotEntries;  
540       // Continuously check around this ideal value until a null
541       // location is encountered.
542       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
543       inc = 0;
544       testSlot = idealSlot;
545       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
546       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
547                                 testSlot, (u32)TstSchedTbl,toBeAssigned);) 
548       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
549       while (cbrVC)  // If another VC at this location, we have to keep looking
550       {
551           inc++;
552           testSlot = idealSlot - inc;
553           if (testSlot < 0) { // Wrap if necessary
554              testSlot += dev->CbrTotEntries;
555              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
556                                                        (u32)SchedTbl,testSlot);)
557           }
558           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
559           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
560           if (!cbrVC)
561              break;
562           testSlot = idealSlot + inc;
563           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
564              testSlot -= dev->CbrTotEntries;
565              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
566              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
567                                             testSlot, toBeAssigned);)
568           } 
569           // set table index and read in value
570           TstSchedTbl = (u16*)(SchedTbl + testSlot);
571           IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
572                           (u32)TstSchedTbl,cbrVC,inc);) 
573           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
574        } /* while */
575        // Move this VCI number into this location of the CBR Sched table.
576        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
577        dev->CbrRemEntries--;
578        toBeAssigned--;
579    } /* while */ 
580
581    /* IaFFrednCbrEnable */
582    dev->NumEnabledCBR++;
583    if (dev->NumEnabledCBR == 1) {
584        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
585        IF_CBR(printk("CBR is enabled\n");)
586    }
587    return 0;
588 }
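
/*
 * Worked example of the spreading logic above (hypothetical numbers): with
 * CbrTotEntries = 512 and entries = 5, spacing = 102 and sp_mod = 2.
 * Starting from CbrEntryPt = 0 the ideal slots come out as 0, 102, 204, 306
 * and 409; the remainder accumulated in sp_mod/sp_mod2 adds the extra slot
 * on the fifth step, so the five entries are spread almost evenly over the
 * 512-slot table.
 */
#if 0  /* standalone sketch of the same accumulation, for illustration only;
          cbr_spread_example() is a made-up name and is never compiled in */
static void cbr_spread_example(u32 tot, u32 entries)
{
   u32 spacing = tot / entries, sp_mod = tot % entries, sp_mod2 = 0;
   u32 fracSlot = 0, slot = 0, n;
   for (n = 0; n < entries; n++) {
      printk("CBR example slot %u\n", slot);
      slot += spacing + fracSlot;            /* next ideal location     */
      fracSlot = (sp_mod + sp_mod2) / entries;
      sp_mod2  = (sp_mod + sp_mod2) % entries;
      if (slot >= tot)                       /* wrap like the real code */
         slot -= tot;
   }
}
#endif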
589 static void ia_cbrVc_close (struct atm_vcc *vcc) {
590    IADEV *iadev;
591    u16 *SchedTbl, NullVci = 0;
592    u32 i, NumFound;
593
594    iadev = INPH_IA_DEV(vcc->dev);
595    iadev->NumEnabledCBR--;
596    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
597    if (iadev->NumEnabledCBR == 0) {
598       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
599       IF_CBR (printk("CBR support disabled\n");)
600    }
601    NumFound = 0;
602    for (i=0; i < iadev->CbrTotEntries; i++)
603    {
604       if (*SchedTbl == vcc->vci) {
605          iadev->CbrRemEntries++;
606          *SchedTbl = NullVci;
607          IF_CBR(NumFound++;)
608       }
609       SchedTbl++;   
610    } 
611    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
612 }
613
614 static int ia_avail_descs(IADEV *iadev) {
615    int tmp = 0;
616    ia_hack_tcq(iadev);
617    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
618       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
619    else
620       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
621                    iadev->ffL.tcq_st) / 2;
622    return tmp;
623 }    
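
/*
 * The arithmetic above counts free 16-bit slots in the circular TCQ.  With
 * hypothetical pointers tcq_st = 0x100, tcq_ed = 0x1fe, tcq_rd = 0x1f0 and
 * host_tcq_wr = 0x110, the write pointer has wrapped, so the second branch
 * applies: (0x1fe - 0x1f0 + 2 + 0x110 - 0x100) / 2 = 16 descriptors
 * available.
 */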
624
625 static int ia_que_tx (IADEV *iadev) { 
626    struct sk_buff *skb;
627    int num_desc;
628    struct atm_vcc *vcc;
629    struct ia_vcc *iavcc;
630    static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
631    num_desc = ia_avail_descs(iadev);
632    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
633       if (!(vcc = ATM_SKB(skb)->vcc)) {
634          dev_kfree_skb_any(skb);
635          printk("ia_que_tx: Null vcc\n");
636          break;
637       }
638       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
639          dev_kfree_skb_any(skb);
640          printk("Free the SKB on closed vci %d \n", vcc->vci);
641          break;
642       }
643       iavcc = INPH_IA_VCC(vcc);
644       if (ia_pkt_tx (vcc, skb)) {
645          skb_queue_head(&iadev->tx_backlog, skb);
646       }
647       num_desc--;
648    }
649    return 0;
650 }
651 void ia_tx_poll (IADEV *iadev) {
652    struct atm_vcc *vcc = NULL;
653    struct sk_buff *skb = NULL, *skb1 = NULL;
654    struct ia_vcc *iavcc;
655    IARTN_Q *  rtne;
656
657    ia_hack_tcq(iadev);
658    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
659        skb = rtne->data.txskb;
660        if (!skb) {
661            printk("ia_tx_poll: skb is null\n");
662            goto out;
663        }
664        vcc = ATM_SKB(skb)->vcc;
665        if (!vcc) {
666            printk("ia_tx_poll: vcc is null\n");
667            dev_kfree_skb_any(skb);
668            goto out;
669        }
670
671        iavcc = INPH_IA_VCC(vcc);
672        if (!iavcc) {
673            printk("ia_tx_poll: iavcc is null\n");
674            dev_kfree_skb_any(skb);
675            goto out;
676        }
677
678        skb1 = skb_dequeue(&iavcc->txing_skb);
679        while (skb1 && (skb1 != skb)) {
680           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
681              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
682           }
683           IF_ERR(printk("Releasing the unmatched SKB\n");)
684           if ((vcc->pop) && (skb1->len != 0))
685           {
686              vcc->pop(vcc, skb1);
687              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
688                                                           (long)skb1);)
689           }
690           else 
691              dev_kfree_skb_any(skb1);
692           skb1 = skb_dequeue(&iavcc->txing_skb);
693        }                                                        
694        if (!skb1) {
695           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
696           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
697           break;
698        }
699        if ((vcc->pop) && (skb->len != 0))
700        {
701           vcc->pop(vcc, skb);
702           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
703        }
704        else 
705           dev_kfree_skb_any(skb);
706        kfree(rtne);
707     }
708     ia_que_tx(iadev);
709 out:
710     return;
711 }
712 #if 0
713 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
714 {
715         u32     t;
716         int     i;
717         /*
718          * Issue a command to enable writes to the NOVRAM
719          */
720         NVRAM_CMD (EXTEND + EWEN);
721         NVRAM_CLR_CE;
722         /*
723          * issue the write command
724          */
725         NVRAM_CMD(IAWRITE + addr);
726         /* 
727          * Send the data, starting with D15, then D14, and so on for 16 bits
728          */
729         for (i=15; i>=0; i--) {
730                 NVRAM_CLKOUT (val & 0x8000);
731                 val <<= 1;
732         }
733         NVRAM_CLR_CE;
734         CFG_OR(NVCE);
735         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
736         while (!(t & NVDO))
737                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
738
739         NVRAM_CLR_CE;
740         /*
741          * disable writes again
742          */
743         NVRAM_CMD(EXTEND + EWDS)
744         NVRAM_CLR_CE;
745         CFG_AND(~NVDI);
746 }
747 #endif
748
749 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
750 {
751         u_short val;
752         u32     t;
753         int     i;
754         /*
755          * Read the first bit that was clocked with the falling edge of the
756          * last command data clock
757          */
758         NVRAM_CMD(IAREAD + addr);
759         /*
760          * Now read the rest of the bits, the next bit read is D14, then D13,
761          * and so on.
762          */
763         val = 0;
764         for (i=15; i>=0; i--) {
765                 NVRAM_CLKIN(t);
766                 val |= (t << i);
767         }
768         NVRAM_CLR_CE;
769         CFG_AND(~NVDI);
770         return val;
771 }
772
773 static void ia_hw_type(IADEV *iadev) {
774    u_short memType = ia_eeprom_get(iadev, 25);   
775    iadev->memType = memType;
776    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
777       iadev->num_tx_desc = IA_TX_BUF;
778       iadev->tx_buf_sz = IA_TX_BUF_SZ;
779       iadev->num_rx_desc = IA_RX_BUF;
780       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
781    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
782       if (IA_TX_BUF == DFL_TX_BUFFERS)
783         iadev->num_tx_desc = IA_TX_BUF / 2;
784       else 
785         iadev->num_tx_desc = IA_TX_BUF;
786       iadev->tx_buf_sz = IA_TX_BUF_SZ;
787       if (IA_RX_BUF == DFL_RX_BUFFERS)
788         iadev->num_rx_desc = IA_RX_BUF / 2;
789       else
790         iadev->num_rx_desc = IA_RX_BUF;
791       iadev->rx_buf_sz = IA_RX_BUF_SZ;
792    }
793    else {
794       if (IA_TX_BUF == DFL_TX_BUFFERS) 
795         iadev->num_tx_desc = IA_TX_BUF / 8;
796       else
797         iadev->num_tx_desc = IA_TX_BUF;
798       iadev->tx_buf_sz = IA_TX_BUF_SZ;
799       if (IA_RX_BUF == DFL_RX_BUFFERS)
800         iadev->num_rx_desc = IA_RX_BUF / 8;
801       else
802         iadev->num_rx_desc = IA_RX_BUF;
803       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
804    } 
805    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
806    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
807          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
808          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
809
810 #if 0
811    if ((memType & FE_MASK) == FE_SINGLE_MODE)
812       iadev->phy_type = PHY_OC3C_S;
813    else if ((memType & FE_MASK) == FE_UTP_OPTION)
814       iadev->phy_type = PHY_UTP155;
815    else
816      iadev->phy_type = PHY_OC3C_M;
817 #endif
818    
819    iadev->phy_type = memType & FE_MASK;
820    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
821                                          memType,iadev->phy_type);)
822    if (iadev->phy_type == FE_25MBIT_PHY) 
823       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
824    else if (iadev->phy_type == FE_DS3_PHY)
825       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
826    else if (iadev->phy_type == FE_E3_PHY) 
827       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
828    else
829        iadev->LineRate = (u32)(ATM_OC3_PCR);
830    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
831
832 }
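
/*
 * The LineRate expressions above are cells/sec figures: bits/sec divided by
 * 8 gives bytes/sec, dividing by 53 gives raw cells/sec, and the 26/27
 * factor derates that slightly (presumably for framing overhead).  For the
 * 25 Mbit PHY, for example,
 *
 *     ((25600000/8) * 26) / (27 * 53) = 83200000 / 1431 = 58141 cells/sec.
 */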
833
834 static void IaFrontEndIntr(IADEV *iadev) {
835   volatile IA_SUNI *suni;
836   volatile ia_mb25_t *mb25;
837   volatile suni_pm7345_t *suni_pm7345;
838   u32 intr_status;
839   u_int frmr_intr;
840
841   if(iadev->phy_type & FE_25MBIT_PHY) {
842      mb25 = (ia_mb25_t*)iadev->phy;
843      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
844   } else if (iadev->phy_type & FE_DS3_PHY) {
845      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
846      /* clear FRMR interrupts */
847      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat; 
848      iadev->carrier_detect =  
849            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
850   } else if (iadev->phy_type & FE_E3_PHY ) {
851      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
852      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
853      iadev->carrier_detect =
854            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
855   }
856   else { 
857      suni = (IA_SUNI *)iadev->phy;
858      intr_status = suni->suni_rsop_status & 0xff;
859      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
860   }
861   if (iadev->carrier_detect)
862     printk("IA: SUNI carrier detected\n");
863   else
864     printk("IA: SUNI carrier lost signal\n"); 
865   return;
866 }
867
868 void ia_mb25_init (IADEV *iadev)
869 {
870    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
871 #if 0
872    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
873 #endif
874    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
875    mb25->mb25_diag_control = 0;
876    /*
877     * Initialize carrier detect state
878     */
879    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
880    return;
881 }                   
882
883 void ia_suni_pm7345_init (IADEV *iadev)
884 {
885    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
886    if (iadev->phy_type & FE_DS3_PHY)
887    {
888       iadev->carrier_detect = 
889           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
890       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
891       suni_pm7345->suni_ds3_frm_cfg = 1;
892       suni_pm7345->suni_ds3_tran_cfg = 1;
893       suni_pm7345->suni_config = 0;
894       suni_pm7345->suni_splr_cfg = 0;
895       suni_pm7345->suni_splt_cfg = 0;
896    }
897    else 
898    {
899       iadev->carrier_detect = 
900           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
901       suni_pm7345->suni_e3_frm_fram_options = 0x4;
902       suni_pm7345->suni_e3_frm_maint_options = 0x20;
903       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
904       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
905       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
906       suni_pm7345->suni_e3_tran_fram_options = 0x1;
907       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
908       suni_pm7345->suni_splr_cfg = 0x41;
909       suni_pm7345->suni_splt_cfg = 0x41;
910    } 
911    /*
912     * Enable RSOP loss of signal interrupt.
913     */
914    suni_pm7345->suni_intr_enbl = 0x28;
915  
916    /*
917     * Clear error counters
918     */
919    suni_pm7345->suni_id_reset = 0;
920
921    /*
922     * Clear "PMCTST" in master test register.
923     */
924    suni_pm7345->suni_master_test = 0;
925
926    suni_pm7345->suni_rxcp_ctrl = 0x2c;
927    suni_pm7345->suni_rxcp_fctrl = 0x81;
928  
929    suni_pm7345->suni_rxcp_idle_pat_h1 =
930         suni_pm7345->suni_rxcp_idle_pat_h2 =
931         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
932    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
933  
934    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
935    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
936    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
937    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
938  
939    suni_pm7345->suni_rxcp_cell_pat_h1 =
940         suni_pm7345->suni_rxcp_cell_pat_h2 =
941         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
942    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
943  
944    suni_pm7345->suni_rxcp_cell_mask_h1 =
945         suni_pm7345->suni_rxcp_cell_mask_h2 =
946         suni_pm7345->suni_rxcp_cell_mask_h3 =
947         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
948  
949    suni_pm7345->suni_txcp_ctrl = 0xa4;
950    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
951    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
952  
953    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
954                                  SUNI_PM7345_CLB |
955                                  SUNI_PM7345_DLB |
956                                   SUNI_PM7345_PLB);
957 #ifdef __SNMP__
958    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
959 #endif /* __SNMP__ */
960    return;
961 }
962
963
964 /***************************** IA_LIB END *****************************/
965     
966 /* pwang_test debug utility */
967 int tcnter = 0, rcnter = 0;
968 void xdump( u_char*  cp, int  length, char*  prefix )
969 {
970     int col, count;
971     u_char prntBuf[120];
972     u_char*  pBuf = prntBuf;
973     count = 0;
974     while(count < length){
975         pBuf += sprintf( pBuf, "%s", prefix );
976         for(col = 0;count + col < length && col < 16; col++){
977             if (col != 0 && (col % 4) == 0)
978                 pBuf += sprintf( pBuf, " " );
979             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
980         }
981         while(col++ < 16){      /* pad end of buffer with blanks */
982             if ((col % 4) == 0)
983                 sprintf( pBuf, " " );
984             pBuf += sprintf( pBuf, "   " );
985         }
986         pBuf += sprintf( pBuf, "  " );
987         for(col = 0;count + col < length && col < 16; col++){
988             if (isprint((int)cp[count + col]))
989                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
990             else
991                 pBuf += sprintf( pBuf, "." );
992                 }
993         sprintf( pBuf, "\n" );
994         // SPrint(prntBuf);
995         printk("%s", prntBuf);
996         count += col;
997         pBuf = prntBuf;
998     }
999
1000 }  /* close xdump(... */
1001
1002   
1003 static struct atm_dev *ia_boards = NULL;  
1004   
1005 #define ACTUAL_RAM_BASE \
1006         RAM_BASE*((iadev->mem)/(128 * 1024))  
1007 #define ACTUAL_SEG_RAM_BASE \
1008         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1009 #define ACTUAL_REASS_RAM_BASE \
1010         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1011   
1012   
1013 /*-- some utilities and memory allocation stuff will come here -------------*/  
1014   
1015 void desc_dbg(IADEV *iadev) {
1016
1017   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1018   u32 tmp, i;
1019   // regval = readl((u32)ia_cmds->maddr);
1020   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1021   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1022                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1023                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1024   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1025                    iadev->ffL.tcq_rd);
1026   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1027   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1028   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1029   i = 0;
1030   while (tcq_st_ptr != tcq_ed_ptr) {
1031       tmp = iadev->seg_ram+tcq_st_ptr;
1032       printk("TCQ slot %d desc = %d  Addr = 0x%x\n", i++, readw(tmp), tmp);
1033       tcq_st_ptr += 2;
1034   }
1035   for(i=0; i <iadev->num_tx_desc; i++)
1036       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1037 }
1038   
1039   
1040 /*----------------------------- Receiving side stuff --------------------------*/  
1041  
1042 static void rx_excp_rcvd(struct atm_dev *dev)  
1043 {  
1044 #if 0 /* closing the receiving side will cause too many excp int */  
1045   IADEV *iadev;  
1046   u_short state;  
1047   u_short excpq_rd_ptr;  
1048   //u_short *ptr;  
1049   int vci, error = 1;  
1050   iadev = INPH_IA_DEV(dev);  
1051   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1052   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1053   { printk("state = %x \n", state); 
1054         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1055  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1056         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1057             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1058         // TODO: update exception stat
1059         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1060         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1061         // pwang_test
1062         excpq_rd_ptr += 4;  
1063         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1064             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1065         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1066         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1067   }  
1068 #endif
1069 }  
1070   
1071 static void free_desc(struct atm_dev *dev, int desc)  
1072 {  
1073         IADEV *iadev;  
1074         iadev = INPH_IA_DEV(dev);  
1075         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1076         iadev->rfL.fdq_wr +=2;
1077         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1078                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1079         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1080 }  
1081   
1082   
1083 static int rx_pkt(struct atm_dev *dev)  
1084 {  
1085         IADEV *iadev;  
1086         struct atm_vcc *vcc;  
1087         unsigned short status;  
1088         struct rx_buf_desc *buf_desc_ptr;  
1089         int desc;   
1090         struct dle* wr_ptr;  
1091         int len;  
1092         struct sk_buff *skb;  
1093         u_int buf_addr, dma_addr;  
1094         iadev = INPH_IA_DEV(dev);  
1095         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1096         {  
1097             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1098             return -EINVAL;  
1099         }  
1100         /* mask 1st 3 bits to get the actual descno. */  
1101         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1102         IF_RX(printk("reass_ram = 0x%x iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1103                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1104               printk(" pcq_wr_ptr = 0x%x\n",
1105                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1106         /* update the read pointer - maybe we should do this at the end */  
1107         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1108                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1109         else  
1110                 iadev->rfL.pcq_rd += 2;
1111         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1112   
1113         /* get the buffer desc entry.  
1114                 update stuff. - doesn't seem to be any update necessary  
1115         */  
1116         buf_desc_ptr = (struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1117         /* make the ptr point to the corresponding buffer desc entry */  
1118         buf_desc_ptr += desc;     
1119         if (!desc || (desc > iadev->num_rx_desc) || 
1120                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1121             free_desc(dev, desc);
1122             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1123             return -1;
1124         }
1125         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1126         if (!vcc)  
1127         {      
1128                 free_desc(dev, desc); 
1129                 printk("IA: null vcc, drop PDU\n");  
1130                 return -1;  
1131         }  
1132           
1133   
1134         /* might want to check the status bits for errors */  
1135         status = (u_short) (buf_desc_ptr->desc_mode);  
1136         if (status & (RX_CER | RX_PTE | RX_OFL))  
1137         {  
1138                 atomic_inc(&vcc->stats->rx_err);
1139                 IF_ERR(printk("IA: bad packet, dropping it");)  
1140                 if (status & RX_CER) { 
1141                     IF_ERR(printk(" cause: packet CRC error\n");)
1142                 }
1143                 else if (status & RX_PTE) {
1144                     IF_ERR(printk(" cause: packet time out\n");)
1145                 }
1146                 else {
1147                     IF_ERR(printk(" cause: buffer over flow\n");)
1148                 }
1149                 goto out_free_desc;
1150         }  
1151   
1152         /*  
1153                 build DLE.        
1154         */  
1155   
1156         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1157         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1158         len = dma_addr - buf_addr;  
1159         if (len > iadev->rx_buf_sz) {
1160            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1161            atomic_inc(&vcc->stats->rx_err);
1162            goto out_free_desc;
1163         }
1164                   
1165 #if LINUX_VERSION_CODE >= 0x20312
1166         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1167 #else
1168         if (atm_charge(vcc, atm_pdu2truesize(len))) {
1169            /* lets allocate an skb for now */  
1170            skb = alloc_skb(len, GFP_ATOMIC);  
1171            if (!skb)  
1172            {  
1173               IF_ERR(printk("can't allocate memory for recv, drop pkt!\n");)  
1174               atomic_inc(&vcc->stats->rx_drop);
1175               atm_return(vcc, atm_pdu2truesize(len));
1176               goto out_free_desc;
1177            }  
1178         }
1179         else {
1180            IF_EVENT(printk("IA: Rx over the rx_quota %ld\n", vcc->rx_quota);)
1181 #endif
1182            if (vcc->vci < 32)
1183               printk("Drop control packets\n");
1184            goto out_free_desc;
1185         }
1186         skb_put(skb,len);  
1187         // pwang_test
1188         ATM_SKB(skb)->vcc = vcc;
1189         ATM_SKB(skb)->iovcnt = 0;
1190         ATM_DESC(skb) = desc;        
1191         skb_queue_tail(&iadev->rx_dma_q, skb);  
1192
1193         /* Build the DLE structure */  
1194         wr_ptr = iadev->rx_dle_q.write;  
1195         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1196                 len, PCI_DMA_FROMDEVICE);
1197         wr_ptr->local_pkt_addr = buf_addr;  
1198         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1199         wr_ptr->mode = DMA_INT_ENABLE;  
1200   
1201         /* shud take care of wrap around here too. */  
1202         if(++wr_ptr == iadev->rx_dle_q.end)
1203              wr_ptr = iadev->rx_dle_q.start;
1204         iadev->rx_dle_q.write = wr_ptr;  
1205         udelay(1);  
1206         /* Increment transaction counter */  
1207         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1208 out:    return 0;  
1209 out_free_desc:
1210         free_desc(dev, desc);
1211         goto out;
1212 }  
1213   
1214 static void rx_intr(struct atm_dev *dev)  
1215 {  
1216   IADEV *iadev;  
1217   u_short status;  
1218   u_short state, i;  
1219   
1220   iadev = INPH_IA_DEV(dev);  
1221   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1222   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1223   if (status & RX_PKT_RCVD)  
1224   {  
1225         /* do something */  
1226         /* Basically received an interrupt for receiving a packet.  
1227         A descriptor would have been written to the packet complete   
1228         queue. Get all the descriptors and set up DMA to move the   
1229         packets till the packet complete queue is empty.  
1230         */  
1231         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1232         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1233         while(!(state & PCQ_EMPTY))  
1234         {  
1235              rx_pkt(dev);  
1236              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1237         }  
1238         iadev->rxing = 1;
1239   }  
1240   if (status & RX_FREEQ_EMPT)  
1241   {   
1242      if (iadev->rxing) {
1243         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1244         iadev->rx_tmp_jif = jiffies; 
1245         iadev->rxing = 0;
1246      } 
1247      else if (((jiffies - iadev->rx_tmp_jif) > 50) && 
1248                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1249         for (i = 1; i <= iadev->num_rx_desc; i++)
1250                free_desc(dev, i);
1251 printk("Test logic RUN!!!!\n");
1252         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1253         iadev->rxing = 1;
1254      }
1255      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1256   }  
1257
1258   if (status & RX_EXCP_RCVD)  
1259   {  
1260         /* probably need to handle the exception queue also. */  
1261         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1262         rx_excp_rcvd(dev);  
1263   }  
1264
1265
1266   if (status & RX_RAW_RCVD)  
1267   {  
1268         /* need to handle the raw incoming cells. This depends on   
1269         whether we have programmed to receive the raw cells or not.  
1270         Else ignore. */  
1271         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1272   }  
1273 }  
1274   
1275   
1276 static void rx_dle_intr(struct atm_dev *dev)  
1277 {  
1278   IADEV *iadev;  
1279   struct atm_vcc *vcc;   
1280   struct sk_buff *skb;  
1281   int desc;  
1282   u_short state;   
1283   struct dle *dle, *cur_dle;  
1284   u_int dle_lp;  
1285   int len;
1286   iadev = INPH_IA_DEV(dev);  
1287  
1288   /* free all the dles done, that is just update our own dle read pointer   
1289         - do we really need to do this. Think not. */  
1290   /* DMA is done, just get all the receive buffers from the rx dma queue  
1291         and push them up to the higher layer protocol. Also free the desc  
1292         associated with the buffer. */  
1293   dle = iadev->rx_dle_q.read;  
1294   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1295   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1296   while(dle != cur_dle)  
1297   {  
1298       /* free the DMAed skb */  
1299       skb = skb_dequeue(&iadev->rx_dma_q);  
1300       if (!skb)  
1301          goto INCR_DLE;
1302       desc = ATM_DESC(skb);
1303       free_desc(dev, desc);  
1304                
1305       if (!(len = skb->len))
1306       {  
1307           printk("rx_dle_intr: skb len 0\n");  
1308           dev_kfree_skb_any(skb);  
1309       }  
1310       else  
1311       {  
1312           struct cpcs_trailer *trailer;
1313           u_short length;
1314           struct ia_vcc *ia_vcc;
1315
1316           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1317                 len, PCI_DMA_FROMDEVICE);
1318           /* no VCC related housekeeping done as yet. lets see */  
1319           vcc = ATM_SKB(skb)->vcc;
1320           if (!vcc) {
1321               printk("IA: null vcc\n");  
1322               dev_kfree_skb_any(skb);
1323               goto INCR_DLE;
1324           }
1325           ia_vcc = INPH_IA_VCC(vcc);
1326           if (ia_vcc == NULL)
1327           {
1328              atomic_inc(&vcc->stats->rx_err);
1329              dev_kfree_skb_any(skb);
1330 #if LINUX_VERSION_CODE >= 0x20312
1331              atm_return(vcc, atm_guess_pdu2truesize(len));
1332 #else
1333              atm_return(vcc, atm_pdu2truesize(len));
1334 #endif
1335              goto INCR_DLE;
1336            }
1337           // get real pkt length  pwang_test
1338           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1339                                  skb->len - sizeof(*trailer));
1340           length =  swap(trailer->length);
1341           if ((length > iadev->rx_buf_sz) || (length > 
1342                               (skb->len - sizeof(struct cpcs_trailer))))
1343           {
1344              atomic_inc(&vcc->stats->rx_err);
1345              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1346                                                             length, skb->len);)
1347              dev_kfree_skb_any(skb);
1348 #if LINUX_VERSION_CODE >= 0x20312
1349              atm_return(vcc, atm_guess_pdu2truesize(len));
1350 #else
1351              atm_return(vcc, atm_pdu2truesize(len));
1352 #endif 
1353              goto INCR_DLE;
1354           }
1355           skb_trim(skb, length);
1356           
1357           /* Display the packet */  
1358           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1359           xdump(skb->data, skb->len, "RX: ");
1360           printk("\n");)
1361
1362           IF_RX(printk("rx_dle_intr: skb push");)  
1363           vcc->push(vcc,skb);  
1364           atomic_inc(&vcc->stats->rx);
1365           iadev->rx_pkt_cnt++;
1366       }  
1367 INCR_DLE:
1368       if (++dle == iadev->rx_dle_q.end)  
1369           dle = iadev->rx_dle_q.start;  
1370   }  
1371   iadev->rx_dle_q.read = dle;  
1372   
1373   /* if the interrupts are masked because there were no free desc available,  
1374                 unmask them now. */ 
1375   if (!iadev->rxing) {
1376      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1377      if (!(state & FREEQ_EMPTY)) {
1378         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1379         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1380                                       iadev->reass_reg+REASS_MASK_REG);
1381         iadev->rxing++; 
1382      }
1383   }
1384 }  
1385   
1386   
1387 static int open_rx(struct atm_vcc *vcc)  
1388 {  
1389         IADEV *iadev;  
1390         u_short *vc_table;  
1391         u_short *reass_ptr;  
1392         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1393
1394         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1395         iadev = INPH_IA_DEV(vcc->dev);  
1396         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1397            if (iadev->phy_type & FE_25MBIT_PHY) {
1398                printk("IA: ABR not supported\n");
1399                return -EINVAL; 
1400            }
1401         }
1402         /* Make only this VCI in the vc table valid and let all   
1403                 others be invalid entries */  
1404         vc_table = (u_short *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1405         vc_table += vcc->vci;  
1406         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1407
1408         *vc_table = vcc->vci << 6;
1409         /* Also keep a list of open rx vcs so that we can attach them with  
1410                 incoming PDUs later. */  
1411         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1412                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1413         {  
1414                 srv_cls_param_t srv_p;
1415                 init_abr_vc(iadev, &srv_p);
1416                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1417         } 
1418         else {  /* for UBR  later may need to add CBR logic */
1419                 reass_ptr = (u_short *)
1420                            (iadev->reass_ram+REASS_TABLE*iadev->memSize);
1421                 reass_ptr += vcc->vci;  
1422                 *reass_ptr = NO_AAL5_PKT;
1423         }
1424         
1425         if (iadev->rx_open[vcc->vci])  
1426                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1427                         vcc->dev->number, vcc->vci);  
1428         iadev->rx_open[vcc->vci] = vcc;  
1429         return 0;  
1430 }  
1431   
1432 static int rx_init(struct atm_dev *dev)  
1433 {  
1434         IADEV *iadev;  
1435         struct rx_buf_desc *buf_desc_ptr;  
1436         unsigned long rx_pkt_start = 0;  
1437         void *dle_addr;  
1438         struct abr_vc_table  *abr_vc_table; 
1439         u16 *vc_table;  
1440         u16 *reass_table;  
1441         u16 *ptr16;
1442         int i,j, vcsize_sel;  
1443         u_short freeq_st_adr;  
1444         u_short *freeq_start;  
1445   
1446         iadev = INPH_IA_DEV(dev);  
1447   //    spin_lock_init(&iadev->rx_lock); 
1448   
1449         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1450         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1451                                         &iadev->rx_dle_dma);  
1452         if (!dle_addr)  {  
1453                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1454                 goto err_out;
1455         }
1456         iadev->rx_dle_q.start = (struct dle*)dle_addr;  
1457         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1458         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1459         iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1460         /* the end of the dle q points to the entry after the last  
1461         DLE that can be used. */  
1462   
1463         /* write the upper 20 bits of the start address to rx list address register */  
1464         writel(iadev->rx_dle_dma & 0xfffff000,
1465                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
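        /*
         * When read back, the low bits of this list address register give
         * the SAR's current byte offset into the DLE ring: tx_dle_intr()
         * below masks the readback with (sizeof(struct dle)*DLE_ENTRIES - 1)
         * to find the descriptor the hardware has reached, and the RX side
         * is assumed to behave the same way.
         */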
1466         IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n", 
1467                       (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR), 
1468                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1469         printk("Rx Dle list addr: 0x%08x value: 0x%0x\n", 
1470                       (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR), 
1471                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1472   
1473         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1474         writew(0, iadev->reass_reg+MODE_REG);  
1475         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1476   
1477         /* Receive side control memory map  
1478            -------------------------------  
1479   
1480                 Buffer descr    0x0000 (736 - 23K)  
1481                 VP Table        0x5c00 (256 - 512)  
1482                 Except q        0x5e00 (128 - 512)  
1483                 Free buffer q   0x6000 (1K - 2K)  
1484                 Packet comp q   0x6800 (1K - 2K)  
1485                 Reass Table     0x7000 (1K - 2K)  
1486                 VC Table        0x7800 (1K - 2K)  
1487                 ABR VC Table    0x8000 (1K - 32K)  
1488         */  
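        /*
         * The offsets above are scaled by iadev->memSize in the code below;
         * ia_init() sets memSize to 1 for 1K-VC boards and 4 for 4K-VC
         * boards, so e.g. the VC Table really sits at RX_VC_TABLE * memSize.
         */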
1489           
1490         /* Base address for Buffer Descriptor Table */  
1491         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1492         /* Set the buffer size register */  
1493         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1494   
1495         /* Initialize each entry in the Buffer Descriptor Table */  
1496         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1497         buf_desc_ptr =(struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1498         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1499         buf_desc_ptr++;  
1500         rx_pkt_start = iadev->rx_pkt_ram;  
1501         for(i=1; i<=iadev->num_rx_desc; i++)  
1502         {  
1503                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1504                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1505                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1506                 buf_desc_ptr++;           
1507                 rx_pkt_start += iadev->rx_buf_sz;  
1508         }  
1509         IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)  
1510         i = FREE_BUF_DESC_Q*iadev->memSize; 
1511         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1512         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1513         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1514                                          iadev->reass_reg+FREEQ_ED_ADR);
1515         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1516         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1517                                         iadev->reass_reg+FREEQ_WR_PTR);    
1518         /* Fill the FREEQ with all the free descriptors. */  
1519         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1520         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1521         for(i=1; i<=iadev->num_rx_desc; i++)  
1522         {  
1523                 *freeq_start = (u_short)i;  
1524                 freeq_start++;  
1525         }  
1526         IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)  
1527         /* Packet Complete Queue */
1528         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1529         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1530         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1531         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1532         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1533
1534         /* Exception Queue */
1535         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1536         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1537         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1538                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1539         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1540         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1541  
1542         /* Load local copy of FREEQ and PCQ ptrs */
1543         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1544         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1545         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1546         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1547         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1548         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1549         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1550         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1551         
1552         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1553               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1554               iadev->rfL.pcq_wr);)                
1555         /* just for check - no VP TBL */  
1556         /* VP Table */  
1557         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1558         /* initialize VP Table for invalid VPIs  
1559                 - I guess we can write all 1s or 0x000f in the entire memory  
1560                   space or something similar.  
1561         */  
1562   
1563         /* This seems to work and looks right to me too !!! */  
1564         i =  REASS_TABLE * iadev->memSize;
1565         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1566         /* initialize every Reassembly table entry to NO_AAL5_PKT (no AAL5 packet pending) */  
1567         reass_table = (u16 *)(iadev->reass_ram+i);  
1568         j = REASS_TABLE_SZ * iadev->memSize;
1569         for(i=0; i < j; i++)  
1570                 *reass_table++ = NO_AAL5_PKT;  
1571        i = 8*1024;
1572        vcsize_sel =  0;
1573        while (i != iadev->num_vc) {
1574           i /= 2;
1575           vcsize_sel++;
1576        }
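       /*
        * A sketch of what the loop above computes, assuming num_vc is a
        * power of two no larger than 8K (ia_init() only ever sets 1024 or
        * 4096): vcsize_sel = log2(8192 / num_vc), i.e. 3 for a 1K-VC board
        * and 1 for a 4K-VC board.  It is OR-ed into VC_LKUP_BASE below.
        */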
1577        i = RX_VC_TABLE * iadev->memSize;
1578        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1579        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1580         j = RX_VC_TABLE_SZ * iadev->memSize;
1581         for(i = 0; i < j; i++)  
1582         {  
1583                 /* shift the reassembly pointer by 3 plus the lower 3 bits   
1584                 of the vc_lkup_base register (= 3 for 1K VCs); the last   
1585                 byte holds those low 3 bits.   
1586                 This still needs to be programmed properly later.  
1587                 */  
1588                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1589                 vc_table++;  
1590         }  
1591         /* ABR VC table */
1592         i =  ABR_VC_TABLE * iadev->memSize;
1593         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1594                    
1595         i = ABR_VC_TABLE * iadev->memSize;
1596         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1597         j = REASS_TABLE_SZ * iadev->memSize;
1598         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1599         for(i = 0; i < j; i++) {                
1600                 abr_vc_table->rdf = 0x0003;
1601                 abr_vc_table->air = 0x5eb1;
1602                 abr_vc_table++;         
1603         }  
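        /*
         * rdf/air are presumably the SAR's encoded rate-decrease-factor and
         * additive-increase-rate defaults (not confirmed by any data sheet
         * here); ia_close() restores the same 0x0003/0x5eb1 pair when an
         * ABR VC is torn down.
         */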
1604
1605         /* Initialize other registers */  
1606   
1607         /* VP Filter Register set for VC Reassembly only */  
1608         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1609         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1610         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1611
1612         /* Packet Timeout Count  related Registers : 
1613            Set packet timeout to occur in about 3 seconds
1614            Set Packet Aging Interval count register to overflow in about 4 us
1615         */  
1616         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1617         ptr16 = (u16*)j;
1618         i = ((u32)ptr16 >> 6) & 0xff;
1619         ptr16  += j - 1;
1620         i |=(((u32)ptr16 << 2) & 0xff00);
1621         writew(i, iadev->reass_reg+TMOUT_RANGE);
1622         /* initialize the desc_tbl */
1623         for(i=0; i<iadev->num_tx_desc;i++)
1624             iadev->desc_tbl[i].timestamp = 0;
1625
1626         /* to clear the interrupt status register - read it */  
1627         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1628   
1629         /* Mask Register - clear it */  
1630         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1631   
1632         skb_queue_head_init(&iadev->rx_dma_q);  
1633         iadev->rx_free_desc_qhead = NULL;   
1634         iadev->rx_open = kmalloc(sizeof(struct atm_vcc *)*iadev->num_vc, GFP_KERNEL);
1635         if (!iadev->rx_open)  
1636         {  
1637                 printk(KERN_ERR DEV_LABEL "(itf %d): couldn't allocate rx_open table\n",
1638                 dev->number);  
1639                 goto err_free_dle;
1640         }  
1641         memset(iadev->rx_open, 0, sizeof(struct atm_vcc *)*iadev->num_vc);  
1642         iadev->rxing = 1;
1643         iadev->rx_pkt_cnt = 0;
1644         /* Mode Register */  
1645         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1646         return 0;  
1647
1648 err_free_dle:
1649         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1650                             iadev->rx_dle_dma);  
1651 err_out:
1652         return -ENOMEM;
1653 }  
1654   
1655
1656 /*  
1657         The memory map suggested in appendix A and the coding for it.   
1658         Keeping it around just in case we change our mind later.  
1659   
1660                 Buffer descr    0x0000 (128 - 4K)  
1661                 UBR sched       0x1000 (1K - 4K)  
1662                 UBR Wait q      0x2000 (1K - 4K)  
1663                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1664                                         (128 - 256) each  
1665                 extended VC     0x4000 (1K - 8K)  
1666                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1667                 CBR sched       0x7000 (as needed)  
1668                 VC table        0x8000 (1K - 32K)  
1669 */  
1670   
1671 static void tx_intr(struct atm_dev *dev)  
1672 {  
1673         IADEV *iadev;  
1674         unsigned short status;  
1675         unsigned long flags;
1676
1677         iadev = INPH_IA_DEV(dev);  
1678   
1679         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1680         if (status & TRANSMIT_DONE){
1681
1682            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1683            spin_lock_irqsave(&iadev->tx_lock, flags);
1684            ia_tx_poll(iadev);
1685            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1686            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1687            if (iadev->close_pending)  
1688                wake_up(&iadev->close_wait);
1689         }         
1690         if (status & TCQ_NOT_EMPTY)  
1691         {  
1692             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1693         }  
1694 }  
1695   
1696 static void tx_dle_intr(struct atm_dev *dev)
1697 {
1698         IADEV *iadev;
1699         struct dle *dle, *cur_dle; 
1700         struct sk_buff *skb;
1701         struct atm_vcc *vcc;
1702         struct ia_vcc  *iavcc;
1703         u_int dle_lp;
1704         unsigned long flags;
1705
1706         iadev = INPH_IA_DEV(dev);
1707         spin_lock_irqsave(&iadev->tx_lock, flags);   
1708         dle = iadev->tx_dle_q.read;
1709         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1710                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1711         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
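        /*
         * dle_lp is the hardware's byte offset into the TX DLE ring; the
         * ">> 4" turns it into a ring index, which assumes that
         * sizeof(struct dle) is 16 bytes.
         */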
1712         while (dle != cur_dle)
1713         {
1714             /* free the DMAed skb */ 
1715             skb = skb_dequeue(&iadev->tx_dma_q); 
1716             if (!skb) break;
1717
1718             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1719             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1720                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1721                                  PCI_DMA_TODEVICE);
1722             }
1723             vcc = ATM_SKB(skb)->vcc;
1724             if (!vcc) {
1725                   printk("tx_dle_intr: vcc is null\n");
1726                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1727                   dev_kfree_skb_any(skb);
1728
1729                   return;
1730             }
1731             iavcc = INPH_IA_VCC(vcc);
1732             if (!iavcc) {
1733                   printk("tx_dle_intr: iavcc is null\n");
1734                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1735                   dev_kfree_skb_any(skb);
1736                   return;
1737             }
1738             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1739                if ((vcc->pop) && (skb->len != 0))
1740                {     
1741                  vcc->pop(vcc, skb);
1742                } 
1743                else {
1744                  dev_kfree_skb_any(skb);
1745                }
1746             }
1747             else { /* Hold the rate-limited skb for flow control */
1748                IA_SKB_STATE(skb) |= IA_DLED;
1749                skb_queue_tail(&iavcc->txing_skb, skb);
1750             }
1751             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
1752             if (++dle == iadev->tx_dle_q.end)
1753                  dle = iadev->tx_dle_q.start;
1754         }
1755         iadev->tx_dle_q.read = dle;
1756         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1757 }
1758   
1759 static int open_tx(struct atm_vcc *vcc)  
1760 {  
1761         struct ia_vcc *ia_vcc;  
1762         IADEV *iadev;  
1763         struct main_vc *vc;  
1764         struct ext_vc *evc;  
1765         int ret;
1766         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1767         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1768         iadev = INPH_IA_DEV(vcc->dev);  
1769         
1770         if (iadev->phy_type & FE_25MBIT_PHY) {
1771            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1772                printk("IA:  ABR not supported\n");
1773                return -EINVAL; 
1774            }
1775           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1776                printk("IA:  CBR not supported\n");
1777                return -EINVAL; 
1778           }
1779         }
1780         ia_vcc =  INPH_IA_VCC(vcc);
1781         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1782         if (vcc->qos.txtp.max_sdu > 
1783                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1784            printk("IA:  SDU size %d exceeds the configured buffer size %d\n",
1785                   vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1786            INPH_IA_VCC(vcc) = NULL;  
1787            kfree(ia_vcc);
1788            return -EINVAL; 
1789         }
1790         ia_vcc->vc_desc_cnt = 0;
1791         ia_vcc->txing = 1;
1792
1793         /* find pcr */
1794         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1795            vcc->qos.txtp.pcr = iadev->LineRate;
1796         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1797            vcc->qos.txtp.pcr = iadev->LineRate;
1798         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1799            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1800         if (vcc->qos.txtp.pcr > iadev->LineRate)
1801              vcc->qos.txtp.pcr = iadev->LineRate;
1802         ia_vcc->pcr = vcc->qos.txtp.pcr;
1803
1804         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1805         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1806         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1807         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
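        /*
         * The ladder above picks a longer ltimeout for slower VCs: rates
         * above LineRate/6 get HZ/10, rates down to LineRate/130 get HZ,
         * 170 cells/s or less get 16*HZ, and the band in between scales as
         * 2700*HZ/pcr.  The constants appear to be empirical.
         */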
1808         if (ia_vcc->pcr < iadev->rate_limit)
1809            skb_queue_head_init (&ia_vcc->txing_skb);
1810         if (ia_vcc->pcr < iadev->rate_limit) {
1811            if (vcc->qos.txtp.max_sdu != 0) {
1812                if (ia_vcc->pcr > 60000)
1813                   vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 5;
1814                else if (ia_vcc->pcr > 2000)
1815                   vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 4;
1816                else
1817                  vcc->sk->sndbuf = 3*vcc->qos.txtp.max_sdu;
1818            }
1819            else
1820              vcc->sk->sndbuf = 24576;
1821         }
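        /*
         * For VCs slower than rate_limit (LineRate/3, set in tx_init()) the
         * socket send buffer is capped relative to max_sdu so that the
         * backlog held on txing_skb by tx_dle_intr() stays bounded; this is
         * an interpretation of the code above, not documented behaviour.
         */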
1822            
1823         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1824         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1825         vc += vcc->vci;  
1826         evc += vcc->vci;  
1827         memset((caddr_t)vc, 0, sizeof(*vc));  
1828         memset((caddr_t)evc, 0, sizeof(*evc));  
1829           
1830         /* store the most significant 4 bits of vci as the last 4 bits   
1831                 of first part of atm header.  
1832            store the last 12 bits of vci as first 12 bits of the second  
1833                 part of the atm header.  
1834         */  
1835         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1836         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
1837  
1838         /* check the following for different traffic classes */  
1839         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1840         {  
1841                 vc->type = UBR;  
1842                 vc->status = CRC_APPEND;
1843                 vc->acr = cellrate_to_float(iadev->LineRate);  
1844                 if (vcc->qos.txtp.pcr > 0) 
1845                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1846                 IF_UBR(printk("UBR: txtp.max_pcr = 0x%x f_rate = 0x%x\n", 
1847                                              vcc->qos.txtp.max_pcr,vc->acr);)
1848         }  
1849         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1850         {       srv_cls_param_t srv_p;
1851                 IF_ABR(printk("Tx ABR VCC\n");)  
1852                 init_abr_vc(iadev, &srv_p);
1853                 if (vcc->qos.txtp.pcr > 0) 
1854                    srv_p.pcr = vcc->qos.txtp.pcr;
1855                 if (vcc->qos.txtp.min_pcr > 0) {
1856                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1857                    if (tmpsum > iadev->LineRate)
1858                        return -EBUSY;
1859                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1860                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1861                 } 
1862                 else srv_p.mcr = 0;
1863                 if (vcc->qos.txtp.icr)
1864                    srv_p.icr = vcc->qos.txtp.icr;
1865                 if (vcc->qos.txtp.tbe)
1866                    srv_p.tbe = vcc->qos.txtp.tbe;
1867                 if (vcc->qos.txtp.frtt)
1868                    srv_p.frtt = vcc->qos.txtp.frtt;
1869                 if (vcc->qos.txtp.rif)
1870                    srv_p.rif = vcc->qos.txtp.rif;
1871                 if (vcc->qos.txtp.rdf)
1872                    srv_p.rdf = vcc->qos.txtp.rdf;
1873                 if (vcc->qos.txtp.nrm_pres)
1874                    srv_p.nrm = vcc->qos.txtp.nrm;
1875                 if (vcc->qos.txtp.trm_pres)
1876                    srv_p.trm = vcc->qos.txtp.trm;
1877                 if (vcc->qos.txtp.adtf_pres)
1878                    srv_p.adtf = vcc->qos.txtp.adtf;
1879                 if (vcc->qos.txtp.cdf_pres)
1880                    srv_p.cdf = vcc->qos.txtp.cdf;    
1881                 if (srv_p.icr > srv_p.pcr)
1882                    srv_p.icr = srv_p.pcr;    
1883                 IF_ABR(printk("ABR: srv_p.pcr = %d  mcr = %d\n", 
1884                                                       srv_p.pcr, srv_p.mcr);)
1885                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1886         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1887                 if (iadev->phy_type & FE_25MBIT_PHY) {
1888                     printk("IA:  CBR not supported\n");
1889                     return -EINVAL; 
1890                 }
1891                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1892                    IF_CBR(printk("PCR is not available\n");)
1893                    return -1;
1894                 }
1895                 vc->type = CBR;
1896                 vc->status = CRC_APPEND;
1897                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1898                     return ret;
1899                 }
1900        } 
1901         else  
1902            printk("iadev:  traffic class other than UBR, ABR and CBR not supported\n"); 
1903         
1904         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1905         IF_EVENT(printk("ia open_tx returning \n");)  
1906         return 0;  
1907 }  
1908   
1909   
1910 static int tx_init(struct atm_dev *dev)  
1911 {  
1912         IADEV *iadev;  
1913         struct tx_buf_desc *buf_desc_ptr;
1914         unsigned int tx_pkt_start;  
1915         void *dle_addr;  
1916         int i;  
1917         u_short tcq_st_adr;  
1918         u_short *tcq_start;  
1919         u_short prq_st_adr;  
1920         u_short *prq_start;  
1921         struct main_vc *vc;  
1922         struct ext_vc *evc;   
1923         u_short tmp16;
1924         u32 vcsize_sel;
1925  
1926         iadev = INPH_IA_DEV(dev);  
1927         spin_lock_init(&iadev->tx_lock);
1928  
1929         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1930                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1931
1932         /* Allocate 4k (boundary aligned) bytes */
1933         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1934                                         &iadev->tx_dle_dma);  
1935         if (!dle_addr)  {
1936                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1937                 goto err_out;
1938         }
1939         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1940         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1941         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1942         iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1943
1944         /* write the upper 20 bits of the start address to tx list address register */  
1945         writel(iadev->tx_dle_dma & 0xfffff000,
1946                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1947         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1948         writew(0, iadev->seg_reg+MODE_REG_0);  
1949         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1950         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1951         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1952         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1953   
1954         /*  
1955            Transmit side control memory map  
1956            --------------------------------    
1957          Buffer descr   0x0000 (128 - 4K)  
1958          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1959                                         (512 - 1K) each  
1960                                         TCQ - 4K, PRQ - 5K  
1961          CBR Table      0x1800 (as needed) - 6K  
1962          UBR Table      0x3000 (1K - 4K) - 12K  
1963          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1964          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1965                                 ABR Tbl - 20K, ABR Wq - 22K   
1966          extended VC    0x6000 (1K - 8K) - 24K  
1967          VC Table       0x8000 (1K - 32K) - 32K  
1968           
1969         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1970         and Wait q, which can be allotted later.  
1971         */  
1972      
1973         /* Buffer Descriptor Table Base address */  
1974         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1975   
1976         /* initialize each entry in the buffer descriptor table */  
1977         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1978         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1979         buf_desc_ptr++;  
1980         tx_pkt_start = TX_PACKET_RAM;  
1981         for(i=1; i<=iadev->num_tx_desc; i++)  
1982         {  
1983                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1984                 buf_desc_ptr->desc_mode = AAL5;  
1985                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1986                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1987                 buf_desc_ptr++;           
1988                 tx_pkt_start += iadev->tx_buf_sz;  
1989         }  
1990         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1991         if (!iadev->tx_buf) {
1992             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1993             goto err_free_dle;
1994         }
1995         for (i= 0; i< iadev->num_tx_desc; i++)
1996         {
1997             struct cpcs_trailer *cpcs;
1998  
1999             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
2000             if(!cpcs) {                
2001                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
2002                 goto err_free_tx_bufs;
2003             }
2004             iadev->tx_buf[i].cpcs = cpcs;
2005             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
2006                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
2007         }
2008         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
2009                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
2010         if(!iadev->desc_tbl)
2011                 goto err_free_all_tx_bufs;
2012   
2013         /* Communication Queues base address */  
2014         i = TX_COMP_Q * iadev->memSize;
2015         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
2016   
2017         /* Transmit Complete Queue */  
2018         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
2019         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
2020         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
2021         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2022         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2023                                               iadev->seg_reg+TCQ_ED_ADR); 
2024         /* Fill the TCQ with all the free descriptors. */  
2025         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2026         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2027         for(i=1; i<=iadev->num_tx_desc; i++)  
2028         {  
2029                 *tcq_start = (u_short)i;  
2030                 tcq_start++;  
2031         }  
2032   
2033         /* Packet Ready Queue */  
2034         i = PKT_RDY_Q * iadev->memSize; 
2035         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2036         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2037                                               iadev->seg_reg+PRQ_ED_ADR);
2038         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2039         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2040          
2041         /* Load local copy of PRQ and TCQ ptrs */
2042         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2043         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2044         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2045
2046         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2047         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2048         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2049
2050         /* Just for safety, initialize the PRQ entries. */  
2051         /* Unlike the TCQ above, which was filled with the free descriptors, the PRQ is simply cleared. */  
2052         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2053         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2054         for(i=1; i<=iadev->num_tx_desc; i++)  
2055         {  
2056                 *prq_start = (u_short)0;        /* clear the entry */  
2057                 prq_start++;  
2058         }  
2059         /* CBR Table */  
2060         IF_INIT(printk("Start CBR Init\n");)
2061 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2062         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2063 #else /* Charlie's logic is wrong ? */
2064         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2065         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2066         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2067 #endif
2068
2069         IF_INIT(printk("value in register = 0x%x\n",
2070                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2071         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2072         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2073         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2074                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2075         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2076         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2077         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2078         IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2079                (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2080         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2081           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2082           readw(iadev->seg_reg+CBR_TAB_END+1));)
2083         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
2084
2085         /* Initialize the CBR Scheduling Table */
2086         memset((caddr_t)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize), 
2087                                                           0, iadev->num_vc*6); 
2088         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2089         iadev->CbrEntryPt = 0;
2090         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2091         iadev->NumEnabledCBR = 0;
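        /*
         * The CBR schedule is num_vc*3 two-byte slots (num_vc*6 bytes were
         * cleared above), so Granularity = MAX_ATM_155 / CbrTotEntries is
         * the cell rate one slot is worth; presumably ia_cbr_setup() claims
         * slots in proportion to a VC's PCR.
         */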
2092
2093         /* UBR scheduling Table and wait queue */  
2094         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2095                 - SCHEDSZ is 1K (# of entries).  
2096                 - UBR Table size is 4K  
2097                 - UBR wait queue is 4K  
2098            since the table and wait queues are contiguous, all the bytes   
2099            can be initialized by one memset.  
2100         */  
2101         
2102         vcsize_sel = 0;
2103         i = 8*1024;
2104         while (i != iadev->num_vc) {
2105           i /= 2;
2106           vcsize_sel++;
2107         }
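        /* Same encoding as in rx_init(): vcsize_sel = log2(8192 / num_vc),
           e.g. 3 for a 1K-VC board; it is folded into VCT_BASE below. */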
2108  
2109         i = MAIN_VC_TABLE * iadev->memSize;
2110         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2111         i =  EXT_VC_TABLE * iadev->memSize;
2112         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2113         i = UBR_SCHED_TABLE * iadev->memSize;
2114         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2115         i = UBR_WAIT_Q * iadev->memSize; 
2116         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2117         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2118                                                        0, iadev->num_vc*8);
2119         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2120         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2121                 - SCHEDSZ is 1K (# of entries).  
2122                 - ABR Table size is 2K  
2123                 - ABR wait queue is 2K  
2124            since the table and wait queues are contiguous, all the bytes   
2125            can be initialized by one memset.  
2126         */  
2127         i = ABR_SCHED_TABLE * iadev->memSize;
2128         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2129         i = ABR_WAIT_Q * iadev->memSize;
2130         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2131  
2132         i = ABR_SCHED_TABLE*iadev->memSize;
2133         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2134         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2135         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2136         iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); 
2137         if (!iadev->testTable) {
2138            printk(KERN_ERR DEV_LABEL " couldn't allocate testTable\n");
2139            goto err_free_desc_tbl;
2140         }
2141         for(i=0; i<iadev->num_vc; i++)  
2142         {  
2143                 memset((caddr_t)vc, 0, sizeof(*vc));  
2144                 memset((caddr_t)evc, 0, sizeof(*evc));  
2145                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2146                                                 GFP_KERNEL);
2147                 if (!iadev->testTable[i])
2148                         goto err_free_test_tables;
2149                 iadev->testTable[i]->lastTime = 0;
2150                 iadev->testTable[i]->fract = 0;
2151                 iadev->testTable[i]->vc_status = VC_UBR;
2152                 vc++;  
2153                 evc++;  
2154         }  
2155   
2156         /* Other Initialization */  
2157           
2158         /* Max Rate Register */  
2159         if (iadev->phy_type & FE_25MBIT_PHY) {
2160            writew(RATE25, iadev->seg_reg+MAXRATE);  
2161            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2162         }
2163         else {
2164            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2165            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2166         }
2167         /* Set Idle Header Registers to be sure */  
2168         writew(0, iadev->seg_reg+IDLEHEADHI);  
2169         writew(0, iadev->seg_reg+IDLEHEADLO);  
2170   
2171         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2172         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2173
2174         iadev->close_pending = 0;
2175 #if LINUX_VERSION_CODE >= 0x20303
2176         init_waitqueue_head(&iadev->close_wait);
2177         init_waitqueue_head(&iadev->timeout_wait);
2178 #else
2179         iadev->close_wait = NULL;
2180         iadev->timeout_wait = NULL;
2181 #endif 
2182         skb_queue_head_init(&iadev->tx_dma_q);  
2183         ia_init_rtn_q(&iadev->tx_return_q);  
2184
2185         /* RM Cell Protocol ID and Message Type */  
2186         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2187         skb_queue_head_init (&iadev->tx_backlog);
2188   
2189         /* Mode Register 1 */  
2190         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2191   
2192         /* Mode Register 0 */  
2193         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2194   
2195         /* Interrupt Status Register - read to clear */  
2196         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2197   
2198         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2199         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2200         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2201         iadev->tx_pkt_cnt = 0;
2202         iadev->rate_limit = iadev->LineRate / 3;
2203   
2204         return 0;
2205
2206 err_free_test_tables:
2207         while (--i >= 0)
2208                 kfree(iadev->testTable[i]);
2209         kfree(iadev->testTable);
2210 err_free_desc_tbl:
2211         kfree(iadev->desc_tbl);
2212 err_free_all_tx_bufs:
2213         i = iadev->num_tx_desc;
2214 err_free_tx_bufs:
2215         while (--i >= 0) {
2216                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2217
2218                 pci_unmap_single(iadev->pci, desc->dma_addr,
2219                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2220                 kfree(desc->cpcs);
2221         }
2222         kfree(iadev->tx_buf);
2223 err_free_dle:
2224         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2225                             iadev->tx_dle_dma);  
2226 err_out:
2227         return -ENOMEM;
2228 }   
2229    
2230 static void ia_int(int irq, void *dev_id, struct pt_regs *regs)  
2231 {  
2232    struct atm_dev *dev;  
2233    IADEV *iadev;  
2234    unsigned int status;  
2235
2236    dev = dev_id;  
2237    iadev = INPH_IA_DEV(dev);  
2238    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2239    { 
2240         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2241         if (status & STAT_REASSINT)  
2242         {  
2243            /* do something */  
2244            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2245            rx_intr(dev);  
2246         }  
2247         if (status & STAT_DLERINT)  
2248         {  
2249            /* Clear this bit by writing a 1 to it. */  
2250            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2251            rx_dle_intr(dev);  
2252         }  
2253         if (status & STAT_SEGINT)  
2254         {  
2255            /* do something */ 
2256            IF_EVENT(printk("IA: tx_intr \n");) 
2257            tx_intr(dev);  
2258         }  
2259         if (status & STAT_DLETINT)  
2260         {  
2261            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2262            tx_dle_intr(dev);  
2263         }  
2264         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2265         {  
2266            if (status & STAT_FEINT) 
2267                IaFrontEndIntr(iadev);
2268         }  
2269    }  
2270 }  
2271           
2272           
2273           
2274 /*----------------------------- entries --------------------------------*/  
2275 static int get_esi(struct atm_dev *dev)  
2276 {  
2277         IADEV *iadev;  
2278         int i;  
2279         u32 mac1;  
2280         u16 mac2;  
2281           
2282         iadev = INPH_IA_DEV(dev);  
2283         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2284                                 iadev->reg+IPHASE5575_MAC1)));  
2285         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2286         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
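        /* The 6-byte ESI is assembled MSB-first: the MAC1 dword supplies the
           first MAC1_LEN bytes and the MAC2 word the remaining MAC2_LEN
           (presumably 4 + 2 == ESI_LEN). */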
2287         for (i=0; i<MAC1_LEN; i++)  
2288                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2289           
2290         for (i=0; i<MAC2_LEN; i++)  
2291                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2292         return 0;  
2293 }  
2294           
2295 static int reset_sar(struct atm_dev *dev)  
2296 {  
2297         IADEV *iadev;  
2298         int i, error = 1;  
2299         unsigned int pci[64];  
2300           
2301         iadev = INPH_IA_DEV(dev);  
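        /* Save the first 64 dwords of PCI config space, write the external
           reset register (which presumably also clobbers the config), then
           restore the config and give the SAR a few microseconds to settle. */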
2302         for(i=0; i<64; i++)  
2303           if ((error = pci_read_config_dword(iadev->pci,  
2304                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2305               return error;  
2306         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2307         for(i=0; i<64; i++)  
2308           if ((error = pci_write_config_dword(iadev->pci,  
2309                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2310             return error;  
2311         udelay(5);  
2312         return 0;  
2313 }  
2314           
2315           
2316 #if LINUX_VERSION_CODE >= 0x20312
2317 static int __init ia_init(struct atm_dev *dev)
2318 #else
2319 __initfunc(static int ia_init(struct atm_dev *dev))
2320 #endif  
2321 {  
2322         IADEV *iadev;  
2323         unsigned long real_base, base;  
2324         unsigned short command;  
2325         unsigned char revision;  
2326         int error, i; 
2327           
2328         /* The device has been identified and registered. Now we read   
2329            necessary configuration info like memory base address,   
2330            interrupt number etc */  
2331           
2332         IF_INIT(printk(">ia_init\n");)  
2333         dev->ci_range.vpi_bits = 0;  
2334         dev->ci_range.vci_bits = NR_VCI_LD;  
2335
2336         iadev = INPH_IA_DEV(dev);  
2337         real_base = pci_resource_start (iadev->pci, 0);
2338         iadev->irq = iadev->pci->irq;
2339                   
2340         if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND,&command))   
2341                     || (error = pci_read_config_byte(iadev->pci,   
2342                                 PCI_REVISION_ID,&revision)))   
2343         {  
2344                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2345                                 dev->number,error);  
2346                 return -EINVAL;  
2347         }  
2348         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2349                         dev->number, revision, real_base, iadev->irq);)  
2350           
2351         /* find mapping size of board */  
2352           
2353         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2354
2355         if (iadev->pci_map_size == 0x100000){
2356           iadev->num_vc = 4096;
2357           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2358           iadev->memSize = 4;
2359         }
2360         else if (iadev->pci_map_size == 0x40000) {
2361           iadev->num_vc = 1024;
2362           iadev->memSize = 1;
2363         }
2364         else {
2365            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2366            return -EINVAL;
2367         }
2368         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2369           
2370         /* enable bus mastering */
2371         pci_set_master(iadev->pci);
2372
2373         /*  
2374          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2375          */  
2376         udelay(10);  
2377           
2378         /* mapping the physical address to a virtual address in address space */  
2379         base=(unsigned long)ioremap((unsigned long)real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2380           
2381         if (!base)  
2382         {  
2383                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2384                             dev->number);  
2385                 return -ENOMEM;  
2386         }  
2387         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=0x%lx,irq=%d\n",  
2388                         dev->number, revision, base, iadev->irq);)  
2389           
2390         /* filling the iphase dev structure */  
2391         iadev->mem = iadev->pci_map_size /2;  
2392         iadev->base_diff = real_base - base;  
2393         iadev->real_base = real_base;  
2394         iadev->base = base;  
2395                   
2396         /* Bus Interface Control Registers */  
2397         iadev->reg = (u32 *) (base + REG_BASE);  
2398         /* Segmentation Control Registers */  
2399         iadev->seg_reg = (u32 *) (base + SEG_BASE);  
2400         /* Reassembly Control Registers */  
2401         iadev->reass_reg = (u32 *) (base + REASS_BASE);  
2402         /* Front end/ DMA control registers */  
2403         iadev->phy = (u32 *) (base + PHY_BASE);  
2404         iadev->dma = (u32 *) (base + PHY_BASE);  
2405         /* RAM - Segmentation RAM and Reassembly RAM */  
2406         iadev->ram = (u32 *) (base + ACTUAL_RAM_BASE);  
2407         iadev->seg_ram =  (base + ACTUAL_SEG_RAM_BASE);  
2408         iadev->reass_ram = (base + ACTUAL_REASS_RAM_BASE);  
2409   
2410         /* let's print out the above */  
2411         IF_INIT(printk("Base addrs: %08x %08x %08x \n %08x %08x %08x %08x\n", 
2412           (u32)iadev->reg,(u32)iadev->seg_reg,(u32)iadev->reass_reg, 
2413           (u32)iadev->phy, (u32)iadev->ram, (u32)iadev->seg_ram, 
2414           (u32)iadev->reass_ram);) 
2415           
2416         /* let's try reading the MAC address */  
2417         error = get_esi(dev);  
2418         if (error) {
2419           iounmap((void *) iadev->base);
2420           return error;  
2421         }
2422         printk("IA: ");
2423         for (i=0; i < ESI_LEN; i++)  
2424                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2425         printk("\n");  
2426   
2427         /* reset SAR */  
2428         if (reset_sar(dev)) {
2429            iounmap((void *) iadev->base);
2430            printk("IA: reset SAR failed, please try again\n");
2431            return 1;
2432         }
2433         return 0;  
2434 }  
2435
2436 static void ia_update_stats(IADEV *iadev) {
2437     if (!iadev->carrier_detect)
2438         return;
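    /* Each 32-bit counter below is assembled from a low/high pair of 16-bit
       registers; the registers are assumed to clear on read, since every
       tick's reading is accumulated into the running totals. */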
2439     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2440     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2441     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2442     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2443     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2444     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2445     return;
2446 }
2447   
2448 static void ia_led_timer(unsigned long arg) {
2449         unsigned long flags;
2450         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2451         u_char i;
2452         static u32 ctrl_reg; 
2453         for (i = 0; i < iadev_count; i++) {
2454            if (ia_dev[i]) {
2455               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2456               if (blinking[i] == 0) {
2457                  blinking[i]++;
2458                  ctrl_reg &= (~CTRL_LED);
2459                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2460                  ia_update_stats(ia_dev[i]);
2461               }
2462               else {
2463                  blinking[i] = 0;
2464                  ctrl_reg |= CTRL_LED;
2465                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2466                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2467                  if (ia_dev[i]->close_pending)  
2468                     wake_up(&ia_dev[i]->close_wait);
2469                  ia_tx_poll(ia_dev[i]);
2470                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2471               }
2472            }
2473         }
2474         mod_timer(&ia_timer, jiffies + HZ / 4);
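        /* Re-armed every quarter second: alternate ticks clear the LED bit
           and refresh the counters, or set it and poll the TX side. */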
2475         return;
2476 }
2477
2478 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2479         unsigned long addr)  
2480 {  
2481         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2482 }  
2483   
2484 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2485 {  
2486         return readl(INPH_IA_DEV(dev)->phy+addr);  
2487 }  
2488
2489 static void ia_free_tx(IADEV *iadev)
2490 {
2491         int i;
2492
2493         kfree(iadev->desc_tbl);
2494         for (i = 0; i < iadev->num_vc; i++)
2495                 kfree(iadev->testTable[i]);
2496         kfree(iadev->testTable);
2497         for (i = 0; i < iadev->num_tx_desc; i++) {
2498                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2499
2500                 pci_unmap_single(iadev->pci, desc->dma_addr,
2501                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2502                 kfree(desc->cpcs);
2503         }
2504         kfree(iadev->tx_buf);
2505         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2506                             iadev->tx_dle_dma);  
2507 }
2508
2509 static void ia_free_rx(IADEV *iadev)
2510 {
2511         kfree(iadev->rx_open);
2512         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2513                             iadev->rx_dle_dma);  
2514 }
2515
2516 #if LINUX_VERSION_CODE >= 0x20312
2517 static int __init ia_start(struct atm_dev *dev)
2518 #else
2519 __initfunc(static int ia_start(struct atm_dev *dev))
2520 #endif  
2521 {  
2522         IADEV *iadev;  
2523         int error;  
2524         unsigned char phy;  
2525         u32 ctrl_reg;  
2526         IF_EVENT(printk(">ia_start\n");)  
2527         iadev = INPH_IA_DEV(dev);  
2528         if (request_irq(iadev->irq, &ia_int, SA_SHIRQ, DEV_LABEL, dev)) {  
2529                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2530                     dev->number, iadev->irq);  
2531                 error = -EAGAIN;
2532                 goto err_out;
2533         }  
2534         /* the error paths below release the IRQ */  
2535         /* enabling memory + master */  
2536         if ((error = pci_write_config_word(iadev->pci,   
2537                                 PCI_COMMAND,   
2538                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2539         {  
2540                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2541                     "master (0x%x)\n",dev->number, error);  
2542                 error = -EIO;  
2543                 goto err_free_irq;
2544         }  
2545         udelay(10);  
2546   
2547         /* Maybe we should reset the front end, initialize Bus Interface Control   
2548                 Registers and see. */  
2549   
2550         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2551                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2552         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2553         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2554                         | CTRL_B8  
2555                         | CTRL_B16  
2556                         | CTRL_B32  
2557                         | CTRL_B48  
2558                         | CTRL_B64  
2559                         | CTRL_B128  
2560                         | CTRL_ERRMASK  
2561                         | CTRL_DLETMASK         /* should be removed later */  
2562                         | CTRL_DLERMASK  
2563                         | CTRL_SEGMASK  
2564                         | CTRL_REASSMASK          
2565                         | CTRL_FEMASK  
2566                         | CTRL_CSPREEMPT;  
2567   
2568        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2569   
2570         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2571                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2572            printk("Bus status reg after init: %08x\n", 
2573                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2574     
2575         ia_hw_type(iadev); 
2576         error = tx_init(dev);  
2577         if (error)
2578                 goto err_free_irq;
2579         error = rx_init(dev);  
2580         if (error)
2581                 goto err_free_tx;
2582   
2583         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2584         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2585         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2586                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2587         phy = 0; /* resolve compiler complaint */
2588         IF_INIT ( 
2589         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2590                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2591         else  
2592                 printk("IA: utopia,rev.%0x\n",phy);) 
2593
2594         if (iadev->phy_type &  FE_25MBIT_PHY) {
2595            ia_mb25_init(iadev);
2596         } else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY)) {
2597            ia_suni_pm7345_init(iadev);
2598         } else {
2599                 error = suni_init(dev);
2600                 if (error)
2601                         goto err_free_rx;
2602                 /* 
2603                  * Enable interrupt on loss of signal
2604                  * SUNI_RSOP_CIE - 0x10
2605                  * SUNI_RSOP_CIE_LOSE - 0x04
2606                  */
2607                 ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2608 #ifndef MODULE
2609                 error = dev->phy->start(dev);
2610                 if (error)
2611                         goto err_free_rx;
2612 #endif
2613                 /* Get iadev->carrier_detect status */
2614                 IaFrontEndIntr(iadev);
2615         }
2616         return 0;
2617
2618 err_free_rx:
2619         ia_free_rx(iadev);
2620 err_free_tx:
2621         ia_free_tx(iadev);
2622 err_free_irq:
2623         free_irq(iadev->irq, dev);  
2624 err_out:
2625         return error;
2626 }  
2627   
2628 static void ia_close(struct atm_vcc *vcc)  
2629 {  
2630         u16 *vc_table;
2631         IADEV *iadev;
2632         struct ia_vcc *ia_vcc;
2633         struct sk_buff *skb = NULL;
2634         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2635         unsigned long closetime, flags;
2636         int ctimeout;
2637
2638         iadev = INPH_IA_DEV(vcc->dev);
2639         ia_vcc = INPH_IA_VCC(vcc);
2640         if (!ia_vcc) return;  
2641
2642         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2643                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2644         clear_bit(ATM_VF_READY,&vcc->flags);
2645         skb_queue_head_init (&tmp_tx_backlog);
2646         skb_queue_head_init (&tmp_vcc_backlog); 
2647         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2648            iadev->close_pending++;
2649            sleep_on_timeout(&iadev->timeout_wait, 50);
2650            spin_lock_irqsave(&iadev->tx_lock, flags); 
2651            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2652               if (ATM_SKB(skb)->vcc == vcc){ 
2653                  if (vcc->pop) vcc->pop(vcc, skb);
2654                  else dev_kfree_skb_any(skb);
2655               }
2656               else 
2657                  skb_queue_tail(&tmp_tx_backlog, skb);
2658            } 
2659            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2660              skb_queue_tail(&iadev->tx_backlog, skb);
2661            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2662            closetime = jiffies;
2663            ctimeout = 300000 / ia_vcc->pcr;
2664            if (ctimeout == 0)
2665               ctimeout = 1;
2666            while (ia_vcc->vc_desc_cnt > 0){
2667               if ((jiffies - closetime) >= ctimeout) 
2668                  break;
2669               spin_unlock_irqrestore(&iadev->tx_lock, flags);
2670               sleep_on(&iadev->close_wait);
2671               spin_lock_irqsave(&iadev->tx_lock, flags);
2672            }    
2673            iadev->close_pending--;
2674            iadev->testTable[vcc->vci]->lastTime = 0;
2675            iadev->testTable[vcc->vci]->fract = 0; 
2676            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2677            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2678               if (vcc->qos.txtp.min_pcr > 0)
2679                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2680            }
2681            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2682               ia_vcc = INPH_IA_VCC(vcc); 
2683               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2684               ia_cbrVc_close (vcc);
2685            }
2686            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2687         }
2688         
2689         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2690            // reset reass table
2691            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2692            vc_table += vcc->vci; 
2693            *vc_table = NO_AAL5_PKT;
2694            // reset vc table
2695            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2696            vc_table += vcc->vci;
2697            *vc_table = (vcc->vci << 6) | 15;
2698            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2699               struct abr_vc_table *abr_vc_table = (struct abr_vc_table *)
2700                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2701               abr_vc_table +=  vcc->vci;
2702               abr_vc_table->rdf = 0x0003;
2703               abr_vc_table->air = 0x5eb1;
2704            }                                 
2705            // Drain the packets
2706            rx_dle_intr(vcc->dev); 
2707            iadev->rx_open[vcc->vci] = 0;
2708         }
2709         kfree(INPH_IA_VCC(vcc));  
2710         ia_vcc = NULL;
2711         INPH_IA_VCC(vcc) = NULL;  
2712         clear_bit(ATM_VF_ADDR,&vcc->flags);
2713         return;        
2714 }  
2715   
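     /*
      * ia_open() - activate a VPI/VCI on the adapter.  Resolves the requested
      * connection identifier, allocates the per-VC state (struct ia_vcc) and
      * then sets up the receive and transmit sides; on any failure the partly
      * opened VC is torn down again through ia_close().
      */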
2716 static int ia_open(struct atm_vcc *vcc, short vpi, int vci)  
2717 {  
2718         IADEV *iadev;  
2719         struct ia_vcc *ia_vcc;  
2720         int error;  
2721         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2722         {  
2723                 IF_EVENT(printk("ia_open: no partially allocated resources\n");)  
2724                 INPH_IA_VCC(vcc) = NULL;  
2725         }  
2726         iadev = INPH_IA_DEV(vcc->dev);  
2727         error = atm_find_ci(vcc, &vpi, &vci);  
2728         if (error)   
2729         {  
2730             printk("iadev: atm_find_ci returned error %d\n", error);  
2731             return error;  
2732         }  
2733         vcc->vpi = vpi;  
2734         vcc->vci = vci;  
2735         if (vci != ATM_VCI_UNSPEC && vpi != ATM_VPI_UNSPEC)  
2736         {  
2737                 IF_EVENT(printk("iphase open: vpi.vci assigned\n");)  
2738                 set_bit(ATM_VF_ADDR,&vcc->flags);
2739         }  
2740         if (vcc->qos.aal != ATM_AAL5)  
2741                 return -EINVAL;  
2742         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2743                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2744   
2745         /* Device dependent initialization */  
2746         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2747         if (!ia_vcc) return -ENOMEM;  
2748         INPH_IA_VCC(vcc) = ia_vcc;  
2749   
2750         if ((error = open_rx(vcc)))  
2751         {  
2752                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2753                 ia_close(vcc);  
2754                 return error;  
2755         }  
2756   
2757         if ((error = open_tx(vcc)))  
2758         {  
2759                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2760                 ia_close(vcc);  
2761                 return error;  
2762         }  
2763   
2764         set_bit(ATM_VF_READY,&vcc->flags);
2765
2766 #ifndef MODULE
2767         {
2768            static u8 first = 1; 
2769            if (first) {
2770               ia_timer.expires = jiffies + 3*HZ;
2771               add_timer(&ia_timer);
2772               first = 0;
2773            }           
2774         }
2775 #endif
2776         IF_EVENT(printk("ia open returning\n");)  
2777         return 0;  
2778 }  
2779   
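     /* QoS changes are accepted but never pushed down to the hardware; this
      * is effectively a no-op that always reports success. */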
2780 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2781 {  
2782         IF_EVENT(printk(">ia_change_qos\n");)  
2783         return 0;  
2784 }  
2785   
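     /*
      * ia_ioctl() - driver-private debug interface.  IA_CMD requests select a
      * board via ia_cmds.status and offer MEMDUMP sub-commands for dumping the
      * device structure, the segmentation/reassembly registers and the SONET
      * counters, plus hooks for replenishing the RX free queue and setting
      * IADebugFlag.  Any other command is handed to the PHY driver.
      */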
2786 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)  
2787 {  
2788    IA_CMDBUF ia_cmds;
2789    IADEV *iadev;
2790    int i, board;
2791    u16 *tmps;
2792    IF_EVENT(printk(">ia_ioctl\n");)  
2793    if (cmd != IA_CMD) {
2794       if (!dev->phy->ioctl) return -EINVAL;
2795       return dev->phy->ioctl(dev,cmd,arg);
2796    }
2797    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2798    board = ia_cmds.status;
2799    if ((board < 0) || (board >= iadev_count))
2800          board = 0;    
2801    iadev = ia_dev[board];
2802    switch (ia_cmds.cmd) {
2803    case MEMDUMP:
2804    {
2805         switch (ia_cmds.sub_cmd) {
2806           case MEMDUMP_DEV:     
2807              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2808              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2809                 return -EFAULT;
2810              ia_cmds.status = 0;
2811              break;
2812           case MEMDUMP_SEGREG:
2813              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2814              tmps = (u16 *)ia_cmds.buf;
2815              for(i=0; i<0x80; i+=2, tmps++)
2816                 if(put_user(*(u16*)(iadev->seg_reg+i), tmps)) return -EFAULT;
2817              ia_cmds.status = 0;
2818              ia_cmds.len = 0x80;
2819              break;
2820           case MEMDUMP_REASSREG:
2821              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2822              tmps = (u16 *)ia_cmds.buf;
2823              for(i=0; i<0x80; i+=2, tmps++)
2824                 if(put_user(*(u16*)(iadev->reass_reg+i), tmps)) return -EFAULT;
2825              ia_cmds.status = 0;
2826              ia_cmds.len = 0x80;
2827              break;
2828           case MEMDUMP_FFL:
2829           {  
2830              ia_regs_t       regs_local;
2831              ffredn_t        *ffL = &regs_local.ffredn;
2832              rfredn_t        *rfL = &regs_local.rfredn;
2833                      
2834              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2835              /* Copy real rfred registers into the local copy */
2836              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2837                 ((u_int *)rfL)[i] = ((u_int *)iadev->reass_reg)[i] & 0xffff;
2838              /* Copy real ffred registers into the local copy */
2839              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2840                 ((u_int *)ffL)[i] = ((u_int *)iadev->seg_reg)[i] & 0xffff;
2841
2842              if (copy_to_user(ia_cmds.buf, &regs_local,sizeof(ia_regs_t)))
2843                 return -EFAULT;
2844              printk("Board %d registers dumped\n", board);
2845              ia_cmds.status = 0;                  
2846          }      
2847              break;        
2848          case READ_REG:
2849          {  
2850              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2851              desc_dbg(iadev); 
2852              ia_cmds.status = 0; 
2853          }
2854              break;
2855          case 0x6:
2856          {  
2857              ia_cmds.status = 0; 
2858              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2859              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2860          }
2861              break;
2862          case 0x8:
2863          {
2864              struct k_sonet_stats *stats;
2865              stats = &PRIV(_ia_dev[board])->sonet_stats;
2866              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2867              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2868              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2869              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2870              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2871              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2872              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2873              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2874              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2875          }
2876             ia_cmds.status = 0;
2877             break;
2878          case 0x9:
2879             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2880             for (i = 1; i <= iadev->num_rx_desc; i++)
2881                free_desc(_ia_dev[board], i);
2882             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2883                                             iadev->reass_reg+REASS_MASK_REG);
2884             iadev->rxing = 1;
2885             
2886             ia_cmds.status = 0;
2887             break;
2888
2889          case 0xb:
2890             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2891             IaFrontEndIntr(iadev);
2892             break;
2893          case 0xa:
2894          {  
2895              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2896              ia_cmds.status = 0; 
2897              IADebugFlag = ia_cmds.maddr;
2898              printk("New debug option loaded\n");
2899          }
2900              break;
2901          default:
2902              ia_cmds.status = 0;
2903              break;
2904       } 
2905    }
2906       break;
2907    default:
2908       break;
2909
2910    }    
2911    return 0;  
2912 }  
2913   
2914 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2915         void *optval, int optlen)  
2916 {  
2917         IF_EVENT(printk(">ia_getsockopt\n");)  
2918         return -EINVAL;  
2919 }  
2920   
2921 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2922         void *optval, int optlen)  
2923 {  
2924         IF_EVENT(printk(">ia_setsockopt\n");)  
2925         return -EINVAL;  
2926 }  
2927   
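     /*
      * ia_pkt_tx() - hand one AAL5 packet to the segmentation engine.  Called
      * from ia_send() with tx_lock held.  Pops a free descriptor from the TCQ,
      * records the skb/VC against it, posts the descriptor in the packet ready
      * queue, fills in the transmit buffer descriptor and queues two DMA list
      * entries (payload + CPCS trailer).  Returns nonzero when no descriptor
      * is free so the caller can back-log the skb.
      */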
2928 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2929         IADEV *iadev;
2930         struct dle *wr_ptr;
2931         struct tx_buf_desc *buf_desc_ptr;
2932         int desc;
2933         int comp_code;
2934         int total_len, pad, last;
2935         struct cpcs_trailer *trailer;
2936         struct ia_vcc *iavcc;
2937
2938         iadev = INPH_IA_DEV(vcc->dev);  
2939         iavcc = INPH_IA_VCC(vcc);
2940         if (!iavcc->txing) {
2941            printk("discard packet on closed VC\n");
2942            if (vcc->pop)
2943                 vcc->pop(vcc, skb);
2944            else
2945                 dev_kfree_skb_any(skb);
2946            return 0;
2947         }
2948
2949         if (skb->len > iadev->tx_buf_sz - 8) {
2950            printk("Transmit size over tx buffer size\n");
2951            if (vcc->pop)
2952                  vcc->pop(vcc, skb);
2953            else
2954                  dev_kfree_skb_any(skb);
2955           return 0;
2956         }
2957         if ((u32)skb->data & 3) {
2958            printk("Misaligned SKB\n");
2959            if (vcc->pop)
2960                  vcc->pop(vcc, skb);
2961            else
2962                  dev_kfree_skb_any(skb);
2963            return 0;
2964         }       
2965         /* Get a descriptor number from our free descriptor queue.  
2966            The descriptor number comes from the TCQ, which doubles as the  
2967            free-buffer queue: the TCQ is initialized with all the  
2968            descriptors and therefore starts out full.  
2969         */
2970         desc = get_desc (iadev, iavcc);
2971         if (desc == 0xffff) 
2972             return 1;
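             /* get_desc() hands back a 16-bit TCQ entry: the top three bits
              * carry a completion code, the low 13 bits the descriptor
              * number. */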
2973         comp_code = desc >> 13;  
2974         desc &= 0x1fff;  
2975   
2976         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2977         {  
2978                 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2979                 atomic_inc(&vcc->stats->tx);
2980                 if (vcc->pop)   
2981                     vcc->pop(vcc, skb);   
2982                 else  
2983                     dev_kfree_skb_any(skb);
2984                 return 0;   /* return SUCCESS */
2985         }  
2986   
2987         if (comp_code)  
2988         {  
2989             IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2990                                                             desc, comp_code);)  
2991         }  
2992        
2993         /* remember the desc and vcc mapping */
2994         iavcc->vc_desc_cnt++;
2995         iadev->desc_tbl[desc-1].iavcc = iavcc;
2996         iadev->desc_tbl[desc-1].txskb = skb;
2997         IA_SKB_STATE(skb) = 0;
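             /* Consume the entry from the TCQ (which doubles as the free
              * descriptor queue): advance the read pointer, wrapping from the
              * end back to the start, and report it to the segmentation
              * engine. */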
2998
2999         iadev->ffL.tcq_rd += 2;
3000         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
3001                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
3002         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
3003   
3004         /* Put the descriptor number in the packet ready queue  
3005                 and put the updated write pointer in the DLE field   
3006         */   
3007         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
3008
3009         iadev->ffL.prq_wr += 2;
3010         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
3011                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
3012           
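             /* The 48-byte alignment below pads the CPCS-PDU out to a whole
              * number of ATM cell payloads: e.g. a 100-byte PDU plus the
              * 8-byte trailer is 108 bytes and is padded to 144 (pad = 36),
              * while a 40-byte PDU plus trailer is exactly 48 and needs no
              * padding. */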
3013         /* Figure out the exact length of the packet and the padding required 
3014            to align it on a 48-byte cell boundary.  */
3015         total_len = skb->len + sizeof(struct cpcs_trailer);  
3016         last = total_len % 48;  
3017         pad = last ? 48 - last : 0;  
3018         total_len = pad + total_len;  
3019         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, pad);)  
3020  
3021         /* Put the packet in a tx buffer */   
3022         trailer = iadev->tx_buf[desc-1].cpcs;
3023         IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
3024                   (u32)skb, (u32)skb->data, skb->len, desc);)
3025         trailer->control = 0; 
3026         /*big endian*/ 
3027         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
3028         trailer->crc32 = 0;     /* not needed - dummy bytes */  
3029
3030         /* Display the packet */  
3031         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
3032                                                         skb->len, tcnter++);  
3033         xdump(skb->data, skb->len, "TX: ");
3034         printk("\n");)
3035
3036         /* Build the buffer descriptor */  
3037         buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
3038         buf_desc_ptr += desc;   /* points to the corresponding entry */  
3039         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
3040         /* Huh ? p.115 of users guide describes this as a read-only register */
3041         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3042         buf_desc_ptr->vc_index = vcc->vci;
3043         buf_desc_ptr->bytes = total_len;  
3044
3045         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3046            clear_lockup (vcc, iadev);
3047
3048         /* Build the DLE structure */  
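             /* Two DLEs go out per packet: this one DMAs the payload straight
              * from the skb into adapter packet memory at the address taken
              * from the buffer descriptor; the next one appends the CPCS
              * trailer kept in tx_buf[desc-1]. */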
3049         wr_ptr = iadev->tx_dle_q.write;  
3050         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3051         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3052                 skb->len, PCI_DMA_TODEVICE);
3053         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3054                                                   buf_desc_ptr->buf_start_lo;  
3055         /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */  
3056         wr_ptr->bytes = skb->len;  
3057
3058         /* hw bug - DLE byte counts of 0x2d, 0x2e, 0x2f cause DMA lockup; bump anything in the 0x2c-0x2f range up to 0x30 */
3059         if ((wr_ptr->bytes >> 2) == 0xb)
3060            wr_ptr->bytes = 0x30;
3061
3062         wr_ptr->mode = TX_DLE_PSI; 
3063         wr_ptr->prq_wr_ptr_data = 0;
3064   
3065         /* end is not to be used for the DLE q */  
3066         if (++wr_ptr == iadev->tx_dle_q.end)  
3067                 wr_ptr = iadev->tx_dle_q.start;  
3068         
3069         /* Build trailer dle */
3070         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3071         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3072           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3073
3074         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3075         wr_ptr->mode = DMA_INT_ENABLE; 
3076         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3077         
3078         /* end is not to be used for the DLE q */
3079         if (++wr_ptr == iadev->tx_dle_q.end)  
3080                 wr_ptr = iadev->tx_dle_q.start;
3081
3082         iadev->tx_dle_q.write = wr_ptr;  
3083         ATM_DESC(skb) = vcc->vci;
3084         skb_queue_tail(&iadev->tx_dma_q, skb);
3085
3086         atomic_inc(&vcc->stats->tx);
3087         iadev->tx_pkt_cnt++;
3088         /* Increment transaction counter */  
3089         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3090         
3091 #if 0        
3092         /* add flow control logic */ 
3093         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3094           if (iavcc->vc_desc_cnt > 10) {
3095              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3096             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3097               iavcc->flow_inc = -1;
3098               iavcc->saved_tx_quota = vcc->tx_quota;
3099            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3100              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3101              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3102               iavcc->flow_inc = 0;
3103            }
3104         }
3105 #endif
3106         IF_TX(printk("ia send done\n");)  
3107         return 0;  
3108 }  
3109
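     /*
      * ia_send() - atmdev_ops send hook.  Validates the skb and the VC state,
      * then either hands the packet straight to ia_pkt_tx() or, if a backlog
      * already exists (or no free descriptor was available), appends it to
      * tx_backlog so per-device ordering is preserved and it can be retried
      * later.
      */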
3110 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3111 {
3112         IADEV *iadev; 
3113         struct ia_vcc *iavcc;
3114         unsigned long flags;
3115
3116         iadev = INPH_IA_DEV(vcc->dev);
3117         iavcc = INPH_IA_VCC(vcc); 
3118         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3119         {
3120             if (!skb)
3121                 printk(KERN_CRIT "null skb in ia_send\n");
3122             else dev_kfree_skb_any(skb);
3123             return -EINVAL;
3124         }                         
3125         spin_lock_irqsave(&iadev->tx_lock, flags); 
3126         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3127             dev_kfree_skb_any(skb);
3128             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3129             return -EINVAL; 
3130         }
3131         ATM_SKB(skb)->vcc = vcc;
3132  
3133         if (skb_peek(&iadev->tx_backlog)) {
3134            skb_queue_tail(&iadev->tx_backlog, skb);
3135         }
3136         else {
3137            if (ia_pkt_tx (vcc, skb)) {
3138               skb_queue_tail(&iadev->tx_backlog, skb);
3139            }
3140         }
3141         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3142         return 0;
3143
3144 }
3145
3146 static int ia_sg_send(struct atm_vcc *vcc, unsigned long start,   
3147         unsigned long size)  
3148 {  
3149         IF_EVENT(printk(">ia_sg_send\n");)  
3150         return 0;  
3151 }  
3152   
3153   
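     /*
      * /proc read-out: the first call (*pos == 0) prints a one-line board
      * description (PHY type, VC table size, packet memory size); the second
      * call prints the buffer configuration and the traffic counters.
      */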
3154 static int ia_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
3155 {
3156   int   left = *pos, n;   
3157   char  *tmpPtr;
3158   IADEV *iadev = INPH_IA_DEV(dev);
3159   if(!left--) {
3160      if (iadev->phy_type == FE_25MBIT_PHY) {
3161        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3162        return n;
3163      }
3164      if (iadev->phy_type == FE_DS3_PHY)
3165         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3166      else if (iadev->phy_type == FE_E3_PHY)
3167         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3168      else if (iadev->phy_type == FE_UTP_OPTION)
3169          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3170      else
3171         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3172      tmpPtr = page + n;
3173      if (iadev->pci_map_size == 0x40000)
3174         n += sprintf(tmpPtr, "-1KVC-");
3175      else
3176         n += sprintf(tmpPtr, "-4KVC-");  
3177      tmpPtr = page + n; 
3178      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3179         n += sprintf(tmpPtr, "1M  \n");
3180      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3181         n += sprintf(tmpPtr, "512K\n");
3182      else
3183        n += sprintf(tmpPtr, "128K\n");
3184      return n;
3185   }
3186   if (!left) {
3187      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3188                            "  Size of Tx Buffer  :  %u\n"
3189                            "  Number of Rx Buffer:  %u\n"
3190                            "  Size of Rx Buffer  :  %u\n"
3191                            "  Packets Received   :  %u\n"
3192                            "  Packets Transmitted:  %u\n"
3193                            "  Cells Received     :  %u\n"
3194                            "  Cells Transmitted  :  %u\n"
3195                            "  Board Dropped Cells:  %u\n"
3196                            "  Board Dropped Pkts :  %u\n",
3197                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3198                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3199                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3200                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3201                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3202   }
3203   return 0;
3204 }
3205   
3206 static const struct atmdev_ops ops = {  
3207         .open =         ia_open,  
3208         .close =        ia_close,  
3209         .ioctl =        ia_ioctl,  
3210         .getsockopt =   ia_getsockopt,  
3211         .setsockopt =   ia_setsockopt,  
3212         .send =         ia_send,  
3213         .sg_send =      ia_sg_send,  
3214         .phy_put =      ia_phy_put,  
3215         .phy_get =      ia_phy_get,  
3216         .change_qos =   ia_change_qos,  
3217         .proc_read =    ia_proc_read,
3218         .owner =        THIS_MODULE,
3219 };  
3220           
3221   
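     /*
      * PCI probe: allocate and zero the per-board IADEV, enable the PCI
      * device, register an ATM device for it and bring the hardware up via
      * ia_init()/ia_start().  Failures unwind in reverse order through the
      * err_out labels.
      */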
3222 static int __devinit ia_init_one(struct pci_dev *pdev,
3223                                  const struct pci_device_id *ent)
3224 {  
3225         struct atm_dev *dev;  
3226         IADEV *iadev;  
3227         unsigned long flags;
3228         int ret;
3229
3230         iadev = kmalloc(sizeof(*iadev), GFP_KERNEL); 
3231         if (!iadev) {
3232                 ret = -ENOMEM;
3233                 goto err_out;
3234         }
3235         memset(iadev, 0, sizeof(*iadev));
3236         iadev->pci = pdev;
3237
3238         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3239                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3240         if (pci_enable_device(pdev)) {
3241                 ret = -ENODEV;
3242                 goto err_out_free_iadev;
3243         }
3244         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3245         if (!dev) {
3246                 ret = -ENOMEM;
3247                 goto err_out_disable_dev;
3248         }
3249         INPH_IA_DEV(dev) = iadev; 
3250         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3251         IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
3252                 iadev->LineRate);)
3253
3254         ia_dev[iadev_count] = iadev;
3255         _ia_dev[iadev_count] = dev;
3256         iadev_count++;
3257         spin_lock_init(&iadev->misc_lock);
3258         /* Serialize ia_init()/ia_start() under misc_lock; whether this locking is strictly necessary here has not been revisited. */
3259         spin_lock_irqsave(&iadev->misc_lock, flags); 
3260         if (ia_init(dev) || ia_start(dev)) {  
3261                 IF_INIT(printk("IA register failed!\n");)
3262                 iadev_count--;
3263                 ia_dev[iadev_count] = NULL;
3264                 _ia_dev[iadev_count] = NULL;
3265                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3266                 ret = -EINVAL;
3267                 goto err_out_deregister_dev;
3268         }
3269         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3270         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3271
3272         iadev->next_board = ia_boards;  
3273         ia_boards = dev;  
3274
3275         pci_set_drvdata(pdev, dev);
3276
3277         return 0;
3278
3279 err_out_deregister_dev:
3280         atm_dev_deregister(dev);  
3281 err_out_disable_dev:
3282         pci_disable_device(pdev);
3283 err_out_free_iadev:
3284         kfree(iadev);
3285 err_out:
3286         return ret;
3287 }
3288
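     /*
      * PCI remove: clear bit 2 of PHY register 0x10 (apparently quiescing the
      * front end), free the IRQ, deregister the ATM device, unmap the board
      * and release the RX/TX resources allocated at probe time.
      */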
3289 static void __devexit ia_remove_one(struct pci_dev *pdev)
3290 {
3291         struct atm_dev *dev = pci_get_drvdata(pdev);
3292         IADEV *iadev = INPH_IA_DEV(dev);
3293
3294         ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10); 
3295         udelay(1);
3296
3297         /* De-register device */  
3298         free_irq(iadev->irq, dev);
3299         iadev_count--;
3300         ia_dev[iadev_count] = NULL;
3301         _ia_dev[iadev_count] = NULL;
3302         atm_dev_deregister(dev);
3303         IF_EVENT(printk("iav deregistered at (itf:%d)\n", dev->number);)
3304
3305         iounmap((void *) iadev->base);  
3306         pci_disable_device(pdev);
3307
3308         ia_free_rx(iadev);
3309         ia_free_tx(iadev);
3310
3311         kfree(iadev);
3312 }
3313
3314 static struct pci_device_id ia_pci_tbl[] __devinitdata = {
3315         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3316         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3317         { 0,}
3318 };
3319 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3320
3321 static struct pci_driver ia_driver = {
3322         .name =         DEV_LABEL,
3323         .id_table =     ia_pci_tbl,
3324         .probe =        ia_init_one,
3325         .remove =       ia_remove_one,
3326 };
3327
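     /* Module entry point: register the PCI driver and, on success, arm the
      * global ia_timer with its first expiry three seconds out. */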
3328 static int __init ia_init_module(void)
3329 {
3330         int ret;
3331
3332         ret = pci_module_init(&ia_driver);
3333         if (ret >= 0) {
3334                 ia_timer.expires = jiffies + 3*HZ;
3335                 add_timer(&ia_timer); 
3336         }
3337         return ret;
3338 }
3339
3340 static void __exit ia_cleanup_module(void)
3341 {
3342         pci_unregister_driver(&ia_driver);
3343
3344         del_timer(&ia_timer);
3345 }
3346
3347 module_init(ia_init_module);
3348 module_exit(ia_cleanup_module);