[linux-2.6.git] / drivers / atm / iphase.c
1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21 supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24 packet memory. The following is the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <asm/system.h>  
57 #include <asm/io.h>  
58 #include <asm/atomic.h>  
59 #include <asm/uaccess.h>  
60 #include <asm/string.h>  
61 #include <asm/byteorder.h>  
62 #include <linux/vmalloc.h>  
63 #include "iphase.h"               
64 #include "suni.h"                 
65 #define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))  
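/*
 * swap() byte-swaps a 16-bit value; it is used further down to convert the
 * length field of the AAL5 CPCS trailer, which arrives in network byte
 * order, into host order (the driver appears to assume a little-endian
 * host).  A minimal illustration:
 *
 *     swap(0x3412) == 0x1234
 */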
66 struct suni_priv {
67         struct k_sonet_stats sonet_stats; /* link diagnostics */
68         unsigned char loop_mode;        /* loopback mode */
69         struct atm_dev *dev;            /* device back-pointer */
70         struct suni_priv *next;         /* next SUNI */
71 }; 
72 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
73
74 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
75
76 static IADEV *ia_dev[8];
77 static struct atm_dev *_ia_dev[8];
78 static int iadev_count;
79 static void ia_led_timer(unsigned long arg);
80 static struct timer_list ia_timer = TIMER_INITIALIZER(ia_led_timer, 0, 0);
81 struct atm_vcc *vcc_close_que[100];
82 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
83 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
84 static u32 IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
85             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
86
87 MODULE_PARM(IA_TX_BUF, "i");
88 MODULE_PARM(IA_TX_BUF_SZ, "i");
89 MODULE_PARM(IA_RX_BUF, "i");
90 MODULE_PARM(IA_RX_BUF_SZ, "i");
91 MODULE_PARM(IADebugFlag, "i");
92
93 MODULE_LICENSE("GPL");
94
95 #if BITS_PER_LONG != 32
96 #  error FIXME: this driver only works on 32-bit platforms
97 #endif
98
99 /**************************** IA_LIB **********************************/
100
101 static void ia_init_rtn_q (IARTN_Q *que) 
102 {
103    que->next = NULL; 
104    que->tail = NULL; 
105 }
106
107 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
108 {
109    data->next = NULL;
110    if (que->next == NULL) 
111       que->next = que->tail = data;
112    else {
113       data->next = que->next;
114       que->next = data;
115    } 
116    return;
117 }
118
119 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
120    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
121    if (!entry) return -1;
122    entry->data = data;
123    entry->next = NULL;
124    if (que->next == NULL) 
125       que->next = que->tail = entry;
126    else {
127       que->tail->next = entry;
128       que->tail = que->tail->next;
129    }      
130    return 1;
131 }
132
133 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
134    IARTN_Q *tmpdata;
135    if (que->next == NULL)
136       return NULL;
137    tmpdata = que->next;
138    if ( que->next == que->tail)  
139       que->next = que->tail = NULL;
140    else 
141       que->next = que->next->next;
142    return tmpdata;
143 }
144
145 static void ia_hack_tcq(IADEV *dev) {
146
147   u_short               desc1;
148   u_short               tcq_wr;
149   struct ia_vcc         *iavcc_r = NULL; 
150   extern void desc_dbg(IADEV *iadev);
151
152   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
153   while (dev->host_tcq_wr != tcq_wr) {
154      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
155      if (!desc1) ;
156      else if (!dev->desc_tbl[desc1 -1].timestamp) {
157         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
158         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
159      }                                 
160      else if (dev->desc_tbl[desc1 -1].timestamp) {
161         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
162            printk("IA: Fatal err in get_desc\n");
163            continue;
164         }
165         iavcc_r->vc_desc_cnt--;
166         dev->desc_tbl[desc1 -1].timestamp = 0;
167         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n", 
168                                    (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
169         if (iavcc_r->pcr < dev->rate_limit) {
170            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
171            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
172               printk("ia_hack_tcq: No memory available\n");
173         } 
174         dev->desc_tbl[desc1 -1].iavcc = NULL;
175         dev->desc_tbl[desc1 -1].txskb = NULL;
176      }
177      dev->host_tcq_wr += 2;
178      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
179         dev->host_tcq_wr = dev->ffL.tcq_st;
180   }
181 } /* ia_hack_tcq */
182
183 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
184   u_short               desc_num, i;
185   struct sk_buff        *skb;
186   struct ia_vcc         *iavcc_r = NULL; 
187   unsigned long delta;
188   static unsigned long timer = 0;
189   int ltimeout;
190   extern void desc_dbg(IADEV *iadev);
191
192   ia_hack_tcq (dev);
193   if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){      
194      timer = jiffies; 
195      i=0;
196      while (i < dev->num_tx_desc) {
197         if (!dev->desc_tbl[i].timestamp) {
198            i++;
199            continue;
200         }
201         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
202         delta = jiffies - dev->desc_tbl[i].timestamp;
203         if (delta >= ltimeout) {
204            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
205            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
206               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
207            else 
208               dev->ffL.tcq_rd -= 2;
209            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
210            if (!(skb = dev->desc_tbl[i].txskb) || 
211                           !(iavcc_r = dev->desc_tbl[i].iavcc))
212               printk("Fatal err, desc table vcc or skb is NULL\n");
213            else 
214               iavcc_r->vc_desc_cnt--;
215            dev->desc_tbl[i].timestamp = 0;
216            dev->desc_tbl[i].iavcc = NULL;
217            dev->desc_tbl[i].txskb = NULL;
218         }
219         i++;
220      } /* while */
221   }
222   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
223      return 0xFFFF;
224     
225   /* Get the next available descriptor number from TCQ */
226   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
227
228   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
229      dev->ffL.tcq_rd += 2;
230      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
231      dev->ffL.tcq_rd = dev->ffL.tcq_st;
232      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
233         return 0xFFFF; 
234      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
235   }
236
237   /* get system time */
238   dev->desc_tbl[desc_num -1].timestamp = jiffies;
239   return desc_num;
240 }
241
242 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
243   u_char                foundLockUp;
244   vcstatus_t            *vcstatus;
245   u_short               *shd_tbl;
246   u_short               tempCellSlot, tempFract;
247   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
248   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
249   u_int  i;
250
251   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
252      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
253      vcstatus->cnt++;
254      foundLockUp = 0;
255      if( vcstatus->cnt == 0x05 ) {
256         abr_vc += vcc->vci;
257         eabr_vc += vcc->vci;
258         if( eabr_vc->last_desc ) {
259            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
260               /* Wait for 10 Micro sec */
261               udelay(10);
262               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
263                  foundLockUp = 1;
264            }
265            else {
266               tempCellSlot = abr_vc->last_cell_slot;
267               tempFract    = abr_vc->fraction;
268               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
269                          && (tempFract == dev->testTable[vcc->vci]->fract))
270                  foundLockUp = 1;                   
271               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
272               dev->testTable[vcc->vci]->fract = tempFract; 
273            }        
274         } /* last descriptor */            
275         vcstatus->cnt = 0;      
276      } /* vcstatus->cnt */
277         
278      if (foundLockUp) {
279         IF_ABR(printk("LOCK UP found\n");) 
280         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
281         /* Wait for 10 Micro sec */
282         udelay(10); 
283         abr_vc->status &= 0xFFF8;
284         abr_vc->status |= 0x0001;  /* state is idle */
285         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
286         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
287         if (i < dev->num_vc)
288            shd_tbl[i] = vcc->vci;
289         else
290            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
291         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
292         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
293         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
294         vcstatus->cnt = 0;
295      } /* foundLockUp */
296
297   } /* if an ABR VC */
298
299
300 }
301  
302 /*
303 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
304 **
305 **  +----+----+------------------+-------------------------------+
306 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
307 **  +----+----+------------------+-------------------------------+
308 ** 
309 **    R = reserved (written as 0)
310 **    NZ = 0 if 0 cells/sec; 1 otherwise
311 **
312 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
313 */
314 static u16
315 cellrate_to_float(u32 cr)
316 {
317
318 #define NZ              0x4000
319 #define M_BITS          9               /* Number of bits in mantissa */
320 #define E_BITS          5               /* Number of bits in exponent */
321 #define M_MASK          0x1ff           
322 #define E_MASK          0x1f
323   u16   flot;
324   u32   tmp = cr & 0x00ffffff;
325   int   i   = 0;
326   if (cr == 0)
327      return 0;
328   while (tmp != 1) {
329      tmp >>= 1;
330      i++;
331   }
332   if (i == M_BITS)
333      flot = NZ | (i << M_BITS) | (cr & M_MASK);
334   else if (i < M_BITS)
335      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
336   else
337      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
338   return flot;
339 }
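/*
** A worked example of the encoding above, using the OC-3 peak cell rate
** ATM_OC3_PCR (353207 cells/sec):
**
**   - the highest set bit of 353207 is bit 18, so the loop leaves i = 18
**   - i > M_BITS, so the mantissa is (353207 >> (18 - 9)) & 0x1ff = 0xb1
**   - the result is NZ | (18 << M_BITS) | 0xb1 = 0x64b1
**
** Decoding 0x64b1 gives (0x200 | 0xb1) << (18 - 9) = 689 * 512 = 352768
** cells/sec, i.e. only the bits below the 9-bit mantissa are lost.
*/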
340
341 #if 0
342 /*
343 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
344 */
345 static u32
346 float_to_cellrate(u16 rate)
347 {
348   u32   exp, mantissa, cps;
349   if ((rate & NZ) == 0)
350      return 0;
351   exp = (rate >> M_BITS) & E_MASK;
352   mantissa = rate & M_MASK;
353   if (exp == 0)
354      return 1;
355   cps = (1 << M_BITS) | mantissa;
356   if (exp == M_BITS)
357      cps = cps;
358   else if (exp > M_BITS)
359      cps <<= (exp - M_BITS);
360   else
361      cps >>= (M_BITS - exp);
362   return cps;
363 }
364 #endif 
365
366 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
367   srv_p->class_type = ATM_ABR;
368   srv_p->pcr        = dev->LineRate;
369   srv_p->mcr        = 0;
370   srv_p->icr        = 0x055cb7;
371   srv_p->tbe        = 0xffffff;
372   srv_p->frtt       = 0x3a;
373   srv_p->rif        = 0xf;
374   srv_p->rdf        = 0xb;
375   srv_p->nrm        = 0x4;
376   srv_p->trm        = 0x7;
377   srv_p->cdf        = 0x3;
378   srv_p->adtf       = 50;
379 }
380
381 static int
382 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
383                                                 struct atm_vcc *vcc, u8 flag)
384 {
385   f_vc_abr_entry  *f_abr_vc;
386   r_vc_abr_entry  *r_abr_vc;
387   u32           icr;
388   u8            trm, nrm, crm;
389   u16           adtf, air, *ptr16;      
390   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
391   f_abr_vc += vcc->vci;       
392   switch (flag) {
393      case 1: /* FFRED initialization */
394 #if 0  /* sanity check */
395        if (srv_p->pcr == 0)
396           return INVALID_PCR;
397        if (srv_p->pcr > dev->LineRate)
398           srv_p->pcr = dev->LineRate;
399        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
400           return MCR_UNAVAILABLE;
401        if (srv_p->mcr > srv_p->pcr)
402           return INVALID_MCR;
403        if (!(srv_p->icr))
404           srv_p->icr = srv_p->pcr;
405        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
406           return INVALID_ICR;
407        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
408           return INVALID_TBE;
409        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
410           return INVALID_FRTT;
411        if (srv_p->nrm > MAX_NRM)
412           return INVALID_NRM;
413        if (srv_p->trm > MAX_TRM)
414           return INVALID_TRM;
415        if (srv_p->adtf > MAX_ADTF)
416           return INVALID_ADTF;
417        else if (srv_p->adtf == 0)
418           srv_p->adtf = 1;
419        if (srv_p->cdf > MAX_CDF)
420           return INVALID_CDF;
421        if (srv_p->rif > MAX_RIF)
422           return INVALID_RIF;
423        if (srv_p->rdf > MAX_RDF)
424           return INVALID_RDF;
425 #endif
426        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
427        f_abr_vc->f_vc_type = ABR;
428        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
429                                   /* i.e 2**n = 2 << (n-1) */
430        f_abr_vc->f_nrm = nrm << 8 | nrm;
431        trm = 100000/(2 << (16 - srv_p->trm));
432        if ( trm == 0) trm = 1;
433        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
434        crm = srv_p->tbe / nrm;
435        if (crm == 0) crm = 1;
436        f_abr_vc->f_crm = crm & 0xff;
437        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
438        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
439                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
440                                 (1000000/(srv_p->frtt/srv_p->tbe)));
441        f_abr_vc->f_icr = cellrate_to_float(icr);
442        adtf = (10000 * srv_p->adtf)/8192;
443        if (adtf == 0) adtf = 1; 
444        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
445        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
446        f_abr_vc->f_acr = f_abr_vc->f_icr;
447        f_abr_vc->f_status = 0x0042;
448        break;
449     case 0: /* RFRED initialization */  
450        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
451        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
452        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
453        r_abr_vc += vcc->vci;
454        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
455        air = srv_p->pcr << (15 - srv_p->rif);
456        if (air == 0) air = 1;
457        r_abr_vc->r_air = cellrate_to_float(air);
458        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
459        dev->sum_mcr        += srv_p->mcr;
460        dev->n_abr++;
461        break;
462     default:
463        break;
464   }
465   return        0;
466 }
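/*
** A worked pass through the FFRED encoding above, using the defaults that
** init_abr_vc() fills in (nrm = 0x4, trm = 0x7, tbe = 0xffffff):
**
**   nrm   = 2 << 4            = 32, i.e. one forward RM cell every 2^(4+1)
**                               data cells, so f_nrm = (32 << 8) | 32 = 0x2020
**   trm   = 100000 / (2 << 9) = 97
**   crm   = 0xffffff / 32     = 524287, truncated to 0xff in f_crm
*/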
467 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
468    u32 rateLow=0, rateHigh, rate;
469    int entries;
470    struct ia_vcc *ia_vcc;
471
472    int   idealSlot =0, testSlot, toBeAssigned, inc;
473    u32   spacing;
474    u16  *SchedTbl, *TstSchedTbl;
475    u16  cbrVC, vcIndex;
476    u32   fracSlot    = 0;
477    u32   sp_mod      = 0;
478    u32   sp_mod2     = 0;
479
480    /* IpAdjustTrafficParams */
481    if (vcc->qos.txtp.max_pcr <= 0) {
482       IF_ERR(printk("PCR for CBR not defined\n");)
483       return -1;
484    }
485    rate = vcc->qos.txtp.max_pcr;
486    entries = rate / dev->Granularity;
487    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
488                                 entries, rate, dev->Granularity);)
489    if (entries < 1)
490       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
491    rateLow  =  entries * dev->Granularity;
492    rateHigh = (entries + 1) * dev->Granularity;
493    if (3*(rate - rateLow) > (rateHigh - rate))
494       entries++;
495    if (entries > dev->CbrRemEntries) {
496       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
497       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
498                                        entries, dev->CbrRemEntries);)
499       return -EBUSY;
500    }   
501
502    ia_vcc = INPH_IA_VCC(vcc);
503    ia_vcc->NumCbrEntry = entries; 
504    dev->sum_mcr += entries * dev->Granularity; 
505    /* IaFFrednInsertCbrSched */
506    // Starting at an arbitrary location, place the entries into the table
507    // as smoothly as possible
508    cbrVC   = 0;
509    spacing = dev->CbrTotEntries / entries;
510    sp_mod  = dev->CbrTotEntries % entries; // get modulo
511    toBeAssigned = entries;
512    fracSlot = 0;
513    vcIndex  = vcc->vci;
514    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
515    while (toBeAssigned)
516    {
517       // If this is the first time, start the table loading for this connection
518       // as close to entryPoint as possible.
519       if (toBeAssigned == entries)
520       {
521          idealSlot = dev->CbrEntryPt;
522          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
523          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
524             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
525       } else {
526          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
527          // in the table that would be  smoothest
528          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
529          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
530       }
531       if (idealSlot >= (int)dev->CbrTotEntries) 
532          idealSlot -= dev->CbrTotEntries;  
533       // Continuously check around this ideal value until a null
534       // location is encountered.
535       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
536       inc = 0;
537       testSlot = idealSlot;
538       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
539       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
540                                 testSlot, (u32)TstSchedTbl,toBeAssigned);) 
541       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
542       while (cbrVC)  // If another VC at this location, we have to keep looking
543       {
544           inc++;
545           testSlot = idealSlot - inc;
546           if (testSlot < 0) { // Wrap if necessary
547              testSlot += dev->CbrTotEntries;
548              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
549                                                        (u32)SchedTbl,testSlot);)
550           }
551           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
552           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
553           if (!cbrVC)
554              break;
555           testSlot = idealSlot + inc;
556           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
557              testSlot -= dev->CbrTotEntries;
558              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
559              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
560                                             testSlot, toBeAssigned);)
561           } 
562           // set table index and read in value
563           TstSchedTbl = (u16*)(SchedTbl + testSlot);
564           IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
565                           (u32)TstSchedTbl,cbrVC,inc);) 
566           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
567        } /* while */
568        // Move this VCI number into this location of the CBR Sched table.
569        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(*TstSchedTbl));
570        dev->CbrRemEntries--;
571        toBeAssigned--;
572    } /* while */ 
573
574    /* IaFFrednCbrEnable */
575    dev->NumEnabledCBR++;
576    if (dev->NumEnabledCBR == 1) {
577        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
578        IF_CBR(printk("CBR is enabled\n");)
579    }
580    return 0;
581 }
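/*
 * A short trace of the spacing logic in ia_cbr_setup(), assuming a
 * hypothetical CbrTotEntries of 10, entries = 3 and CbrEntryPt = 0:
 *
 *   spacing = 10 / 3 = 3,  sp_mod = 10 % 3 = 1
 *   pass 1:  idealSlot = 0            (slot 0)
 *   pass 2:  idealSlot += 3 + 0 = 3   (slot 3, sp_mod2 becomes 1)
 *   pass 3:  idealSlot += 3 + 0 = 6   (slot 6, sp_mod2 becomes 2)
 *
 * The fractional remainder accumulates in sp_mod2 and, once it reaches
 * "entries", spills over into fracSlot, which keeps longer runs evenly
 * spread.  Slots that are already occupied are resolved by the +/- inc
 * probing in the inner while loop.
 */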
582 static void ia_cbrVc_close (struct atm_vcc *vcc) {
583    IADEV *iadev;
584    u16 *SchedTbl, NullVci = 0;
585    u32 i, NumFound;
586
587    iadev = INPH_IA_DEV(vcc->dev);
588    iadev->NumEnabledCBR--;
589    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
590    if (iadev->NumEnabledCBR == 0) {
591       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
592       IF_CBR (printk("CBR support disabled\n");)
593    }
594    NumFound = 0;
595    for (i=0; i < iadev->CbrTotEntries; i++)
596    {
597       if (*SchedTbl == vcc->vci) {
598          iadev->CbrRemEntries++;
599          *SchedTbl = NullVci;
600          IF_CBR(NumFound++;)
601       }
602       SchedTbl++;   
603    } 
604    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
605 }
606
607 static int ia_avail_descs(IADEV *iadev) {
608    int tmp = 0;
609    ia_hack_tcq(iadev);
610    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
611       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
612    else
613       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
614                    iadev->ffL.tcq_st) / 2;
615    return tmp;
616 }    
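/*
 * Each TCQ slot is two bytes, hence the divide by two above.  A worked
 * wrapped-around case, with hypothetical queue bounds tcq_st = 0x1000,
 * tcq_ed = 0x10fe, tcq_rd = 0x10f0 and host_tcq_wr = 0x1010:
 *
 *   (0x10fe - 0x10f0 + 2 + 0x1010 - 0x1000) / 2 = (14 + 2 + 16) / 2 = 16
 *
 * i.e. 8 available descriptors up to the end of the ring plus 8 more from
 * its start.
 */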
617
618 static int ia_que_tx (IADEV *iadev) { 
619    struct sk_buff *skb;
620    int num_desc;
621    struct atm_vcc *vcc;
622    struct ia_vcc *iavcc;
623    static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
624    num_desc = ia_avail_descs(iadev);
625
626    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
627       if (!(vcc = ATM_SKB(skb)->vcc)) {
628          dev_kfree_skb_any(skb);
629          printk("ia_que_tx: Null vcc\n");
630          break;
631       }
632       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
633          dev_kfree_skb_any(skb);
634          printk("Free the SKB on closed vci %d \n", vcc->vci);
635          break;
636       }
637       iavcc = INPH_IA_VCC(vcc);
638       if (ia_pkt_tx (vcc, skb)) {
639          skb_queue_head(&iadev->tx_backlog, skb);
640       }
641       num_desc--;
642    }
643    return 0;
644 }
645
646 void ia_tx_poll (IADEV *iadev) {
647    struct atm_vcc *vcc = NULL;
648    struct sk_buff *skb = NULL, *skb1 = NULL;
649    struct ia_vcc *iavcc;
650    IARTN_Q *  rtne;
651
652    ia_hack_tcq(iadev);
653    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
654        skb = rtne->data.txskb;
655        if (!skb) {
656            printk("ia_tx_poll: skb is null\n");
657            goto out;
658        }
659        vcc = ATM_SKB(skb)->vcc;
660        if (!vcc) {
661            printk("ia_tx_poll: vcc is null\n");
662            dev_kfree_skb_any(skb);
663            goto out;
664        }
665
666        iavcc = INPH_IA_VCC(vcc);
667        if (!iavcc) {
668            printk("ia_tx_poll: iavcc is null\n");
669            dev_kfree_skb_any(skb);
670            goto out;
671        }
672
673        skb1 = skb_dequeue(&iavcc->txing_skb);
674        while (skb1 && (skb1 != skb)) {
675           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
676              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
677           }
678           IF_ERR(printk("Releasing an skb that does not match\n");)
679           if ((vcc->pop) && (skb1->len != 0))
680           {
681              vcc->pop(vcc, skb1);
682              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
683                                                           (long)skb1);)
684           }
685           else 
686              dev_kfree_skb_any(skb1);
687           skb1 = skb_dequeue(&iavcc->txing_skb);
688        }                                                        
689        if (!skb1) {
690           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
691           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
692           break;
693        }
694        if ((vcc->pop) && (skb->len != 0))
695        {
696           vcc->pop(vcc, skb);
697           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
698        }
699        else 
700           dev_kfree_skb_any(skb);
701        kfree(rtne);
702     }
703     ia_que_tx(iadev);
704 out:
705     return;
706 }
707 #if 0
708 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
709 {
710         u32     t;
711         int     i;
712         /*
713          * Issue a command to enable writes to the NOVRAM
714          */
715         NVRAM_CMD (EXTEND + EWEN);
716         NVRAM_CLR_CE;
717         /*
718          * issue the write command
719          */
720         NVRAM_CMD(IAWRITE + addr);
721         /* 
722          * Send the data, starting with D15, then D14, and so on for 16 bits
723          */
724         for (i=15; i>=0; i--) {
725                 NVRAM_CLKOUT (val & 0x8000);
726                 val <<= 1;
727         }
728         NVRAM_CLR_CE;
729         CFG_OR(NVCE);
730         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
731         while (!(t & NVDO))
732                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
733
734         NVRAM_CLR_CE;
735         /*
736          * disable writes again
737          */
738         NVRAM_CMD(EXTEND + EWDS)
739         NVRAM_CLR_CE;
740         CFG_AND(~NVDI);
741 }
742 #endif
743
744 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
745 {
746         u_short val;
747         u32     t;
748         int     i;
749         /*
750          * Read the first bit that was clocked with the falling edge of the
751          * last command data clock
752          */
753         NVRAM_CMD(IAREAD + addr);
754         /*
755          * Now read the rest of the bits, the next bit read is D14, then D13,
756          * and so on.
757          */
758         val = 0;
759         for (i=15; i>=0; i--) {
760                 NVRAM_CLKIN(t);
761                 val |= (t << i);
762         }
763         NVRAM_CLR_CE;
764         CFG_AND(~NVDI);
765         return val;
766 }
767
768 static void ia_hw_type(IADEV *iadev) {
769    u_short memType = ia_eeprom_get(iadev, 25);   
770    iadev->memType = memType;
771    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
772       iadev->num_tx_desc = IA_TX_BUF;
773       iadev->tx_buf_sz = IA_TX_BUF_SZ;
774       iadev->num_rx_desc = IA_RX_BUF;
775       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
776    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
777       if (IA_TX_BUF == DFL_TX_BUFFERS)
778         iadev->num_tx_desc = IA_TX_BUF / 2;
779       else 
780         iadev->num_tx_desc = IA_TX_BUF;
781       iadev->tx_buf_sz = IA_TX_BUF_SZ;
782       if (IA_RX_BUF == DFL_RX_BUFFERS)
783         iadev->num_rx_desc = IA_RX_BUF / 2;
784       else
785         iadev->num_rx_desc = IA_RX_BUF;
786       iadev->rx_buf_sz = IA_RX_BUF_SZ;
787    }
788    else {
789       if (IA_TX_BUF == DFL_TX_BUFFERS) 
790         iadev->num_tx_desc = IA_TX_BUF / 8;
791       else
792         iadev->num_tx_desc = IA_TX_BUF;
793       iadev->tx_buf_sz = IA_TX_BUF_SZ;
794       if (IA_RX_BUF == DFL_RX_BUFFERS)
795         iadev->num_rx_desc = IA_RX_BUF / 8;
796       else
797         iadev->num_rx_desc = IA_RX_BUF;
798       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
799    } 
800    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
801    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
802          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
803          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
804
805 #if 0
806    if ((memType & FE_MASK) == FE_SINGLE_MODE)
807       iadev->phy_type = PHY_OC3C_S;
808    else if ((memType & FE_MASK) == FE_UTP_OPTION)
809       iadev->phy_type = PHY_UTP155;
810    else
811      iadev->phy_type = PHY_OC3C_M;
812 #endif
813    
814    iadev->phy_type = memType & FE_MASK;
815    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
816                                          memType,iadev->phy_type);)
817    if (iadev->phy_type == FE_25MBIT_PHY) 
818       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
819    else if (iadev->phy_type == FE_DS3_PHY)
820       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
821    else if (iadev->phy_type == FE_E3_PHY) 
822       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
823    else
824        iadev->LineRate = (u32)(ATM_OC3_PCR);
825    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
826
827 }
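/*
 * The LineRate expressions above turn a PHY bit rate into a peak cell rate:
 * divide by 8 for bytes/sec, scale by 26/27 (apparently allowing for
 * physical-layer framing overhead) and divide by the 53-byte cell size.
 * For the 25.6 Mbit UTP25 PHY, for example:
 *
 *   ((25600000 / 8) * 26) / (27 * 53) = 83200000 / 1431 = 58141 cells/sec
 */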
828
829 static void IaFrontEndIntr(IADEV *iadev) {
830   volatile IA_SUNI *suni;
831   volatile ia_mb25_t *mb25;
832   volatile suni_pm7345_t *suni_pm7345;
833   u32 intr_status;
834   u_int frmr_intr;
835
836   if(iadev->phy_type & FE_25MBIT_PHY) {
837      mb25 = (ia_mb25_t*)iadev->phy;
838      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
839   } else if (iadev->phy_type & FE_DS3_PHY) {
840      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
841      /* clear FRMR interrupts */
842      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat; 
843      iadev->carrier_detect =  
844            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
845   } else if (iadev->phy_type & FE_E3_PHY ) {
846      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
847      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
848      iadev->carrier_detect =
849            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
850   }
851   else { 
852      suni = (IA_SUNI *)iadev->phy;
853      intr_status = suni->suni_rsop_status & 0xff;
854      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
855   }
856   if (iadev->carrier_detect)
857     printk("IA: SUNI carrier detected\n");
858   else
859     printk("IA: SUNI carrier lost signal\n"); 
860   return;
861 }
862
863 void ia_mb25_init (IADEV *iadev)
864 {
865    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
866 #if 0
867    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
868 #endif
869    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
870    mb25->mb25_diag_control = 0;
871    /*
872     * Initialize carrier detect state
873     */
874    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
875    return;
876 }                   
877
878 void ia_suni_pm7345_init (IADEV *iadev)
879 {
880    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
881    if (iadev->phy_type & FE_DS3_PHY)
882    {
883       iadev->carrier_detect = 
884           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
885       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
886       suni_pm7345->suni_ds3_frm_cfg = 1;
887       suni_pm7345->suni_ds3_tran_cfg = 1;
888       suni_pm7345->suni_config = 0;
889       suni_pm7345->suni_splr_cfg = 0;
890       suni_pm7345->suni_splt_cfg = 0;
891    }
892    else 
893    {
894       iadev->carrier_detect = 
895           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
896       suni_pm7345->suni_e3_frm_fram_options = 0x4;
897       suni_pm7345->suni_e3_frm_maint_options = 0x20;
898       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
899       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
900       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
901       suni_pm7345->suni_e3_tran_fram_options = 0x1;
902       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
903       suni_pm7345->suni_splr_cfg = 0x41;
904       suni_pm7345->suni_splt_cfg = 0x41;
905    } 
906    /*
907     * Enable RSOP loss of signal interrupt.
908     */
909    suni_pm7345->suni_intr_enbl = 0x28;
910  
911    /*
912     * Clear error counters
913     */
914    suni_pm7345->suni_id_reset = 0;
915
916    /*
917     * Clear "PMCTST" in master test register.
918     */
919    suni_pm7345->suni_master_test = 0;
920
921    suni_pm7345->suni_rxcp_ctrl = 0x2c;
922    suni_pm7345->suni_rxcp_fctrl = 0x81;
923  
924    suni_pm7345->suni_rxcp_idle_pat_h1 =
925         suni_pm7345->suni_rxcp_idle_pat_h2 =
926         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
927    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
928  
929    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
930    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
931    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
932    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
933  
934    suni_pm7345->suni_rxcp_cell_pat_h1 =
935         suni_pm7345->suni_rxcp_cell_pat_h2 =
936         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
937    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
938  
939    suni_pm7345->suni_rxcp_cell_mask_h1 =
940         suni_pm7345->suni_rxcp_cell_mask_h2 =
941         suni_pm7345->suni_rxcp_cell_mask_h3 =
942         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
943  
944    suni_pm7345->suni_txcp_ctrl = 0xa4;
945    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
946    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
947  
948    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
949                                  SUNI_PM7345_CLB |
950                                  SUNI_PM7345_DLB |
951                                   SUNI_PM7345_PLB);
952 #ifdef __SNMP__
953    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
954 #endif /* __SNMP__ */
955    return;
956 }
957
958
959 /***************************** IA_LIB END *****************************/
960     
961 /* pwang_test debug utility */
962 int tcnter = 0, rcnter = 0;
963 void xdump( u_char*  cp, int  length, char*  prefix )
964 {
965     int col, count;
966     u_char prntBuf[120];
967     u_char*  pBuf = prntBuf;
968     count = 0;
969     while(count < length){
970         pBuf += sprintf( pBuf, "%s", prefix );
971         for(col = 0;count + col < length && col < 16; col++){
972             if (col != 0 && (col % 4) == 0)
973                 pBuf += sprintf( pBuf, " " );
974             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
975         }
976         while(col++ < 16){      /* pad end of buffer with blanks */
977             if ((col % 4) == 0)
978                 pBuf += sprintf( pBuf, " " );
979             pBuf += sprintf( pBuf, "   " );
980         }
981         pBuf += sprintf( pBuf, "  " );
982         for(col = 0;count + col < length && col < 16; col++){
983             if (isprint((int)cp[count + col]))
984                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
985             else
986                 pBuf += sprintf( pBuf, "." );
987                 }
988         sprintf( pBuf, "\n" );
989         // SPrint(prntBuf);
990         printk("%s", prntBuf);
991         count += col;
992         pBuf = prntBuf;
993     }
994
995 }  /* close xdump(... */
996
997   
998 static struct atm_dev *ia_boards = NULL;  
999   
1000 #define ACTUAL_RAM_BASE \
1001         RAM_BASE*((iadev->mem)/(128 * 1024))  
1002 #define ACTUAL_SEG_RAM_BASE \
1003         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1004 #define ACTUAL_REASS_RAM_BASE \
1005         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1006   
1007   
1008 /*-- some utilities and memory allocation stuff will come here -------------*/  
1009   
1010 void desc_dbg(IADEV *iadev) {
1011
1012   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1013   u32 tmp, i;
1014   // regval = readl((u32)ia_cmds->maddr);
1015   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1016   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1017                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1018                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1019   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1020                    iadev->ffL.tcq_rd);
1021   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1022   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1023   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1024   i = 0;
1025   while (tcq_st_ptr != tcq_ed_ptr) {
1026       tmp = iadev->seg_ram+tcq_st_ptr;
1027       printk("TCQ slot %d desc = %d  Addr = 0x%x\n", i++, readw(tmp), tmp);
1028       tcq_st_ptr += 2;
1029   }
1030   for(i=0; i <iadev->num_tx_desc; i++)
1031       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1032 }
1033   
1034   
1035 /*----------------------------- Receiving side stuff --------------------------*/  
1036  
1037 static void rx_excp_rcvd(struct atm_dev *dev)  
1038 {  
1039 #if 0 /* closing the receive side will cause too many excp int */  
1040   IADEV *iadev;  
1041   u_short state;  
1042   u_short excpq_rd_ptr;  
1043   //u_short *ptr;  
1044   int vci, error = 1;  
1045   iadev = INPH_IA_DEV(dev);  
1046   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1047   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1048   { printk("state = %x \n", state); 
1049         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1050  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1051         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1052             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1053         // TODO: update exception stat
1054         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1055         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1056         // pwang_test
1057         excpq_rd_ptr += 4;  
1058         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1059             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1060         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1061         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1062   }  
1063 #endif
1064 }  
1065   
1066 static void free_desc(struct atm_dev *dev, int desc)  
1067 {  
1068         IADEV *iadev;  
1069         iadev = INPH_IA_DEV(dev);  
1070         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1071         iadev->rfL.fdq_wr +=2;
1072         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1073                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1074         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1075 }  
1076   
1077   
1078 static int rx_pkt(struct atm_dev *dev)  
1079 {  
1080         IADEV *iadev;  
1081         struct atm_vcc *vcc;  
1082         unsigned short status;  
1083         struct rx_buf_desc *buf_desc_ptr;  
1084         int desc;   
1085         struct dle* wr_ptr;  
1086         int len;  
1087         struct sk_buff *skb;  
1088         u_int buf_addr, dma_addr;  
1089
1090         iadev = INPH_IA_DEV(dev);  
1091         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1092         {  
1093             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1094             return -EINVAL;  
1095         }  
1096         /* mask 1st 3 bits to get the actual descno. */  
1097         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1098         IF_RX(printk("reass_ram = 0x%x iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1099                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1100               printk(" pcq_wr_ptr = 0x%x\n",
1101                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1102         /* update the read pointer - maybe we should do this at the end */  
1103         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1104                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1105         else  
1106                 iadev->rfL.pcq_rd += 2;
1107         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1108   
1109         /* get the buffer desc entry.  
1110                 update stuff. - doesn't seem to be any update necessary  
1111         */  
1112         buf_desc_ptr = (struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1113         /* make the ptr point to the corresponding buffer desc entry */  
1114         buf_desc_ptr += desc;     
1115         if (!desc || (desc > iadev->num_rx_desc) || 
1116                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1117             free_desc(dev, desc);
1118             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1119             return -1;
1120         }
1121         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1122         if (!vcc)  
1123         {      
1124                 free_desc(dev, desc); 
1125                 printk("IA: null vcc, drop PDU\n");  
1126                 return -1;  
1127         }  
1128           
1129   
1130         /* might want to check the status bits for errors */  
1131         status = (u_short) (buf_desc_ptr->desc_mode);  
1132         if (status & (RX_CER | RX_PTE | RX_OFL))  
1133         {  
1134                 atomic_inc(&vcc->stats->rx_err);
1135                 IF_ERR(printk("IA: bad packet, dropping it");)  
1136                 if (status & RX_CER) { 
1137                     IF_ERR(printk(" cause: packet CRC error\n");)
1138                 }
1139                 else if (status & RX_PTE) {
1140                     IF_ERR(printk(" cause: packet time out\n");)
1141                 }
1142                 else {
1143                     IF_ERR(printk(" cause: buffer overflow\n");)
1144                 }
1145                 goto out_free_desc;
1146         }  
1147   
1148         /*  
1149                 build DLE.        
1150         */  
1151   
1152         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1153         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1154         len = dma_addr - buf_addr;  
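        /* dma_start is apparently advanced by the SAR past the last byte it
           wrote, so the difference from buf_start is the raw PDU size
           (payload plus pad and AAL5 trailer); the exact payload length is
           recovered later from the CPCS trailer in rx_dle_intr(). */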
1155         if (len > iadev->rx_buf_sz) {
1156            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1157            atomic_inc(&vcc->stats->rx_err);
1158            goto out_free_desc;
1159         }
1160                   
1161         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1162            if (vcc->vci < 32)
1163               printk("Drop control packets\n");
1164            goto out_free_desc;
1165         }
1166         skb_put(skb,len);  
1167         // pwang_test
1168         ATM_SKB(skb)->vcc = vcc;
1169         ATM_DESC(skb) = desc;        
1170         skb_queue_tail(&iadev->rx_dma_q, skb);  
1171
1172         /* Build the DLE structure */  
1173         wr_ptr = iadev->rx_dle_q.write;  
1174         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1175                 len, PCI_DMA_FROMDEVICE);
1176         wr_ptr->local_pkt_addr = buf_addr;  
1177         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1178         wr_ptr->mode = DMA_INT_ENABLE;  
1179   
1180         /* should take care of wrap-around here too. */  
1181         if(++wr_ptr == iadev->rx_dle_q.end)
1182              wr_ptr = iadev->rx_dle_q.start;
1183         iadev->rx_dle_q.write = wr_ptr;  
1184         udelay(1);  
1185         /* Increment transaction counter */  
1186         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1187 out:    return 0;  
1188 out_free_desc:
1189         free_desc(dev, desc);
1190         goto out;
1191 }  
1192   
1193 static void rx_intr(struct atm_dev *dev)  
1194 {  
1195   IADEV *iadev;  
1196   u_short status;  
1197   u_short state, i;  
1198   
1199   iadev = INPH_IA_DEV(dev);  
1200   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1201   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1202   if (status & RX_PKT_RCVD)  
1203   {  
1204         /* do something */  
1205         /* Basically we received an interrupt for receiving a packet.  
1206         A descriptor would have been written to the packet complete   
1207         queue. Get all the descriptors and set up dma to move the   
1208         packets until the packet complete queue is empty.  
1209         */  
1210         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1211         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1212         while(!(state & PCQ_EMPTY))  
1213         {  
1214              rx_pkt(dev);  
1215              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1216         }  
1217         iadev->rxing = 1;
1218   }  
1219   if (status & RX_FREEQ_EMPT)  
1220   {   
1221      if (iadev->rxing) {
1222         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1223         iadev->rx_tmp_jif = jiffies; 
1224         iadev->rxing = 0;
1225      } 
1226      else if (((jiffies - iadev->rx_tmp_jif) > 50) && 
1227                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1228         for (i = 1; i <= iadev->num_rx_desc; i++)
1229                free_desc(dev, i);
1230 printk("Test logic RUN!!!!\n");
1231         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1232         iadev->rxing = 1;
1233      }
1234      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1235   }  
1236
1237   if (status & RX_EXCP_RCVD)  
1238   {  
1239         /* probably need to handle the exception queue also. */  
1240         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1241         rx_excp_rcvd(dev);  
1242   }  
1243
1244
1245   if (status & RX_RAW_RCVD)  
1246   {  
1247         /* need to handle the raw incoming cells. This depends on   
1248         whether we have programmed to receive the raw cells or not.  
1249         Else ignore. */  
1250         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1251   }  
1252 }  
1253   
1254   
1255 static void rx_dle_intr(struct atm_dev *dev)  
1256 {  
1257   IADEV *iadev;  
1258   struct atm_vcc *vcc;   
1259   struct sk_buff *skb;  
1260   int desc;  
1261   u_short state;   
1262   struct dle *dle, *cur_dle;  
1263   u_int dle_lp;  
1264   int len;
1265   iadev = INPH_IA_DEV(dev);  
1266  
1267   /* free all the dles done, that is just update our own dle read pointer   
1268         - do we really need to do this. Think not. */  
1269   /* DMA is done, just get all the receive buffers from the rx dma queue  
1270         and push them up to the higher layer protocol. Also free the desc  
1271         associated with the buffer. */  
1272   dle = iadev->rx_dle_q.read;  
1273   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1274   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1275   while(dle != cur_dle)  
1276   {  
1277       /* free the DMAed skb */  
1278       skb = skb_dequeue(&iadev->rx_dma_q);  
1279       if (!skb)  
1280          goto INCR_DLE;
1281       desc = ATM_DESC(skb);
1282       free_desc(dev, desc);  
1283                
1284       if (!(len = skb->len))
1285       {  
1286           printk("rx_dle_intr: skb len 0\n");  
1287           dev_kfree_skb_any(skb);  
1288       }  
1289       else  
1290       {  
1291           struct cpcs_trailer *trailer;
1292           u_short length;
1293           struct ia_vcc *ia_vcc;
1294
1295           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1296                 len, PCI_DMA_FROMDEVICE);
1297           /* no VCC related housekeeping done as yet. lets see */  
1298           vcc = ATM_SKB(skb)->vcc;
1299           if (!vcc) {
1300               printk("IA: null vcc\n");  
1301               dev_kfree_skb_any(skb);
1302               goto INCR_DLE;
1303           }
1304           ia_vcc = INPH_IA_VCC(vcc);
1305           if (ia_vcc == NULL)
1306           {
1307              atomic_inc(&vcc->stats->rx_err);
1308              dev_kfree_skb_any(skb);
1309              atm_return(vcc, atm_guess_pdu2truesize(len));
1310              goto INCR_DLE;
1311            }
1312           // get real pkt length  pwang_test
1313           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1314                                  skb->len - sizeof(*trailer));
1315           length =  swap(trailer->length);
1316           if ((length > iadev->rx_buf_sz) || (length > 
1317                               (skb->len - sizeof(struct cpcs_trailer))))
1318           {
1319              atomic_inc(&vcc->stats->rx_err);
1320              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1321                                                             length, skb->len);)
1322              dev_kfree_skb_any(skb);
1323              atm_return(vcc, atm_guess_pdu2truesize(len));
1324              goto INCR_DLE;
1325           }
1326           skb_trim(skb, length);
1327           
1328           /* Display the packet */  
1329           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1330           xdump(skb->data, skb->len, "RX: ");
1331           printk("\n");)
1332
1333           IF_RX(printk("rx_dle_intr: skb push");)  
1334           vcc->push(vcc,skb);  
1335           atomic_inc(&vcc->stats->rx);
1336           iadev->rx_pkt_cnt++;
1337       }  
1338 INCR_DLE:
1339       if (++dle == iadev->rx_dle_q.end)  
1340           dle = iadev->rx_dle_q.start;  
1341   }  
1342   iadev->rx_dle_q.read = dle;  
1343   
1344   /* if the interrupts are masked because there were no free desc available,  
1345                 unmask them now. */ 
1346   if (!iadev->rxing) {
1347      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1348      if (!(state & FREEQ_EMPTY)) {
1349         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1350         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1351                                       iadev->reass_reg+REASS_MASK_REG);
1352         iadev->rxing++; 
1353      }
1354   }
1355 }  
1356   
1357   
1358 static int open_rx(struct atm_vcc *vcc)  
1359 {  
1360         IADEV *iadev;  
1361         u_short *vc_table;  
1362         u_short *reass_ptr;  
1363         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1364
1365         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1366         iadev = INPH_IA_DEV(vcc->dev);  
1367         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1368            if (iadev->phy_type & FE_25MBIT_PHY) {
1369                printk("IA:  ABR not supported\n");
1370                return -EINVAL; 
1371            }
1372         }
1373         /* Make only this VCI in the vc table valid and let all   
1374                 others be invalid entries */  
1375         vc_table = (u_short *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1376         vc_table += vcc->vci;  
1377         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1378
1379         *vc_table = vcc->vci << 6;
1380         /* Also keep a list of open rx vcs so that we can attach them with  
1381                 incoming PDUs later. */  
1382         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1383                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1384         {  
1385                 srv_cls_param_t srv_p;
1386                 init_abr_vc(iadev, &srv_p);
1387                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1388         } 
1389         else {  /* for UBR  later may need to add CBR logic */
1390                 reass_ptr = (u_short *)
1391                            (iadev->reass_ram+REASS_TABLE*iadev->memSize);
1392                 reass_ptr += vcc->vci;  
1393                 *reass_ptr = NO_AAL5_PKT;
1394         }
1395         
1396         if (iadev->rx_open[vcc->vci])  
1397                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1398                         vcc->dev->number, vcc->vci);  
1399         iadev->rx_open[vcc->vci] = vcc;  
1400         return 0;  
1401 }  
1402   
1403 static int rx_init(struct atm_dev *dev)  
1404 {  
1405         IADEV *iadev;  
1406         struct rx_buf_desc *buf_desc_ptr;  
1407         unsigned long rx_pkt_start = 0;  
1408         void *dle_addr;  
1409         struct abr_vc_table  *abr_vc_table; 
1410         u16 *vc_table;  
1411         u16 *reass_table;  
1412         u16 *ptr16;
1413         int i,j, vcsize_sel;  
1414         u_short freeq_st_adr;  
1415         u_short *freeq_start;  
1416   
1417         iadev = INPH_IA_DEV(dev);  
1418   //    spin_lock_init(&iadev->rx_lock); 
1419   
1420         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1421         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1422                                         &iadev->rx_dle_dma);  
1423         if (!dle_addr)  {  
1424                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1425                 goto err_out;
1426         }
1427         iadev->rx_dle_q.start = (struct dle*)dle_addr;  
1428         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1429         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1430         iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1431         /* the end of the dle q points to the entry after the last  
1432         DLE that can be used. */  
1433   
1434         /* write the upper 20 bits of the start address to rx list address register */  
1435         writel(iadev->rx_dle_dma & 0xfffff000,
1436                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1437         IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n", 
1438                       (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR), 
1439                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1440         printk("Rx Dle list addr: 0x%08x value: 0x%0x\n", 
1441                       (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR), 
1442                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1443   
1444         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1445         writew(0, iadev->reass_reg+MODE_REG);  
1446         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1447   
1448         /* Receive side control memory map  
1449            -------------------------------  
1450   
1451                 Buffer descr    0x0000 (736 - 23K)  
1452                 VP Table        0x5c00 (256 - 512)  
1453                 Except q        0x5e00 (128 - 512)  
1454                 Free buffer q   0x6000 (1K - 2K)  
1455                 Packet comp q   0x6800 (1K - 2K)  
1456                 Reass Table     0x7000 (1K - 2K)  
1457                 VC Table        0x7800 (1K - 2K)  
1458                 ABR VC Table    0x8000 (1K - 32K)  
1459         */  
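        /* Note: the offsets above appear to assume memSize == 1 (the 1K-VC
           board); each region base below is computed as <region> *
           iadev->memSize, so on the 4K-VC board (memSize == 4, see
           ia_init()) every base scales by a factor of four. */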
1460           
1461         /* Base address for Buffer Descriptor Table */  
1462         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1463         /* Set the buffer size register */  
1464         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1465   
1466         /* Initialize each entry in the Buffer Descriptor Table */  
1467         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1468         buf_desc_ptr =(struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
1469         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1470         buf_desc_ptr++;  
1471         rx_pkt_start = iadev->rx_pkt_ram;  
1472         for(i=1; i<=iadev->num_rx_desc; i++)  
1473         {  
1474                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1475                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1476                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1477                 buf_desc_ptr++;           
1478                 rx_pkt_start += iadev->rx_buf_sz;  
1479         }  
1480         IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)  
1481         i = FREE_BUF_DESC_Q*iadev->memSize; 
1482         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1483         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1484         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1485                                          iadev->reass_reg+FREEQ_ED_ADR);
1486         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1487         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1488                                         iadev->reass_reg+FREEQ_WR_PTR);    
1489         /* Fill the FREEQ with all the free descriptors. */  
1490         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1491         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1492         for(i=1; i<=iadev->num_rx_desc; i++)  
1493         {  
1494                 *freeq_start = (u_short)i;  
1495                 freeq_start++;  
1496         }  
1497         IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)  
1498         /* Packet Complete Queue */
1499         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1500         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1501         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1502         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1503         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1504
1505         /* Exception Queue */
1506         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1507         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1508         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1509                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1510         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1511         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1512  
1513         /* Load local copy of FREEQ and PCQ ptrs */
1514         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1515         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1516         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1517         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1518         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1519         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1520         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1521         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1522         
1523         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1524               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1525               iadev->rfL.pcq_wr);)                
1526         /* just for check - no VP TBL */  
1527         /* VP Table */  
1528         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1529         /* initialize VP Table for invalid VPIs  
1530                 - I guess we can write all 1s or 0x000f in the entire memory  
1531                   space or something similar.  
1532         */  
1533   
1534         /* This seems to work and looks right to me too !!! */  
1535         i =  REASS_TABLE * iadev->memSize;
1536         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1537         /* initialize each Reassembly Table entry to NO_AAL5_PKT */  
1538         reass_table = (u16 *)(iadev->reass_ram+i);  
1539         j = REASS_TABLE_SZ * iadev->memSize;
1540         for(i=0; i < j; i++)  
1541                 *reass_table++ = NO_AAL5_PKT;  
1542        i = 8*1024;
1543        vcsize_sel =  0;
1544        while (i != iadev->num_vc) {
1545           i /= 2;
1546           vcsize_sel++;
1547        }
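       /* The loop above is effectively vcsize_sel = log2(8192 / num_vc):
          e.g. num_vc == 1024 walks 8192 -> 4096 -> 2048 -> 1024 and yields
          vcsize_sel == 3, while num_vc == 4096 yields vcsize_sel == 1. */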
1548        i = RX_VC_TABLE * iadev->memSize;
1549        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1550        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1551         j = RX_VC_TABLE_SZ * iadev->memSize;
1552         for(i = 0; i < j; i++)  
1553         {  
1554                 /* The reassembly pointer is shifted by 3 plus the lower   
1555                 3 bits of the VC_LKUP_BASE register (3 for 1K VCs), and   
1556                 the last byte holds those low 3 bits.   
1557                 This will be programmed properly later.  
1558                 */  
1559                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1560                 vc_table++;  
1561         }  
1562         /* ABR VC table */
1563         i =  ABR_VC_TABLE * iadev->memSize;
1564         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1565                    
1566         i = ABR_VC_TABLE * iadev->memSize;
1567         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1568         j = REASS_TABLE_SZ * iadev->memSize;
1569         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1570         for(i = 0; i < j; i++) {                
1571                 abr_vc_table->rdf = 0x0003;
1572                 abr_vc_table->air = 0x5eb1;
1573                 abr_vc_table++;         
1574         }  
1575
1576         /* Initialize other registers */  
1577   
1578         /* VP Filter Register set for VC Reassembly only */  
1579         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1580         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1581         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1582
1583         /* Packet Timeout Count  related Registers : 
1584            Set packet timeout to occur in about 3 seconds
1585            Set Packet Aging Interval count register to overflow in about 4 us
1586         */  
1587         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1588         ptr16 = (u16*)j;
1589         i = ((u32)ptr16 >> 6) & 0xff;
1590         ptr16  += j - 1;
1591         i |=(((u32)ptr16 << 2) & 0xff00);
1592         writew(i, iadev->reass_reg+TMOUT_RANGE);
1593         /* initialize the desc_tbl */
1594         for(i=0; i<iadev->num_tx_desc;i++)
1595             iadev->desc_tbl[i].timestamp = 0;
1596
1597         /* to clear the interrupt status register - read it */  
1598         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1599   
1600         /* Mask Register - clear it */  
1601         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1602   
1603         skb_queue_head_init(&iadev->rx_dma_q);  
1604         iadev->rx_free_desc_qhead = NULL;   
1605         iadev->rx_open = kmalloc(4*iadev->num_vc,GFP_KERNEL);
1606         if (!iadev->rx_open)  
1607         {  
1608                 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1609                 dev->number);  
1610                 goto err_free_dle;
1611         }  
1612         memset(iadev->rx_open, 0, 4*iadev->num_vc);  
1613         iadev->rxing = 1;
1614         iadev->rx_pkt_cnt = 0;
1615         /* Mode Register */  
1616         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1617         return 0;  
1618
1619 err_free_dle:
1620         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1621                             iadev->rx_dle_dma);  
1622 err_out:
1623         return -ENOMEM;
1624 }  
1625   
1626
1627 /*  
1628         The memory map suggested in appendix A and the coding for it.   
1629         Keeping it around just in case we change our mind later.  
1630   
1631                 Buffer descr    0x0000 (128 - 4K)  
1632                 UBR sched       0x1000 (1K - 4K)  
1633                 UBR Wait q      0x2000 (1K - 4K)  
1634                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1635                                         (128 - 256) each  
1636                 extended VC     0x4000 (1K - 8K)  
1637                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1638                 CBR sched       0x7000 (as needed)  
1639                 VC table        0x8000 (1K - 32K)  
1640 */  
1641   
1642 static void tx_intr(struct atm_dev *dev)  
1643 {  
1644         IADEV *iadev;  
1645         unsigned short status;  
1646         unsigned long flags;
1647
1648         iadev = INPH_IA_DEV(dev);  
1649   
1650         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1651         if (status & TRANSMIT_DONE){
1652
1653            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1654            spin_lock_irqsave(&iadev->tx_lock, flags);
1655            ia_tx_poll(iadev);
1656            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1657            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1658            if (iadev->close_pending)  
1659                wake_up(&iadev->close_wait);
1660         }         
1661         if (status & TCQ_NOT_EMPTY)  
1662         {  
1663             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1664         }  
1665 }  
1666   
1667 static void tx_dle_intr(struct atm_dev *dev)
1668 {
1669         IADEV *iadev;
1670         struct dle *dle, *cur_dle; 
1671         struct sk_buff *skb;
1672         struct atm_vcc *vcc;
1673         struct ia_vcc  *iavcc;
1674         u_int dle_lp;
1675         unsigned long flags;
1676
1677         iadev = INPH_IA_DEV(dev);
1678         spin_lock_irqsave(&iadev->tx_lock, flags);   
1679         dle = iadev->tx_dle_q.read;
1680         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1681                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1682         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
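        /* Hedged note: dle_lp is the hardware's current byte offset within
           the DLE ring (the list address register masked to the ring size);
           the ">> 4" turns it into a DLE index, which assumes
           sizeof(struct dle) == 16. */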
1683         while (dle != cur_dle)
1684         {
1685             /* free the DMAed skb */ 
1686             skb = skb_dequeue(&iadev->tx_dma_q); 
1687             if (!skb) break;
1688
1689             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1690             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1691                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1692                                  PCI_DMA_TODEVICE);
1693             }
1694             vcc = ATM_SKB(skb)->vcc;
1695             if (!vcc) {
1696                   printk("tx_dle_intr: vcc is null\n");
1697                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1698                   dev_kfree_skb_any(skb);
1699
1700                   return;
1701             }
1702             iavcc = INPH_IA_VCC(vcc);
1703             if (!iavcc) {
1704                   printk("tx_dle_intr: iavcc is null\n");
1705                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1706                   dev_kfree_skb_any(skb);
1707                   return;
1708             }
1709             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1710                if ((vcc->pop) && (skb->len != 0))
1711                {     
1712                  vcc->pop(vcc, skb);
1713                } 
1714                else {
1715                  dev_kfree_skb_any(skb);
1716                }
1717             }
1718             else { /* Hold the rate-limited skb for flow control */
1719                IA_SKB_STATE(skb) |= IA_DLED;
1720                skb_queue_tail(&iavcc->txing_skb, skb);
1721             }
1722             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
1723             if (++dle == iadev->tx_dle_q.end)
1724                  dle = iadev->tx_dle_q.start;
1725         }
1726         iadev->tx_dle_q.read = dle;
1727         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1728 }
1729   
1730 static int open_tx(struct atm_vcc *vcc)  
1731 {  
1732         struct ia_vcc *ia_vcc;  
1733         IADEV *iadev;  
1734         struct main_vc *vc;  
1735         struct ext_vc *evc;  
1736         int ret;
1737         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1738         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1739         iadev = INPH_IA_DEV(vcc->dev);  
1740         
1741         if (iadev->phy_type & FE_25MBIT_PHY) {
1742            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1743               printk("IA:  ABR not supported\n");
1744                return -EINVAL; 
1745            }
1746           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1747               printk("IA:  CBR not supported\n");
1748                return -EINVAL; 
1749           }
1750         }
1751         ia_vcc =  INPH_IA_VCC(vcc);
1752         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1753         if (vcc->qos.txtp.max_sdu > 
1754                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1755            printk("IA:  SDU size (%d) exceeds the configured SDU size %d\n",
1756                   vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1757            vcc->dev_data = NULL;
1758            kfree(ia_vcc);
1759            return -EINVAL; 
1760         }
1761         ia_vcc->vc_desc_cnt = 0;
1762         ia_vcc->txing = 1;
1763
1764         /* find pcr */
1765         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1766            vcc->qos.txtp.pcr = iadev->LineRate;
1767         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1768            vcc->qos.txtp.pcr = iadev->LineRate;
1769         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1770            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1771         if (vcc->qos.txtp.pcr > iadev->LineRate)
1772              vcc->qos.txtp.pcr = iadev->LineRate;
1773         ia_vcc->pcr = vcc->qos.txtp.pcr;
1774
1775         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1776         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1777         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1778         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
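        /* Rough worked example (assuming pcr is in cells/sec on an OC-3
           line rate): a VC with pcr == 1000 takes the last branch and gets
           ltimeout = 2700 * HZ / 1000, i.e. roughly 2.7 seconds. */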
1779         if (ia_vcc->pcr < iadev->rate_limit)
1780            skb_queue_head_init (&ia_vcc->txing_skb);
1781         if (ia_vcc->pcr < iadev->rate_limit) {
1782            if (vcc->qos.txtp.max_sdu != 0) {
1783                if (ia_vcc->pcr > 60000)
1784                   vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1785                else if (ia_vcc->pcr > 2000)
1786                   vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1787                else
1788                  vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1789            }
1790            else
1791              vcc->sk->sk_sndbuf = 24576;
1792         }
1793            
1794         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1795         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1796         vc += vcc->vci;  
1797         evc += vcc->vci;  
1798         memset((caddr_t)vc, 0, sizeof(*vc));  
1799         memset((caddr_t)evc, 0, sizeof(*evc));  
1800           
1801         /* store the most significant 4 bits of vci as the last 4 bits   
1802                 of first part of atm header.  
1803            store the last 12 bits of vci as first 12 bits of the second  
1804                 part of the atm header.  
1805         */  
1806         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1807         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
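        /* Worked example: VCI 0x1234 gives atm_hdr1 = (0x1234 >> 12) & 0xf
           = 0x1 and atm_hdr2 = (0x1234 & 0x0fff) << 4 = 0x2340, matching
           the split described above. */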
1808  
1809         /* check the following for different traffic classes */  
1810         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1811         {  
1812                 vc->type = UBR;  
1813                 vc->status = CRC_APPEND;
1814                 vc->acr = cellrate_to_float(iadev->LineRate);  
1815                 if (vcc->qos.txtp.pcr > 0) 
1816                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1817                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1818                                              vcc->qos.txtp.max_pcr,vc->acr);)
1819         }  
1820         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1821         {       srv_cls_param_t srv_p;
1822                 IF_ABR(printk("Tx ABR VCC\n");)  
1823                 init_abr_vc(iadev, &srv_p);
1824                 if (vcc->qos.txtp.pcr > 0) 
1825                    srv_p.pcr = vcc->qos.txtp.pcr;
1826                 if (vcc->qos.txtp.min_pcr > 0) {
1827                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1828                    if (tmpsum > iadev->LineRate)
1829                        return -EBUSY;
1830                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1831                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1832                 } 
1833                 else srv_p.mcr = 0;
1834                 if (vcc->qos.txtp.icr)
1835                    srv_p.icr = vcc->qos.txtp.icr;
1836                 if (vcc->qos.txtp.tbe)
1837                    srv_p.tbe = vcc->qos.txtp.tbe;
1838                 if (vcc->qos.txtp.frtt)
1839                    srv_p.frtt = vcc->qos.txtp.frtt;
1840                 if (vcc->qos.txtp.rif)
1841                    srv_p.rif = vcc->qos.txtp.rif;
1842                 if (vcc->qos.txtp.rdf)
1843                    srv_p.rdf = vcc->qos.txtp.rdf;
1844                 if (vcc->qos.txtp.nrm_pres)
1845                    srv_p.nrm = vcc->qos.txtp.nrm;
1846                 if (vcc->qos.txtp.trm_pres)
1847                    srv_p.trm = vcc->qos.txtp.trm;
1848                 if (vcc->qos.txtp.adtf_pres)
1849                    srv_p.adtf = vcc->qos.txtp.adtf;
1850                 if (vcc->qos.txtp.cdf_pres)
1851                    srv_p.cdf = vcc->qos.txtp.cdf;    
1852                 if (srv_p.icr > srv_p.pcr)
1853                    srv_p.icr = srv_p.pcr;    
1854                 IF_ABR(printk("ABR: srv_p.pcr = %d  mcr = %d\n", 
1855                                                       srv_p.pcr, srv_p.mcr);)
1856                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1857         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1858                 if (iadev->phy_type & FE_25MBIT_PHY) {
1859                     printk("IA:  CBR not supported\n");
1860                     return -EINVAL; 
1861                 }
1862                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1863                    IF_CBR(printk("PCR is not available\n");)
1864                    return -1;
1865                 }
1866                 vc->type = CBR;
1867                 vc->status = CRC_APPEND;
1868                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1869                     return ret;
1870                 }
1871        } 
1872         else  
1873            printk("iadev:  Non-UBR, ABR and CBR traffic not supported\n"); 
1874         
1875         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1876         IF_EVENT(printk("ia open_tx returning \n");)  
1877         return 0;  
1878 }  
1879   
1880   
1881 static int tx_init(struct atm_dev *dev)  
1882 {  
1883         IADEV *iadev;  
1884         struct tx_buf_desc *buf_desc_ptr;
1885         unsigned int tx_pkt_start;  
1886         void *dle_addr;  
1887         int i;  
1888         u_short tcq_st_adr;  
1889         u_short *tcq_start;  
1890         u_short prq_st_adr;  
1891         u_short *prq_start;  
1892         struct main_vc *vc;  
1893         struct ext_vc *evc;   
1894         u_short tmp16;
1895         u32 vcsize_sel;
1896  
1897         iadev = INPH_IA_DEV(dev);  
1898         spin_lock_init(&iadev->tx_lock);
1899  
1900         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1901                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1902
1903         /* Allocate 4k (boundary aligned) bytes */
1904         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1905                                         &iadev->tx_dle_dma);  
1906         if (!dle_addr)  {
1907                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1908                 goto err_out;
1909         }
1910         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1911         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1912         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1913         iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1914
1915         /* write the upper 20 bits of the start address to tx list address register */  
1916         writel(iadev->tx_dle_dma & 0xfffff000,
1917                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1918         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1919         writew(0, iadev->seg_reg+MODE_REG_0);  
1920         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1921         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1922         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1923         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1924   
1925         /*  
1926            Transmit side control memory map  
1927            --------------------------------    
1928          Buffer descr   0x0000 (128 - 4K)  
1929          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1930                                         (512 - 1K) each  
1931                                         TCQ - 4K, PRQ - 5K  
1932          CBR Table      0x1800 (as needed) - 6K  
1933          UBR Table      0x3000 (1K - 4K) - 12K  
1934          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1935          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1936                                 ABR Tbl - 20K, ABR Wq - 22K   
1937          extended VC    0x6000 (1K - 8K) - 24K  
1938          VC Table       0x8000 (1K - 32K) - 32K  
1939           
1940         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1941         and Wait q, which can be allotted later.  
1942         */  
1943      
1944         /* Buffer Descriptor Table Base address */  
1945         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1946   
1947         /* initialize each entry in the buffer descriptor table */  
1948         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1949         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1950         buf_desc_ptr++;  
1951         tx_pkt_start = TX_PACKET_RAM;  
1952         for(i=1; i<=iadev->num_tx_desc; i++)  
1953         {  
1954                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1955                 buf_desc_ptr->desc_mode = AAL5;  
1956                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1957                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1958                 buf_desc_ptr++;           
1959                 tx_pkt_start += iadev->tx_buf_sz;  
1960         }  
1961         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1962         if (!iadev->tx_buf) {
1963             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1964             goto err_free_dle;
1965         }
1966         for (i= 0; i< iadev->num_tx_desc; i++)
1967         {
1968             struct cpcs_trailer *cpcs;
1969  
1970             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1971             if(!cpcs) {                
1972                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1973                 goto err_free_tx_bufs;
1974             }
1975             iadev->tx_buf[i].cpcs = cpcs;
1976             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1977                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1978         }
1979         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1980                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1981         if (!iadev->desc_tbl) {
1982                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1983                 goto err_free_all_tx_bufs;
1984         }
1985   
1986         /* Communication Queues base address */  
1987         i = TX_COMP_Q * iadev->memSize;
1988         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1989   
1990         /* Transmit Complete Queue */  
1991         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1992         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1993         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1994         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1995         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1996                                               iadev->seg_reg+TCQ_ED_ADR); 
1997         /* Fill the TCQ with all the free descriptors. */  
1998         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
1999         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2000         for(i=1; i<=iadev->num_tx_desc; i++)  
2001         {  
2002                 *tcq_start = (u_short)i;  
2003                 tcq_start++;  
2004         }  
2005   
2006         /* Packet Ready Queue */  
2007         i = PKT_RDY_Q * iadev->memSize; 
2008         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2009         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2010                                               iadev->seg_reg+PRQ_ED_ADR);
2011         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2012         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2013          
2014         /* Load local copy of PRQ and TCQ ptrs */
2015         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2016         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2017         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2018
2019         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2020         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2021         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2022
2023         /* Just for safety initializing the queue to have desc 1 always */  
2024         /* Fill the PRQ with all the free descriptors. */  
2025         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2026         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2027         for(i=1; i<=iadev->num_tx_desc; i++)  
2028         {  
2029                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2030                 prq_start++;  
2031         }  
2032         /* CBR Table */  
2033         IF_INIT(printk("Start CBR Init\n");)
2034 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2035         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2036 #else /* Charlie's logic is wrong ? */
2037         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2038         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2039         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2040 #endif
2041
2042         IF_INIT(printk("value in register = 0x%x\n",
2043                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2044         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2045         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2046         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2047                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2048         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2049         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2050         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2051         IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2052                (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2053         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2054           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2055           readw(iadev->seg_reg+CBR_TAB_END+1));)
2056         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
2057
2058         /* Initialize the CBR Scheduling Table */
2059         memset((caddr_t)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize), 
2060                                                           0, iadev->num_vc*6); 
2061         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2062         iadev->CbrEntryPt = 0;
2063         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2064         iadev->NumEnabledCBR = 0;
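        /* Hedged note: three CBR schedule slots are allocated per VC, so
           Granularity is the cell rate one slot represents; assuming
           MAX_ATM_155 is the OC-3 cell rate (~353,000 cells/s) and
           num_vc == 1024, each slot is worth roughly 115 cells/s. */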
2065
2066         /* UBR scheduling Table and wait queue */  
2067         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2068                 - SCHEDSZ is 1K (# of entries).  
2069                 - UBR Table size is 4K  
2070                 - UBR wait queue is 4K  
2071            since the table and wait queues are contiguous, all the bytes   
2072            can be initialized by one memset.  
2073         */  
2074         
2075         vcsize_sel = 0;
2076         i = 8*1024;
2077         while (i != iadev->num_vc) {
2078           i /= 2;
2079           vcsize_sel++;
2080         }
2081  
2082         i = MAIN_VC_TABLE * iadev->memSize;
2083         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2084         i =  EXT_VC_TABLE * iadev->memSize;
2085         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2086         i = UBR_SCHED_TABLE * iadev->memSize;
2087         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2088         i = UBR_WAIT_Q * iadev->memSize; 
2089         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2090         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2091                                                        0, iadev->num_vc*8);
2092         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2093         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2094                 - SCHEDSZ is 1K (# of entries).  
2095                 - ABR Table size is 2K  
2096                 - ABR wait queue is 2K  
2097            since the table and wait queues are contiguous, all the bytes   
2098            can be initialized by one memset.  
2099         */  
2100         i = ABR_SCHED_TABLE * iadev->memSize;
2101         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2102         i = ABR_WAIT_Q * iadev->memSize;
2103         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2104  
2105         i = ABR_SCHED_TABLE*iadev->memSize;
2106         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2107         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2108         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2109         iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); 
2110         if (!iadev->testTable) {
2111            printk("Get free page failed\n");
2112            goto err_free_desc_tbl;
2113         }
2114         for(i=0; i<iadev->num_vc; i++)  
2115         {  
2116                 memset((caddr_t)vc, 0, sizeof(*vc));  
2117                 memset((caddr_t)evc, 0, sizeof(*evc));  
2118                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2119                                                 GFP_KERNEL);
2120                 if (!iadev->testTable[i])
2121                         goto err_free_test_tables;
2122                 iadev->testTable[i]->lastTime = 0;
2123                 iadev->testTable[i]->fract = 0;
2124                 iadev->testTable[i]->vc_status = VC_UBR;
2125                 vc++;  
2126                 evc++;  
2127         }  
2128   
2129         /* Other Initialization */  
2130           
2131         /* Max Rate Register */  
2132         if (iadev->phy_type & FE_25MBIT_PHY) {
2133            writew(RATE25, iadev->seg_reg+MAXRATE);  
2134            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2135         }
2136         else {
2137            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2138            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2139         }
2140         /* Set Idle Header Registers to be sure */  
2141         writew(0, iadev->seg_reg+IDLEHEADHI);  
2142         writew(0, iadev->seg_reg+IDLEHEADLO);  
2143   
2144         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2145         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2146
2147         iadev->close_pending = 0;
2148         init_waitqueue_head(&iadev->close_wait);
2149         init_waitqueue_head(&iadev->timeout_wait);
2150         skb_queue_head_init(&iadev->tx_dma_q);  
2151         ia_init_rtn_q(&iadev->tx_return_q);  
2152
2153         /* RM Cell Protocol ID and Message Type */  
2154         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2155         skb_queue_head_init (&iadev->tx_backlog);
2156   
2157         /* Mode Register 1 */  
2158         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2159   
2160         /* Mode Register 0 */  
2161         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2162   
2163         /* Interrupt Status Register - read to clear */  
2164         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2165   
2166         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2167         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2168         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2169         iadev->tx_pkt_cnt = 0;
2170         iadev->rate_limit = iadev->LineRate / 3;
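        /* Note: rate_limit (a third of the line rate) is the threshold used
           by open_tx() and tx_dle_intr(): VCs with pcr below it get a
           txing_skb queue and have their DMAed skbs held (IA_DLED) for
           software flow control, while faster VCs have their skbs completed
           immediately. */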
2171   
2172         return 0;
2173
2174 err_free_test_tables:
2175         while (--i >= 0)
2176                 kfree(iadev->testTable[i]);
2177         kfree(iadev->testTable);
2178 err_free_desc_tbl:
2179         kfree(iadev->desc_tbl);
2180 err_free_all_tx_bufs:
2181         i = iadev->num_tx_desc;
2182 err_free_tx_bufs:
2183         while (--i >= 0) {
2184                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2185
2186                 pci_unmap_single(iadev->pci, desc->dma_addr,
2187                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2188                 kfree(desc->cpcs);
2189         }
2190         kfree(iadev->tx_buf);
2191 err_free_dle:
2192         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2193                             iadev->tx_dle_dma);  
2194 err_out:
2195         return -ENOMEM;
2196 }   
2197    
2198 static irqreturn_t ia_int(int irq, void *dev_id, struct pt_regs *regs)  
2199 {  
2200    struct atm_dev *dev;  
2201    IADEV *iadev;  
2202    unsigned int status;  
2203    int handled = 0;
2204
2205    dev = dev_id;  
2206    iadev = INPH_IA_DEV(dev);  
2207    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2208    { 
2209         handled = 1;
2210         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2211         if (status & STAT_REASSINT)  
2212         {  
2213            /* do something */  
2214            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2215            rx_intr(dev);  
2216         }  
2217         if (status & STAT_DLERINT)  
2218         {  
2219            /* Clear this bit by writing a 1 to it. */  
2220            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2221            rx_dle_intr(dev);  
2222         }  
2223         if (status & STAT_SEGINT)  
2224         {  
2225            /* do something */ 
2226            IF_EVENT(printk("IA: tx_intr \n");) 
2227            tx_intr(dev);  
2228         }  
2229         if (status & STAT_DLETINT)  
2230         {  
2231            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2232            tx_dle_intr(dev);  
2233         }  
2234         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2235         {  
2236            if (status & STAT_FEINT) 
2237                IaFrontEndIntr(iadev);
2238         }  
2239    }
2240    return IRQ_RETVAL(handled);
2241 }  
2242           
2243           
2244           
2245 /*----------------------------- entries --------------------------------*/  
2246 static int get_esi(struct atm_dev *dev)  
2247 {  
2248         IADEV *iadev;  
2249         int i;  
2250         u32 mac1;  
2251         u16 mac2;  
2252           
2253         iadev = INPH_IA_DEV(dev);  
2254         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2255                                 iadev->reg+IPHASE5575_MAC1)));  
2256         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2257         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2258         for (i=0; i<MAC1_LEN; i++)  
2259                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2260           
2261         for (i=0; i<MAC2_LEN; i++)  
2262                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2263         return 0;  
2264 }  
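/* Hedged worked example (assuming MAC1_LEN == 4 and MAC2_LEN == 2, as the
   6-byte ESI suggests): mac1 == 0x00204812 and mac2 == 0x3456 would yield
   esi[] = { 0x00, 0x20, 0x48, 0x12, 0x34, 0x56 }. */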
2265           
2266 static int reset_sar(struct atm_dev *dev)  
2267 {  
2268         IADEV *iadev;  
2269         int i, error = 1;  
2270         unsigned int pci[64];  
2271           
2272         iadev = INPH_IA_DEV(dev);  
2273         for(i=0; i<64; i++)  
2274           if ((error = pci_read_config_dword(iadev->pci,  
2275                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2276               return error;  
2277         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2278         for(i=0; i<64; i++)  
2279           if ((error = pci_write_config_dword(iadev->pci,  
2280                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2281             return error;  
2282         udelay(5);  
2283         return 0;  
2284 }  
2285           
2286           
2287 static int __init ia_init(struct atm_dev *dev)
2288 {  
2289         IADEV *iadev;  
2290         unsigned long real_base, base;  
2291         unsigned short command;  
2292         unsigned char revision;  
2293         int error, i; 
2294           
2295         /* The device has been identified and registered. Now we read   
2296            necessary configuration info like memory base address,   
2297            interrupt number etc */  
2298           
2299         IF_INIT(printk(">ia_init\n");)  
2300         dev->ci_range.vpi_bits = 0;  
2301         dev->ci_range.vci_bits = NR_VCI_LD;  
2302
2303         iadev = INPH_IA_DEV(dev);  
2304         real_base = pci_resource_start (iadev->pci, 0);
2305         iadev->irq = iadev->pci->irq;
2306                   
2307         if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND,&command))   
2308                     || (error = pci_read_config_byte(iadev->pci,   
2309                                 PCI_REVISION_ID,&revision)))   
2310         {  
2311                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2312                                 dev->number,error);  
2313                 return -EINVAL;  
2314         }  
2315         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2316                         dev->number, revision, real_base, iadev->irq);)  
2317           
2318         /* find mapping size of board */  
2319           
2320         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2321
2322         if (iadev->pci_map_size == 0x100000){
2323           iadev->num_vc = 4096;
2324           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2325           iadev->memSize = 4;
2326         }
2327         else if (iadev->pci_map_size == 0x40000) {
2328           iadev->num_vc = 1024;
2329           iadev->memSize = 1;
2330         }
2331         else {
2332            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2333            return -EINVAL;
2334         }
2335         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2336           
2337         /* enable bus mastering */
2338         pci_set_master(iadev->pci);
2339
2340         /*  
2341          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2342          */  
2343         udelay(10);  
2344           
2345         /* mapping the physical address to a virtual address in address space */  
2346         base=(unsigned long)ioremap((unsigned long)real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2347           
2348         if (!base)  
2349         {  
2350                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2351                             dev->number);  
2352                 return -ENOMEM;  
2353         }  
2354         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=0x%lx,irq=%d\n",  
2355                         dev->number, revision, base, iadev->irq);)  
2356           
2357         /* filling the iphase dev structure */  
2358         iadev->mem = iadev->pci_map_size /2;  
2359         iadev->base_diff = real_base - base;  
2360         iadev->real_base = real_base;  
2361         iadev->base = base;  
2362                   
2363         /* Bus Interface Control Registers */  
2364         iadev->reg = (u32 *) (base + REG_BASE);  
2365         /* Segmentation Control Registers */  
2366         iadev->seg_reg = (u32 *) (base + SEG_BASE);  
2367         /* Reassembly Control Registers */  
2368         iadev->reass_reg = (u32 *) (base + REASS_BASE);  
2369         /* Front end/ DMA control registers */  
2370         iadev->phy = (u32 *) (base + PHY_BASE);  
2371         iadev->dma = (u32 *) (base + PHY_BASE);  
2372         /* RAM - Segmentation RAM and Reassembly RAM */  
2373         iadev->ram = (u32 *) (base + ACTUAL_RAM_BASE);  
2374         iadev->seg_ram =  (base + ACTUAL_SEG_RAM_BASE);  
2375         iadev->reass_ram = (base + ACTUAL_REASS_RAM_BASE);  
2376   
2377         /* let's print out the above */  
2378         IF_INIT(printk("Base addrs: %08x %08x %08x \n %08x %08x %08x %08x\n", 
2379           (u32)iadev->reg,(u32)iadev->seg_reg,(u32)iadev->reass_reg, 
2380           (u32)iadev->phy, (u32)iadev->ram, (u32)iadev->seg_ram, 
2381           (u32)iadev->reass_ram);) 
2382           
2383         /* let's try reading the MAC address */  
2384         error = get_esi(dev);  
2385         if (error) {
2386           iounmap((void *) iadev->base);
2387           return error;  
2388         }
2389         printk("IA: ");
2390         for (i=0; i < ESI_LEN; i++)  
2391                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2392         printk("\n");  
2393   
2394         /* reset SAR */  
2395         if (reset_sar(dev)) {
2396            iounmap((void *) iadev->base);
2397            printk("IA: reset SAR failed, please try again\n");
2398            return 1;
2399         }
2400         return 0;  
2401 }  
2402
2403 static void ia_update_stats(IADEV *iadev) {
2404     if (!iadev->carrier_detect)
2405         return;
2406     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2407     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2408     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2409     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2410     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2411     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2412     return;
2413 }
2414   
2415 static void ia_led_timer(unsigned long arg) {
2416         unsigned long flags;
2417         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2418         u_char i;
2419         static u32 ctrl_reg; 
2420         for (i = 0; i < iadev_count; i++) {
2421            if (ia_dev[i]) {
2422               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2423               if (blinking[i] == 0) {
2424                  blinking[i]++;
2425                  ctrl_reg &= (~CTRL_LED);
2426                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2427                  ia_update_stats(ia_dev[i]);
2428               }
2429               else {
2430                  blinking[i] = 0;
2431                  ctrl_reg |= CTRL_LED;
2432                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2433                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2434                  if (ia_dev[i]->close_pending)  
2435                     wake_up(&ia_dev[i]->close_wait);
2436                  ia_tx_poll(ia_dev[i]);
2437                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2438               }
2439            }
2440         }
2441         mod_timer(&ia_timer, jiffies + HZ / 4);
2442         return;
2443 }
2444
2445 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2446         unsigned long addr)  
2447 {  
2448         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2449 }  
2450   
2451 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2452 {  
2453         return readl(INPH_IA_DEV(dev)->phy+addr);  
2454 }  
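/* Minimal usage sketch: the two accessors above are combined for
   read-modify-write of PHY registers, as ia_start() does below to enable
   the loss-of-signal interrupt:

       ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) | SUNI_RSOP_CIE_LOSE,
                  SUNI_RSOP_CIE);

   (the driver itself uses the raw offsets 0x10 and 0x04; the symbolic names
   come from its own comment and are assumed to be defined in suni.h). */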
2455
2456 static void ia_free_tx(IADEV *iadev)
2457 {
2458         int i;
2459
2460         kfree(iadev->desc_tbl);
2461         for (i = 0; i < iadev->num_vc; i++)
2462                 kfree(iadev->testTable[i]);
2463         kfree(iadev->testTable);
2464         for (i = 0; i < iadev->num_tx_desc; i++) {
2465                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2466
2467                 pci_unmap_single(iadev->pci, desc->dma_addr,
2468                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2469                 kfree(desc->cpcs);
2470         }
2471         kfree(iadev->tx_buf);
2472         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2473                             iadev->tx_dle_dma);  
2474 }
2475
2476 static void ia_free_rx(IADEV *iadev)
2477 {
2478         kfree(iadev->rx_open);
2479         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2480                           iadev->rx_dle_dma);  
2481 }
2482
2483 static int __init ia_start(struct atm_dev *dev)
2484 {  
2485         IADEV *iadev;  
2486         int error;  
2487         unsigned char phy;  
2488         u32 ctrl_reg;  
2489         IF_EVENT(printk(">ia_start\n");)  
2490         iadev = INPH_IA_DEV(dev);  
2491         if (request_irq(iadev->irq, &ia_int, SA_SHIRQ, DEV_LABEL, dev)) {  
2492                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2493                     dev->number, iadev->irq);  
2494                 error = -EAGAIN;
2495                 goto err_out;
2496         }  
2497         /* @@@ should release IRQ on error */  
2498         /* enabling memory + master */  
2499         if ((error = pci_write_config_word(iadev->pci,   
2500                                 PCI_COMMAND,   
2501                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2502         {  
2503                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2504                     "master (0x%x)\n",dev->number, error);  
2505                 error = -EIO;  
2506                 goto err_free_irq;
2507         }  
2508         udelay(10);  
2509   
2510         /* Maybe we should reset the front end, initialize Bus Interface Control   
2511                 Registers and see. */  
2512   
2513         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2514                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2515         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2516         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2517                         | CTRL_B8  
2518                         | CTRL_B16  
2519                         | CTRL_B32  
2520                         | CTRL_B48  
2521                         | CTRL_B64  
2522                         | CTRL_B128  
2523                         | CTRL_ERRMASK  
2524                         | CTRL_DLETMASK         /* should be removed later */  
2525                         | CTRL_DLERMASK  
2526                         | CTRL_SEGMASK  
2527                         | CTRL_REASSMASK          
2528                         | CTRL_FEMASK  
2529                         | CTRL_CSPREEMPT;  
2530   
2531        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2532   
2533         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2534                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2535            printk("Bus status reg after init: %08x\n", 
2536                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2537     
2538         ia_hw_type(iadev); 
2539         error = tx_init(dev);  
2540         if (error)
2541                 goto err_free_irq;
2542         error = rx_init(dev);  
2543         if (error)
2544                 goto err_free_tx;
2545   
2546         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2547         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2548         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2549                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2550         phy = 0; /* resolve compiler complaint */
2551         IF_INIT ( 
2552         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2553                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2554         else  
2555                 printk("IA: utopia,rev.%0x\n",phy);) 
2556
2557         if (iadev->phy_type &  FE_25MBIT_PHY)
2558            ia_mb25_init(iadev);
2559         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2560            ia_suni_pm7345_init(iadev);
2561         else {
2562                 error = suni_init(dev);
2563                 if (error)
2564                         goto err_free_rx;
2565                 /* 
2566                  * Enable interrupt on loss of signal
2567                  * SUNI_RSOP_CIE - 0x10
2568                  * SUNI_RSOP_CIE_LOSE - 0x04
2569                  */
2570                 ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2571 #ifndef MODULE
2572                 error = dev->phy->start(dev);
2573                 if (error)
2574                         goto err_free_rx;
2575 #endif
2576                 /* Get iadev->carrier_detect status */
2577                 IaFrontEndIntr(iadev);
2578         }
2579         return 0;
2580
2581 err_free_rx:
2582         ia_free_rx(iadev);
2583 err_free_tx:
2584         ia_free_tx(iadev);
2585 err_free_irq:
2586         free_irq(iadev->irq, dev);  
2587 err_out:
2588         return error;
2589 }  
2590   
2591 static void ia_close(struct atm_vcc *vcc)  
2592 {  
2593         u16 *vc_table;
2594         IADEV *iadev;
2595         struct ia_vcc *ia_vcc;
2596         struct sk_buff *skb = NULL;
2597         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2598         unsigned long closetime, flags;
2599         int ctimeout;
2600
2601         iadev = INPH_IA_DEV(vcc->dev);
2602         ia_vcc = INPH_IA_VCC(vcc);
2603         if (!ia_vcc) return;  
2604
2605         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2606                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2607         clear_bit(ATM_VF_READY,&vcc->flags);
2608         skb_queue_head_init (&tmp_tx_backlog);
2609         skb_queue_head_init (&tmp_vcc_backlog); 
2610         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2611            iadev->close_pending++;
2612            sleep_on_timeout(&iadev->timeout_wait, 50);
2613            spin_lock_irqsave(&iadev->tx_lock, flags); 
2614            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2615               if (ATM_SKB(skb)->vcc == vcc){ 
2616                  if (vcc->pop) vcc->pop(vcc, skb);
2617                  else dev_kfree_skb_any(skb);
2618               }
2619               else 
2620                  skb_queue_tail(&tmp_tx_backlog, skb);
2621            } 
2622            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2623              skb_queue_tail(&iadev->tx_backlog, skb);
2624            IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2625            closetime = jiffies;
2626            ctimeout = 300000 / ia_vcc->pcr;
2627            if (ctimeout == 0)
2628               ctimeout = 1;
2629            while (ia_vcc->vc_desc_cnt > 0){
2630               if ((jiffies - closetime) >= ctimeout) 
2631                  break;
2632               spin_unlock_irqrestore(&iadev->tx_lock, flags);
2633               sleep_on(&iadev->close_wait);
2634               spin_lock_irqsave(&iadev->tx_lock, flags);
2635            }    
2636            iadev->close_pending--;
2637            iadev->testTable[vcc->vci]->lastTime = 0;
2638            iadev->testTable[vcc->vci]->fract = 0; 
2639            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2640            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2641               if (vcc->qos.txtp.min_pcr > 0)
2642                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2643            }
2644            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2645               ia_vcc = INPH_IA_VCC(vcc); 
2646               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2647               ia_cbrVc_close (vcc);
2648            }
2649            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2650         }
2651         
2652         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2653            // reset reass table
2654            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2655            vc_table += vcc->vci; 
2656            *vc_table = NO_AAL5_PKT;
2657            // reset vc table
2658            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2659            vc_table += vcc->vci;
2660            *vc_table = (vcc->vci << 6) | 15;
2661            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2662               struct abr_vc_table *abr_vc_table = (struct abr_vc_table *)
2663                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2664               abr_vc_table +=  vcc->vci;
2665               abr_vc_table->rdf = 0x0003;
2666               abr_vc_table->air = 0x5eb1;
2667            }                                 
2668            // Drain the packets
2669            rx_dle_intr(vcc->dev); 
2670            iadev->rx_open[vcc->vci] = 0;
2671         }
2672         kfree(INPH_IA_VCC(vcc));  
2673         ia_vcc = NULL;
2674         vcc->dev_data = NULL;
2675         clear_bit(ATM_VF_ADDR,&vcc->flags);
2676         return;        
2677 }  
2678   
2679 static int ia_open(struct atm_vcc *vcc)
2680 {  
2681         IADEV *iadev;  
2682         struct ia_vcc *ia_vcc;  
2683         int error;  
2684         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2685         {  
2686                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2687                 vcc->dev_data = NULL;
2688         }  
2689         iadev = INPH_IA_DEV(vcc->dev);  
2690         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2691         {  
2692                 IF_EVENT(printk("iphase open: unspec part\n");)  
2693                 set_bit(ATM_VF_ADDR,&vcc->flags);
2694         }  
2695         if (vcc->qos.aal != ATM_AAL5)  
2696                 return -EINVAL;  
2697         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2698                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2699   
2700         /* Device dependent initialization */  
2701         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2702         if (!ia_vcc) return -ENOMEM;  
2703         vcc->dev_data = ia_vcc;
2704   
2705         if ((error = open_rx(vcc)))  
2706         {  
2707                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2708                 ia_close(vcc);  
2709                 return error;  
2710         }  
2711   
2712         if ((error = open_tx(vcc)))  
2713         {  
2714                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2715                 ia_close(vcc);  
2716                 return error;  
2717         }  
2718   
2719         set_bit(ATM_VF_READY,&vcc->flags);
2720
2721 #if 0
2722         {
2723            static u8 first = 1; 
2724            if (first) {
2725               ia_timer.expires = jiffies + 3*HZ;
2726               add_timer(&ia_timer);
2727               first = 0;
2728            }           
2729         }
2730 #endif
2731         IF_EVENT(printk("ia open returning\n");)  
2732         return 0;  
2733 }  
2734   
2735 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2736 {  
2737         IF_EVENT(printk(">ia_change_qos\n");)  
2738         return 0;  
2739 }  
2740   
2741 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2742 {  
2743    IA_CMDBUF ia_cmds;
2744    IADEV *iadev;
2745    int i, board;
2746    u16 __user *tmps;
2747    IF_EVENT(printk(">ia_ioctl\n");)  
2748    if (cmd != IA_CMD) {
2749       if (!dev->phy->ioctl) return -EINVAL;
2750       return dev->phy->ioctl(dev,cmd,arg);
2751    }
2752    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2753    board = ia_cmds.status;
2754    if ((board < 0) || (board >= iadev_count))
2755          board = 0;    
2756    iadev = ia_dev[board];
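   /* ia_cmds.status doubles as the board index on input; the status/len
    * fields written by the cases below only update this local copy and
    * are never copied back to user space.
    */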
2757    switch (ia_cmds.cmd) {
2758    case MEMDUMP:
2759    {
2760         switch (ia_cmds.sub_cmd) {
2761           case MEMDUMP_DEV:     
2762              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2763              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2764                 return -EFAULT;
2765              ia_cmds.status = 0;
2766              break;
2767           case MEMDUMP_SEGREG:
2768              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2769              tmps = (u16 __user *)ia_cmds.buf;
2770              for(i=0; i<0x80; i+=2, tmps++)
2771                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2772              ia_cmds.status = 0;
2773              ia_cmds.len = 0x80;
2774              break;
2775           case MEMDUMP_REASSREG:
2776              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2777              tmps = (u16 __user *)ia_cmds.buf;
2778              for(i=0; i<0x80; i+=2, tmps++)
2779                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2780              ia_cmds.status = 0;
2781              ia_cmds.len = 0x80;
2782              break;
2783           case MEMDUMP_FFL:
2784           {  
2785              ia_regs_t       *regs_local;
2786              ffredn_t        *ffL;
2787              rfredn_t        *rfL;
2788                      
2789              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2790              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2791              if (!regs_local) return -ENOMEM;
2792              ffL = &regs_local->ffredn;
2793              rfL = &regs_local->rfredn;
2794              /* Copy real rfred registers into the local copy */
2795              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2796                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2797              /* Copy real ffred registers into the local copy */
2798              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2799                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2800
2801              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2802                 kfree(regs_local);
2803                 return -EFAULT;
2804              }
2805              kfree(regs_local);
2806              printk("Board %d registers dumped\n", board);
2807              ia_cmds.status = 0;                  
2808          }      
2809              break;        
2810          case READ_REG:
2811          {  
2812              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2813              desc_dbg(iadev); 
2814              ia_cmds.status = 0; 
2815          }
2816              break;
2817          case 0x6:
2818          {  
2819              ia_cmds.status = 0; 
2820              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2821              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2822          }
2823              break;
2824          case 0x8:
2825          {
2826              struct k_sonet_stats *stats;
2827              stats = &PRIV(_ia_dev[board])->sonet_stats;
2828              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2829              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2830              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2831              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2832              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2833              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2834              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2835              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2836              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2837          }
2838             ia_cmds.status = 0;
2839             break;
2840          case 0x9:
2841             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2842             for (i = 1; i <= iadev->num_rx_desc; i++)
2843                free_desc(_ia_dev[board], i);
2844             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2845                                             iadev->reass_reg+REASS_MASK_REG);
2846             iadev->rxing = 1;
2847             
2848             ia_cmds.status = 0;
2849             break;
2850
2851          case 0xb:
2852             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2853             IaFrontEndIntr(iadev);
2854             break;
2855          case 0xa:
2856             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2857          {  
2858              ia_cmds.status = 0; 
2859              IADebugFlag = ia_cmds.maddr;
2860              printk("New debug option loaded\n");
2861          }
2862              break;
2863          default:
2864              ia_cmds.status = 0;
2865              break;
2866       } 
2867    }
2868       break;
2869    default:
2870       break;
2871
2872    }    
2873    return 0;  
2874 }  
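
/* Illustrative sketch (not part of the driver): how a privileged user-space
 * tool might drive the MEMDUMP_DEV sub-command handled above.  The field
 * names mirror how IA_CMDBUF is used in ia_ioctl(); the exact structure
 * layout, IA_CMD and the MEMDUMP* values come from iphase.h, and the
 * request is assumed to reach ia_ioctl() through the ATM core's
 * device-ioctl path (CAP_NET_ADMIN is required).
 *
 *      IA_CMDBUF req;
 *      char dump[sizeof(IADEV)];
 *
 *      memset(&req, 0, sizeof(req));
 *      req.cmd     = MEMDUMP;        // outer switch in ia_ioctl()
 *      req.sub_cmd = MEMDUMP_DEV;    // copy the whole IADEV state
 *      req.status  = board;          // board index on input
 *      req.buf     = dump;           // destination, >= sizeof(IADEV)
 *      err = ioctl(atm_fd, IA_CMD, &req);
 */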
2875   
2876 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2877         void __user *optval, int optlen)  
2878 {  
2879         IF_EVENT(printk(">ia_getsockopt\n");)  
2880         return -EINVAL;  
2881 }  
2882   
2883 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2884         void __user *optval, int optlen)  
2885 {  
2886         IF_EVENT(printk(">ia_setsockopt\n");)  
2887         return -EINVAL;  
2888 }  
2889   
2890 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2891         IADEV *iadev;
2892         struct dle *wr_ptr;
2893         struct tx_buf_desc *buf_desc_ptr;
2894         int desc;
2895         int comp_code;
2896         int total_len;
2897         struct cpcs_trailer *trailer;
2898         struct ia_vcc *iavcc;
2899
2900         iadev = INPH_IA_DEV(vcc->dev);  
2901         iavcc = INPH_IA_VCC(vcc);
2902         if (!iavcc->txing) {
2903            printk("discard packet on closed VC\n");
2904            if (vcc->pop)
2905                 vcc->pop(vcc, skb);
2906            else
2907                 dev_kfree_skb_any(skb);
2908            return 0;
2909         }
2910
2911         if (skb->len > iadev->tx_buf_sz - 8) {
2912            printk("Transmit size over tx buffer size\n");
2913            if (vcc->pop)
2914                  vcc->pop(vcc, skb);
2915            else
2916                  dev_kfree_skb_any(skb);
2917           return 0;
2918         }
2919         if ((unsigned long)skb->data & 3) {
2920            printk("Misaligned SKB\n");
2921            if (vcc->pop)
2922                  vcc->pop(vcc, skb);
2923            else
2924                  dev_kfree_skb_any(skb);
2925            return 0;
2926         }       
2927         /* Get a descriptor number from our free descriptor queue.
2928            The descriptor number comes from the TCQ, which is used here
2929            as a free buffer queue: it is initialized with all the
2930            descriptors and is therefore full to begin with.
2931         */
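        /* Each TCQ entry packs a 3-bit completion code in bits 15:13 and
         * the descriptor number in bits 12:0; e.g. an entry of 0x2005
         * splits into comp_code 1 and descriptor 5 below.
         */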
2932         desc = get_desc (iadev, iavcc);
2933         if (desc == 0xffff) 
2934             return 1;
2935         comp_code = desc >> 13;  
2936         desc &= 0x1fff;  
2937   
2938         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2939         {  
2940                 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2941                 atomic_inc(&vcc->stats->tx);
2942                 if (vcc->pop)   
2943                     vcc->pop(vcc, skb);   
2944                 else  
2945                     dev_kfree_skb_any(skb);
2946                 return 0;   /* return SUCCESS */
2947         }  
2948   
2949         if (comp_code)  
2950         {  
2951             IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2952                                                             desc, comp_code);)  
2953         }  
2954        
2955         /* remember the desc and vcc mapping */
2956         iavcc->vc_desc_cnt++;
2957         iadev->desc_tbl[desc-1].iavcc = iavcc;
2958         iadev->desc_tbl[desc-1].txskb = skb;
2959         IA_SKB_STATE(skb) = 0;
2960
2961         iadev->ffL.tcq_rd += 2;
2962         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2963                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2964         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2965   
2966         /* Put the descriptor number in the packet ready queue  
2967                 and put the updated write pointer in the DLE field   
2968         */   
2969         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2970
2971         iadev->ffL.prq_wr += 2;
2972         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2973                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2974           
2975         /* Figure out the exact length of the packet and padding required to 
2976            make it  aligned on a 48 byte boundary.  */
2977         total_len = skb->len + sizeof(struct cpcs_trailer);  
2978         total_len = ((total_len + 47) / 48) * 48;
2979         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
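        /* Worked example: a 52-byte SDU plus the 8-byte CPCS trailer is 60
         * bytes, and ((60 + 47) / 48) * 48 rounds that up to 96, i.e. two
         * full 48-byte cell payloads.
         */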
2980  
2981         /* Put the packet in a tx buffer */   
2982         trailer = iadev->tx_buf[desc-1].cpcs;
2983         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2984                   skb, skb->data, skb->len, desc);)
2985         trailer->control = 0; 
2986         /*big endian*/ 
2987         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
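        /* This open-codes the same 16-bit byte swap as the swap() macro
         * used elsewhere in this file: e.g. a length of 0x0028 (40) is
         * stored as 0x2800 so the SAR sees it big-endian.
         */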
2988         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2989
2990         /* Display the packet */  
2991         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2992                                                         skb->len, tcnter++);  
2993         xdump(skb->data, skb->len, "TX: ");
2994         printk("\n");)
2995
2996         /* Build the buffer descriptor */  
2997         buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
2998         buf_desc_ptr += desc;   /* points to the corresponding entry */  
2999         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
3000         /* Huh ? p.115 of users guide describes this as a read-only register */
3001         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3002         buf_desc_ptr->vc_index = vcc->vci;
3003         buf_desc_ptr->bytes = total_len;  
3004
3005         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3006            clear_lockup (vcc, iadev);
3007
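        /* Each packet is handed to the DMA engine as a pair of DLEs: the
         * first points at the skb payload, the second at this descriptor's
         * CPCS trailer buffer; only the trailer DLE enables the completion
         * interrupt and carries the updated PRQ write pointer.
         */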
3008         /* Build the DLE structure */  
3009         wr_ptr = iadev->tx_dle_q.write;  
3010         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3011         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3012                 skb->len, PCI_DMA_TODEVICE);
3013         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3014                                                   buf_desc_ptr->buf_start_lo;  
3015         /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */  
3016         wr_ptr->bytes = skb->len;  
3017
3018         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3019         if ((wr_ptr->bytes >> 2) == 0xb)
3020            wr_ptr->bytes = 0x30;
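        /* i.e. DLE byte counts of 44-47 (bytes >> 2 == 0xb), which include
         * the problem values 0x2d-0x2f noted above, are bumped to 0x30
         * (48); the extra bytes fall into the AAL5 pad region of the
         * on-card buffer, so only the DMA length changes.
         */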
3021
3022         wr_ptr->mode = TX_DLE_PSI; 
3023         wr_ptr->prq_wr_ptr_data = 0;
3024   
3025         /* end is not to be used for the DLE q */  
3026         if (++wr_ptr == iadev->tx_dle_q.end)  
3027                 wr_ptr = iadev->tx_dle_q.start;  
3028         
3029         /* Build trailer dle */
3030         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3031         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3032           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3033
3034         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3035         wr_ptr->mode = DMA_INT_ENABLE; 
3036         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3037         
3038         /* end is not to be used for the DLE q */
3039         if (++wr_ptr == iadev->tx_dle_q.end)  
3040                 wr_ptr = iadev->tx_dle_q.start;
3041
3042         iadev->tx_dle_q.write = wr_ptr;  
3043         ATM_DESC(skb) = vcc->vci;
3044         skb_queue_tail(&iadev->tx_dma_q, skb);
3045
3046         atomic_inc(&vcc->stats->tx);
3047         iadev->tx_pkt_cnt++;
3048         /* Increment transaction counter */  
3049         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3050         
3051 #if 0        
3052         /* add flow control logic */ 
3053         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3054           if (iavcc->vc_desc_cnt > 10) {
3055              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3056             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3057               iavcc->flow_inc = -1;
3058               iavcc->saved_tx_quota = vcc->tx_quota;
3059            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3060              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3061              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3062               iavcc->flow_inc = 0;
3063            }
3064         }
3065 #endif
3066         IF_TX(printk("ia send done\n");)  
3067         return 0;  
3068 }  
3069
3070 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3071 {
3072         IADEV *iadev; 
3073         struct ia_vcc *iavcc;
3074         unsigned long flags;
3075
3076         iadev = INPH_IA_DEV(vcc->dev);
3077         iavcc = INPH_IA_VCC(vcc); 
3078         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3079         {
3080             if (!skb)
3081                 printk(KERN_CRIT "null skb in ia_send\n");
3082             else dev_kfree_skb_any(skb);
3083             return -EINVAL;
3084         }                         
3085         spin_lock_irqsave(&iadev->tx_lock, flags); 
3086         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3087             dev_kfree_skb_any(skb);
3088             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3089             return -EINVAL; 
3090         }
3091         ATM_SKB(skb)->vcc = vcc;
3092  
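        /* Preserve ordering: if a backlog already exists, queue behind it;
         * otherwise try to transmit immediately and fall back to the
         * backlog when ia_pkt_tx() cannot get a free descriptor.
         */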
3093         if (skb_peek(&iadev->tx_backlog)) {
3094            skb_queue_tail(&iadev->tx_backlog, skb);
3095         }
3096         else {
3097            if (ia_pkt_tx (vcc, skb)) {
3098               skb_queue_tail(&iadev->tx_backlog, skb);
3099            }
3100         }
3101         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3102         return 0;
3103
3104 }
3105
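/* /proc read-out for the adapter.  Illustrative sample of the output
 * (values here are made up):
 *
 *     Board Type         :  Iphase-ATM-OC3-4KVC-1M
 *     Number of Tx Buffer:  128
 *     Size of Tx Buffer  :  8192
 *     ...
 */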
3106 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3107 {
3108   int   left = *pos, n;   
3109   char  *tmpPtr;
3110   IADEV *iadev = INPH_IA_DEV(dev);
3111   if(!left--) {
3112      if (iadev->phy_type == FE_25MBIT_PHY) {
3113        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3114        return n;
3115      }
3116      if (iadev->phy_type == FE_DS3_PHY)
3117         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3118      else if (iadev->phy_type == FE_E3_PHY)
3119         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3120      else if (iadev->phy_type == FE_UTP_OPTION)
3121          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3122      else
3123         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3124      tmpPtr = page + n;
3125      if (iadev->pci_map_size == 0x40000)
3126         n += sprintf(tmpPtr, "-1KVC-");
3127      else
3128         n += sprintf(tmpPtr, "-4KVC-");  
3129      tmpPtr = page + n; 
3130      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3131         n += sprintf(tmpPtr, "1M  \n");
3132      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3133         n += sprintf(tmpPtr, "512K\n");
3134      else
3135        n += sprintf(tmpPtr, "128K\n");
3136      return n;
3137   }
3138   if (!left) {
3139      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3140                            "  Size of Tx Buffer  :  %u\n"
3141                            "  Number of Rx Buffer:  %u\n"
3142                            "  Size of Rx Buffer  :  %u\n"
3143                            "  Packets Received   :  %u\n"
3144                            "  Packets Transmitted:  %u\n"
3145                            "  Cells Received     :  %u\n"
3146                            "  Cells Transmitted  :  %u\n"
3147                            "  Board Dropped Cells:  %u\n"
3148                            "  Board Dropped Pkts :  %u\n",
3149                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3150                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3151                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3152                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3153                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3154   }
3155   return 0;
3156 }
3157   
3158 static const struct atmdev_ops ops = {  
3159         .open           = ia_open,  
3160         .close          = ia_close,  
3161         .ioctl          = ia_ioctl,  
3162         .getsockopt     = ia_getsockopt,  
3163         .setsockopt     = ia_setsockopt,  
3164         .send           = ia_send,  
3165         .phy_put        = ia_phy_put,  
3166         .phy_get        = ia_phy_get,  
3167         .change_qos     = ia_change_qos,  
3168         .proc_read      = ia_proc_read,
3169         .owner          = THIS_MODULE,
3170 };  
3171           
3172 static int __devinit ia_init_one(struct pci_dev *pdev,
3173                                  const struct pci_device_id *ent)
3174 {  
3175         struct atm_dev *dev;  
3176         IADEV *iadev;  
3177         unsigned long flags;
3178         int ret;
3179
3180         iadev = kmalloc(sizeof(*iadev), GFP_KERNEL); 
3181         if (!iadev) {
3182                 ret = -ENOMEM;
3183                 goto err_out;
3184         }
3185         memset(iadev, 0, sizeof(*iadev));
3186         iadev->pci = pdev;
3187
3188         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3189                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3190         if (pci_enable_device(pdev)) {
3191                 ret = -ENODEV;
3192                 goto err_out_free_iadev;
3193         }
3194         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3195         if (!dev) {
3196                 ret = -ENOMEM;
3197                 goto err_out_disable_dev;
3198         }
3199         dev->dev_data = iadev;
3200         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3201         IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3202                 iadev->LineRate);)
3203
3204         ia_dev[iadev_count] = iadev;
3205         _ia_dev[iadev_count] = dev;
3206         iadev_count++;
3207         spin_lock_init(&iadev->misc_lock);
3208         /* First fixes first. I don't want to think about this now. */
3209         spin_lock_irqsave(&iadev->misc_lock, flags); 
3210         if (ia_init(dev) || ia_start(dev)) {  
3211                 IF_INIT(printk("IA register failed!\n");)
3212                 iadev_count--;
3213                 ia_dev[iadev_count] = NULL;
3214                 _ia_dev[iadev_count] = NULL;
3215                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3216                 ret = -EINVAL;
3217                 goto err_out_deregister_dev;
3218         }
3219         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3220         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3221
3222         iadev->next_board = ia_boards;  
3223         ia_boards = dev;  
3224
3225         pci_set_drvdata(pdev, dev);
3226
3227         return 0;
3228
3229 err_out_deregister_dev:
3230         atm_dev_deregister(dev);  
3231 err_out_disable_dev:
3232         pci_disable_device(pdev);
3233 err_out_free_iadev:
3234         kfree(iadev);
3235 err_out:
3236         return ret;
3237 }
3238
3239 static void __devexit ia_remove_one(struct pci_dev *pdev)
3240 {
3241         struct atm_dev *dev = pci_get_drvdata(pdev);
3242         IADEV *iadev = INPH_IA_DEV(dev);
3243
3244         ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10); 
3245         udelay(1);
3246
3247         /* De-register device */  
3248         free_irq(iadev->irq, dev);
3249         iadev_count--;
3250         ia_dev[iadev_count] = NULL;
3251         _ia_dev[iadev_count] = NULL;
3252         atm_dev_deregister(dev);
3253         IF_EVENT(printk("iav deregistered at (itf:%d)\n", dev->number);)
3254
3255         iounmap((void *) iadev->base);  
3256         pci_disable_device(pdev);
3257
3258         ia_free_rx(iadev);
3259         ia_free_tx(iadev);
3260
3261         kfree(iadev);
3262 }
3263
3264 static struct pci_device_id ia_pci_tbl[] = {
3265         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3266         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3267         { 0,}
3268 };
3269 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3270
3271 static struct pci_driver ia_driver = {
3272         .name =         DEV_LABEL,
3273         .id_table =     ia_pci_tbl,
3274         .probe =        ia_init_one,
3275         .remove =       __devexit_p(ia_remove_one),
3276 };
3277
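/* Module init/exit.  ia_timer is a single driver-wide timer armed once
 * here, 3 seconds after a successful PCI registration; its handler (set
 * up earlier in the driver) is presumably responsible for re-arming it.
 */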
3278 static int __init ia_module_init(void)
3279 {
3280         int ret;
3281
3282         ret = pci_module_init(&ia_driver);
3283         if (ret >= 0) {
3284                 ia_timer.expires = jiffies + 3*HZ;
3285                 add_timer(&ia_timer); 
3286         } else
3287                 printk(KERN_ERR DEV_LABEL ": no adapter found\n");  
3288         return ret;
3289 }
3290
3291 static void __exit ia_module_exit(void)
3292 {
3293         pci_unregister_driver(&ia_driver);
3294
3295         del_timer(&ia_timer);
3296 }
3297
3298 module_init(ia_module_init);
3299 module_exit(ia_module_exit);