1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
3                     Author: Peter Wang  <pwang@iphase.com>            
4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>           
6                                Version: 1.0                           
7 *******************************************************************************
8       
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18       
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which 
20       was originally written by Monalisa Agrawal at UNH. Now this driver 
21       supports a variety of variants of Interphase ATM PCI (i)Chip adapter 
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
23       in terms of PHY type, the size of control memory and the size of 
24       packet memory. The following is the change log and history:
25      
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI 
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
41 *******************************************************************************/
42
43 #include <linux/module.h>  
44 #include <linux/kernel.h>  
45 #include <linux/mm.h>  
46 #include <linux/pci.h>  
47 #include <linux/errno.h>  
48 #include <linux/atm.h>  
49 #include <linux/atmdev.h>  
50 #include <linux/sonet.h>  
51 #include <linux/skbuff.h>  
52 #include <linux/time.h>  
53 #include <linux/delay.h>  
54 #include <linux/uio.h>  
55 #include <linux/init.h>  
56 #include <linux/wait.h>
57 #include <asm/system.h>  
58 #include <asm/io.h>  
59 #include <asm/atomic.h>  
60 #include <asm/uaccess.h>  
61 #include <asm/string.h>  
62 #include <asm/byteorder.h>  
63 #include <linux/vmalloc.h>  
64 #include "iphase.h"               
65 #include "suni.h"                 
66 #define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))  
67 struct suni_priv {
68         struct k_sonet_stats sonet_stats; /* link diagnostics */
69         unsigned char loop_mode;        /* loopback mode */
70         struct atm_dev *dev;            /* device back-pointer */
71         struct suni_priv *next;         /* next SUNI */
72 }; 
73 #define PRIV(dev) ((struct suni_priv *) (dev)->phy_data)
74
75 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
76 static void desc_dbg(IADEV *iadev);
77
78 static IADEV *ia_dev[8];
79 static struct atm_dev *_ia_dev[8];
80 static int iadev_count;
81 static void ia_led_timer(unsigned long arg);
82 static struct timer_list ia_timer = TIMER_INITIALIZER(ia_led_timer, 0, 0);
83 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
84 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
85 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
86             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
87
88 module_param(IA_TX_BUF, int, 0);
89 module_param(IA_TX_BUF_SZ, int, 0);
90 module_param(IA_RX_BUF, int, 0);
91 module_param(IA_RX_BUF_SZ, int, 0);
92 module_param(IADebugFlag, uint, 0644);
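
/*
 * Usage note (illustrative, not from the original source): the buffer
 * counts/sizes and the debug mask can be overridden at module load time,
 * for example:
 *
 *     modprobe iphase IA_TX_BUF=256 IA_RX_BUF=256 IADebugFlag=0x1
 *
 * The numeric values above are only examples; the DFL_* constants and the
 * IF_IADBG_* bits referenced above define the defaults and the meaning of
 * the individual debug bits.
 */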
93
94 MODULE_LICENSE("GPL");
95
96 #if BITS_PER_LONG != 32
97 #  error FIXME: this driver only works on 32-bit platforms
98 #endif
99
100 /**************************** IA_LIB **********************************/
101
102 static void ia_init_rtn_q (IARTN_Q *que) 
103 {
104    que->next = NULL; 
105    que->tail = NULL; 
106 }
107
108 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
109 {
110    data->next = NULL;
111    if (que->next == NULL) 
112       que->next = que->tail = data;
113    else {
114       data->next = que->next;
115       que->next = data;
116    } 
117    return;
118 }
119
120 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
121    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
122    if (!entry) return -1;
123    entry->data = data;
124    entry->next = NULL;
125    if (que->next == NULL) 
126       que->next = que->tail = entry;
127    else {
128       que->tail->next = entry;
129       que->tail = que->tail->next;
130    }      
131    return 1;
132 }
133
134 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
135    IARTN_Q *tmpdata;
136    if (que->next == NULL)
137       return NULL;
138    tmpdata = que->next;
139    if ( que->next == que->tail)  
140       que->next = que->tail = NULL;
141    else 
142       que->next = que->next->next;
143    return tmpdata;
144 }
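
/*
 * Summary comment (added for clarity): tx_return_q is a simple singly
 * linked FIFO of completed-transmit records.  ia_hack_tcq() produces
 * entries with ia_enque_rtn_q() when a descriptor of a rate-limited VC
 * completes, ia_tx_poll() consumes them with ia_deque_rtn_q(), and
 * ia_enque_head_rtn_q() pushes an entry back to the front when its skb
 * has not yet been matched.
 */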
145
146 static void ia_hack_tcq(IADEV *dev) {
147
148   u_short               desc1;
149   u_short               tcq_wr;
150   struct ia_vcc         *iavcc_r = NULL; 
151
152   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
153   while (dev->host_tcq_wr != tcq_wr) {
154      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
155      if (!desc1) ;
156      else if (!dev->desc_tbl[desc1 -1].timestamp) {
157         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
158         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
159      }                                 
160      else if (dev->desc_tbl[desc1 -1].timestamp) {
161         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
162            printk("IA: Fatal err in get_desc\n");
163            continue;
164         }
165         iavcc_r->vc_desc_cnt--;
166         dev->desc_tbl[desc1 -1].timestamp = 0;
167         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n", 
168                                    (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
169         if (iavcc_r->pcr < dev->rate_limit) {
170            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
171            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
172               printk("ia_hack_tcq: No memory available\n");
173         } 
174         dev->desc_tbl[desc1 -1].iavcc = NULL;
175         dev->desc_tbl[desc1 -1].txskb = NULL;
176      }
177      dev->host_tcq_wr += 2;
178      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
179         dev->host_tcq_wr = dev->ffL.tcq_st;
180   }
181 } /* ia_hack_tcq */
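
/*
 * Note (added for clarity): the Transmit Complete Queue (TCQ) lives in
 * segmentation RAM as a ring of 16-bit descriptor numbers.  host_tcq_wr
 * is the host's "processed up to here" position; ia_hack_tcq() walks it
 * forward, 2 bytes per slot and wrapping from ffL.tcq_ed back to
 * ffL.tcq_st, until it catches up with the adapter's TCQ_WR_PTR,
 * reclaiming each completed descriptor on the way.
 */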
182
183 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
184   u_short               desc_num, i;
185   struct sk_buff        *skb;
186   struct ia_vcc         *iavcc_r = NULL; 
187   unsigned long delta;
188   static unsigned long timer = 0;
189   int ltimeout;
190
191   ia_hack_tcq (dev);
192   if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){      
193      timer = jiffies; 
194      i=0;
195      while (i < dev->num_tx_desc) {
196         if (!dev->desc_tbl[i].timestamp) {
197            i++;
198            continue;
199         }
200         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
201         delta = jiffies - dev->desc_tbl[i].timestamp;
202         if (delta >= ltimeout) {
203            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
204            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
205               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
206            else 
207               dev->ffL.tcq_rd -= 2;
208            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
209            if (!(skb = dev->desc_tbl[i].txskb) || 
210                           !(iavcc_r = dev->desc_tbl[i].iavcc))
211               printk("Fatal err, desc table vcc or skb is NULL\n");
212            else 
213               iavcc_r->vc_desc_cnt--;
214            dev->desc_tbl[i].timestamp = 0;
215            dev->desc_tbl[i].iavcc = NULL;
216            dev->desc_tbl[i].txskb = NULL;
217         }
218         i++;
219      } /* while */
220   }
221   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
222      return 0xFFFF;
223     
224   /* Get the next available descriptor number from TCQ */
225   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
226
227   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
228      dev->ffL.tcq_rd += 2;
229      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
230      dev->ffL.tcq_rd = dev->ffL.tcq_st;
231      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
232         return 0xFFFF; 
233      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
234   }
235
236   /* get system time */
237   dev->desc_tbl[desc_num -1].timestamp = jiffies;
238   return desc_num;
239 }
240
241 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
242   u_char                foundLockUp;
243   vcstatus_t            *vcstatus;
244   u_short               *shd_tbl;
245   u_short               tempCellSlot, tempFract;
246   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
247   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
248   u_int  i;
249
250   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
251      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
252      vcstatus->cnt++;
253      foundLockUp = 0;
254      if( vcstatus->cnt == 0x05 ) {
255         abr_vc += vcc->vci;
256         eabr_vc += vcc->vci;
257         if( eabr_vc->last_desc ) {
258            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
259               /* Wait for 10 Micro sec */
260               udelay(10);
261               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
262                  foundLockUp = 1;
263            }
264            else {
265               tempCellSlot = abr_vc->last_cell_slot;
266               tempFract    = abr_vc->fraction;
267               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
268                          && (tempFract == dev->testTable[vcc->vci]->fract))
269                  foundLockUp = 1;                   
270               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
271               dev->testTable[vcc->vci]->fract = tempFract; 
272            }        
273         } /* last descriptor */            
274         vcstatus->cnt = 0;      
275      } /* vcstatus->cnt */
276         
277      if (foundLockUp) {
278         IF_ABR(printk("LOCK UP found\n");) 
279         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
280         /* Wait for 10 Micro sec */
281         udelay(10); 
282         abr_vc->status &= 0xFFF8;
283         abr_vc->status |= 0x0001;  /* state is idle */
284         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
285         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
286         if (i < dev->num_vc)
287            shd_tbl[i] = vcc->vci;
288         else
289            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
290         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
291         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
292         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
293         vcstatus->cnt = 0;
294      } /* foundLockUp */
295
296   } /* if an ABR VC */
297
298
299 }
300  
301 /*
302 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
303 **
304 **  +----+----+------------------+-------------------------------+
305 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
306 **  +----+----+------------------+-------------------------------+
307 ** 
308 **    R = reserved (written as 0)
309 **    NZ = 0 if 0 cells/sec; 1 otherwise
310 **
311 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
312 */
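/*
** Worked example (illustrative): cellrate_to_float(100000) finds the most
** significant set bit at position 16, so the exponent is 16 and the
** mantissa is (100000 >> (16 - 9)) & 0x1ff = 0x10d.  The result is
** 0x4000 | (16 << 9) | 0x10d = 0x610d, which decodes back to
** (1 + 269/512) * 2^16 = 99968 cells/sec; the low-order bits are simply
** truncated.
*/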
313 static u16
314 cellrate_to_float(u32 cr)
315 {
316
317 #define NZ              0x4000
318 #define M_BITS          9               /* Number of bits in mantissa */
319 #define E_BITS          5               /* Number of bits in exponent */
320 #define M_MASK          0x1ff           
321 #define E_MASK          0x1f
322   u16   flot;
323   u32   tmp = cr & 0x00ffffff;
324   int   i   = 0;
325   if (cr == 0)
326      return 0;
327   while (tmp != 1) {
328      tmp >>= 1;
329      i++;
330   }
331   if (i == M_BITS)
332      flot = NZ | (i << M_BITS) | (cr & M_MASK);
333   else if (i < M_BITS)
334      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
335   else
336      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
337   return flot;
338 }
339
340 #if 0
341 /*
342 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
343 */
344 static u32
345 float_to_cellrate(u16 rate)
346 {
347   u32   exp, mantissa, cps;
348   if ((rate & NZ) == 0)
349      return 0;
350   exp = (rate >> M_BITS) & E_MASK;
351   mantissa = rate & M_MASK;
352   if (exp == 0)
353      return 1;
354   cps = (1 << M_BITS) | mantissa;
355   if (exp == M_BITS)
356      cps = cps;
357   else if (exp > M_BITS)
358      cps <<= (exp - M_BITS);
359   else
360      cps >>= (M_BITS - exp);
361   return cps;
362 }
363 #endif 
364
365 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
366   srv_p->class_type = ATM_ABR;
367   srv_p->pcr        = dev->LineRate;
368   srv_p->mcr        = 0;
369   srv_p->icr        = 0x055cb7;
370   srv_p->tbe        = 0xffffff;
371   srv_p->frtt       = 0x3a;
372   srv_p->rif        = 0xf;
373   srv_p->rdf        = 0xb;
374   srv_p->nrm        = 0x4;
375   srv_p->trm        = 0x7;
376   srv_p->cdf        = 0x3;
377   srv_p->adtf       = 50;
378 }
379
380 static int
381 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
382                                                 struct atm_vcc *vcc, u8 flag)
383 {
384   f_vc_abr_entry  *f_abr_vc;
385   r_vc_abr_entry  *r_abr_vc;
386   u32           icr;
387   u8            trm, nrm, crm;
388   u16           adtf, air, *ptr16;      
389   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
390   f_abr_vc += vcc->vci;       
391   switch (flag) {
392      case 1: /* FFRED initialization */
393 #if 0  /* sanity check */
394        if (srv_p->pcr == 0)
395           return INVALID_PCR;
396        if (srv_p->pcr > dev->LineRate)
397           srv_p->pcr = dev->LineRate;
398        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
399           return MCR_UNAVAILABLE;
400        if (srv_p->mcr > srv_p->pcr)
401           return INVALID_MCR;
402        if (!(srv_p->icr))
403           srv_p->icr = srv_p->pcr;
404        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
405           return INVALID_ICR;
406        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
407           return INVALID_TBE;
408        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
409           return INVALID_FRTT;
410        if (srv_p->nrm > MAX_NRM)
411           return INVALID_NRM;
412        if (srv_p->trm > MAX_TRM)
413           return INVALID_TRM;
414        if (srv_p->adtf > MAX_ADTF)
415           return INVALID_ADTF;
416        else if (srv_p->adtf == 0)
417           srv_p->adtf = 1;
418        if (srv_p->cdf > MAX_CDF)
419           return INVALID_CDF;
420        if (srv_p->rif > MAX_RIF)
421           return INVALID_RIF;
422        if (srv_p->rdf > MAX_RDF)
423           return INVALID_RDF;
424 #endif
425        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
426        f_abr_vc->f_vc_type = ABR;
427        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
428                                   /* i.e 2**n = 2 << (n-1) */
429        f_abr_vc->f_nrm = nrm << 8 | nrm;
430        trm = 100000/(2 << (16 - srv_p->trm));
431        if ( trm == 0) trm = 1;
432        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
433        crm = srv_p->tbe / nrm;
434        if (crm == 0) crm = 1;
435        f_abr_vc->f_crm = crm & 0xff;
436        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
437        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
438                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
439                                 (1000000/(srv_p->frtt/srv_p->tbe)));
440        f_abr_vc->f_icr = cellrate_to_float(icr);
441        adtf = (10000 * srv_p->adtf)/8192;
442        if (adtf == 0) adtf = 1; 
443        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
444        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
445        f_abr_vc->f_acr = f_abr_vc->f_icr;
446        f_abr_vc->f_status = 0x0042;
447        break;
448     case 0: /* RFRED initialization */  
449        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
450        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
451        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
452        r_abr_vc += vcc->vci;
453        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
454        air = srv_p->pcr << (15 - srv_p->rif);
455        if (air == 0) air = 1;
456        r_abr_vc->r_air = cellrate_to_float(air);
457        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
458        dev->sum_mcr        += srv_p->mcr;
459        dev->n_abr++;
460        break;
461     default:
462        break;
463   }
464   return        0;
465 }
466 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
467    u32 rateLow=0, rateHigh, rate;
468    int entries;
469    struct ia_vcc *ia_vcc;
470
471    int   idealSlot =0, testSlot, toBeAssigned, inc;
472    u32   spacing;
473    u16  *SchedTbl, *TstSchedTbl;
474    u16  cbrVC, vcIndex;
475    u32   fracSlot    = 0;
476    u32   sp_mod      = 0;
477    u32   sp_mod2     = 0;
478
479    /* IpAdjustTrafficParams */
480    if (vcc->qos.txtp.max_pcr <= 0) {
481       IF_ERR(printk("PCR for CBR not defined\n");)
482       return -1;
483    }
484    rate = vcc->qos.txtp.max_pcr;
485    entries = rate / dev->Granularity;
486    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
487                                 entries, rate, dev->Granularity);)
488    if (entries < 1)
489       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
490    rateLow  =  entries * dev->Granularity;
491    rateHigh = (entries + 1) * dev->Granularity;
492    if (3*(rate - rateLow) > (rateHigh - rate))
493       entries++;
494    if (entries > dev->CbrRemEntries) {
495       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
496       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
497                                        entries, dev->CbrRemEntries);)
498       return -EBUSY;
499    }   
500
501    ia_vcc = INPH_IA_VCC(vcc);
502    ia_vcc->NumCbrEntry = entries; 
503    dev->sum_mcr += entries * dev->Granularity; 
504    /* IaFFrednInsertCbrSched */
505    // Starting at an arbitrary location, place the entries into the table
506    // as smoothly as possible
507    cbrVC   = 0;
508    spacing = dev->CbrTotEntries / entries;
509    sp_mod  = dev->CbrTotEntries % entries; // get modulo
510    toBeAssigned = entries;
511    fracSlot = 0;
512    vcIndex  = vcc->vci;
513    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
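   /*
    * Placement note (added for clarity): the loop below spreads this
    * connection's "entries" slots evenly over the CbrTotEntries-slot
    * schedule.  The integer step is "spacing"; the remainder "sp_mod" is
    * accumulated in sp_mod2 and, whenever the accumulated remainder
    * reaches "entries", the next step is stretched by one slot via
    * fracSlot.  For example, a 4096-slot table and a VC needing 3 slots
    * gives steps of roughly 1365 (4096/3) slots.
    */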
514    while (toBeAssigned)
515    {
516       // If this is the first time, start the table loading for this connection
517       // as close to entryPoint as possible.
518       if (toBeAssigned == entries)
519       {
520          idealSlot = dev->CbrEntryPt;
521          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
522          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
523             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
524       } else {
525          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
526          // in the table that would be  smoothest
527          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
528          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
529       }
530       if (idealSlot >= (int)dev->CbrTotEntries) 
531          idealSlot -= dev->CbrTotEntries;  
532       // Continuously check around this ideal value until a null
533       // location is encountered.
534       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
535       inc = 0;
536       testSlot = idealSlot;
537       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
538       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
539                                 testSlot, (u32)TstSchedTbl,toBeAssigned);) 
540       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
541       while (cbrVC)  // If another VC at this location, we have to keep looking
542       {
543           inc++;
544           testSlot = idealSlot - inc;
545           if (testSlot < 0) { // Wrap if necessary
546              testSlot += dev->CbrTotEntries;
547              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
548                                                        (u32)SchedTbl,testSlot);)
549           }
550           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
551           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
552           if (!cbrVC)
553              break;
554           testSlot = idealSlot + inc;
555           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
556              testSlot -= dev->CbrTotEntries;
557              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
558              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
559                                             testSlot, toBeAssigned);)
560           } 
561           // set table index and read in value
562           TstSchedTbl = (u16*)(SchedTbl + testSlot);
563           IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
564                           (u32)TstSchedTbl,cbrVC,inc);) 
565           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
566        } /* while */
567        // Move this VCI number into this location of the CBR Sched table.
568        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(vcIndex));
569        dev->CbrRemEntries--;
570        toBeAssigned--;
571    } /* while */ 
572
573    /* IaFFrednCbrEnable */
574    dev->NumEnabledCBR++;
575    if (dev->NumEnabledCBR == 1) {
576        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
577        IF_CBR(printk("CBR is enabled\n");)
578    }
579    return 0;
580 }
581 static void ia_cbrVc_close (struct atm_vcc *vcc) {
582    IADEV *iadev;
583    u16 *SchedTbl, NullVci = 0;
584    u32 i, NumFound;
585
586    iadev = INPH_IA_DEV(vcc->dev);
587    iadev->NumEnabledCBR--;
588    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
589    if (iadev->NumEnabledCBR == 0) {
590       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
591       IF_CBR (printk("CBR support disabled\n");)
592    }
593    NumFound = 0;
594    for (i=0; i < iadev->CbrTotEntries; i++)
595    {
596       if (*SchedTbl == vcc->vci) {
597          iadev->CbrRemEntries++;
598          *SchedTbl = NullVci;
599          IF_CBR(NumFound++;)
600       }
601       SchedTbl++;   
602    } 
603    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
604 }
605
606 static int ia_avail_descs(IADEV *iadev) {
607    int tmp = 0;
608    ia_hack_tcq(iadev);
609    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
610       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
611    else
612       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
613                    iadev->ffL.tcq_st) / 2;
614    return tmp;
615 }    
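
/*
 * Note (added for clarity): ia_avail_descs() counts the descriptor numbers
 * currently sitting in the TCQ between the read pointer (ffL.tcq_rd) and
 * the host's processed write position (host_tcq_wr), i.e. how many free
 * descriptors get_desc() could still hand out.  Each TCQ slot is a 16-bit
 * descriptor number, hence the division by 2; the else branch handles the
 * ring wrapping past ffL.tcq_ed back to ffL.tcq_st.
 */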
616
617 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
618
619 static int ia_que_tx (IADEV *iadev) { 
620    struct sk_buff *skb;
621    int num_desc;
622    struct atm_vcc *vcc;
623    struct ia_vcc *iavcc;
624    num_desc = ia_avail_descs(iadev);
625
626    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
627       if (!(vcc = ATM_SKB(skb)->vcc)) {
628          dev_kfree_skb_any(skb);
629          printk("ia_que_tx: Null vcc\n");
630          break;
631       }
632       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
633          dev_kfree_skb_any(skb);
634          printk("Free the SKB on closed vci %d \n", vcc->vci);
635          break;
636       }
637       iavcc = INPH_IA_VCC(vcc);
638       if (ia_pkt_tx (vcc, skb)) {
639          skb_queue_head(&iadev->tx_backlog, skb);
640       }
641       num_desc--;
642    }
643    return 0;
644 }
645
646 static void ia_tx_poll (IADEV *iadev) {
647    struct atm_vcc *vcc = NULL;
648    struct sk_buff *skb = NULL, *skb1 = NULL;
649    struct ia_vcc *iavcc;
650    IARTN_Q *  rtne;
651
652    ia_hack_tcq(iadev);
653    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
654        skb = rtne->data.txskb;
655        if (!skb) {
656            printk("ia_tx_poll: skb is null\n");
657            goto out;
658        }
659        vcc = ATM_SKB(skb)->vcc;
660        if (!vcc) {
661            printk("ia_tx_poll: vcc is null\n");
662            dev_kfree_skb_any(skb);
663            goto out;
664        }
665
666        iavcc = INPH_IA_VCC(vcc);
667        if (!iavcc) {
668            printk("ia_tx_poll: iavcc is null\n");
669            dev_kfree_skb_any(skb);
670            goto out;
671        }
672
673        skb1 = skb_dequeue(&iavcc->txing_skb);
674        while (skb1 && (skb1 != skb)) {
675           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
676              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
677           }
678           IF_ERR(printk("Release the SKB not match\n");)
679           if ((vcc->pop) && (skb1->len != 0))
680           {
681              vcc->pop(vcc, skb1);
682              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
683                                                           (long)skb1);)
684           }
685           else 
686              dev_kfree_skb_any(skb1);
687           skb1 = skb_dequeue(&iavcc->txing_skb);
688        }                                                        
689        if (!skb1) {
690           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
691           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
692           break;
693        }
694        if ((vcc->pop) && (skb->len != 0))
695        {
696           vcc->pop(vcc, skb);
697           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
698        }
699        else 
700           dev_kfree_skb_any(skb);
701        kfree(rtne);
702     }
703     ia_que_tx(iadev);
704 out:
705     return;
706 }
707 #if 0
708 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
709 {
710         u32     t;
711         int     i;
712         /*
713          * Issue a command to enable writes to the NOVRAM
714          */
715         NVRAM_CMD (EXTEND + EWEN);
716         NVRAM_CLR_CE;
717         /*
718          * issue the write command
719          */
720         NVRAM_CMD(IAWRITE + addr);
721         /* 
722          * Send the data, starting with D15, then D14, and so on for 16 bits
723          */
724         for (i=15; i>=0; i--) {
725                 NVRAM_CLKOUT (val & 0x8000);
726                 val <<= 1;
727         }
728         NVRAM_CLR_CE;
729         CFG_OR(NVCE);
730         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
731         while (!(t & NVDO))
732                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
733
734         NVRAM_CLR_CE;
735         /*
736          * disable writes again
737          */
738         NVRAM_CMD(EXTEND + EWDS);
739         NVRAM_CLR_CE;
740         CFG_AND(~NVDI);
741 }
742 #endif
743
744 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
745 {
746         u_short val;
747         u32     t;
748         int     i;
749         /*
750          * Read the first bit that was clocked with the falling edge of the
751          * the last command data clock
752          */
753         NVRAM_CMD(IAREAD + addr);
754         /*
755          * Now read the rest of the bits, the next bit read is D14, then D13,
756          * and so on.
757          */
758         val = 0;
759         for (i=15; i>=0; i--) {
760                 NVRAM_CLKIN(t);
761                 val |= (t << i);
762         }
763         NVRAM_CLR_CE;
764         CFG_AND(~NVDI);
765         return val;
766 }
767
768 static void ia_hw_type(IADEV *iadev) {
769    u_short memType = ia_eeprom_get(iadev, 25);   
770    iadev->memType = memType;
771    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
772       iadev->num_tx_desc = IA_TX_BUF;
773       iadev->tx_buf_sz = IA_TX_BUF_SZ;
774       iadev->num_rx_desc = IA_RX_BUF;
775       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
776    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
777       if (IA_TX_BUF == DFL_TX_BUFFERS)
778         iadev->num_tx_desc = IA_TX_BUF / 2;
779       else 
780         iadev->num_tx_desc = IA_TX_BUF;
781       iadev->tx_buf_sz = IA_TX_BUF_SZ;
782       if (IA_RX_BUF == DFL_RX_BUFFERS)
783         iadev->num_rx_desc = IA_RX_BUF / 2;
784       else
785         iadev->num_rx_desc = IA_RX_BUF;
786       iadev->rx_buf_sz = IA_RX_BUF_SZ;
787    }
788    else {
789       if (IA_TX_BUF == DFL_TX_BUFFERS) 
790         iadev->num_tx_desc = IA_TX_BUF / 8;
791       else
792         iadev->num_tx_desc = IA_TX_BUF;
793       iadev->tx_buf_sz = IA_TX_BUF_SZ;
794       if (IA_RX_BUF == DFL_RX_BUFFERS)
795         iadev->num_rx_desc = IA_RX_BUF / 8;
796       else
797         iadev->num_rx_desc = IA_RX_BUF;
798       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
799    } 
800    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
801    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
802          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
803          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
804
805 #if 0
806    if ((memType & FE_MASK) == FE_SINGLE_MODE)
807       iadev->phy_type = PHY_OC3C_S;
808    else if ((memType & FE_MASK) == FE_UTP_OPTION)
809       iadev->phy_type = PHY_UTP155;
810    else
811       iadev->phy_type = PHY_OC3C_M;
812 #endif
813    
814    iadev->phy_type = memType & FE_MASK;
815    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
816                                          memType,iadev->phy_type);)
817    if (iadev->phy_type == FE_25MBIT_PHY) 
818       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
819    else if (iadev->phy_type == FE_DS3_PHY)
820       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
821    else if (iadev->phy_type == FE_E3_PHY) 
822       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
823    else
824        iadev->LineRate = (u32)(ATM_OC3_PCR);
825    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
826
827 }
828
829 static void IaFrontEndIntr(IADEV *iadev) {
830   volatile IA_SUNI *suni;
831   volatile ia_mb25_t *mb25;
832   volatile suni_pm7345_t *suni_pm7345;
833   u32 intr_status;
834   u_int frmr_intr;
835
836   if(iadev->phy_type & FE_25MBIT_PHY) {
837      mb25 = (ia_mb25_t*)iadev->phy;
838      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
839   } else if (iadev->phy_type & FE_DS3_PHY) {
840      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
841      /* clear FRMR interrupts */
842      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat; 
843      iadev->carrier_detect =  
844            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
845   } else if (iadev->phy_type & FE_E3_PHY ) {
846      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
847      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
848      iadev->carrier_detect =
849            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
850   }
851   else { 
852      suni = (IA_SUNI *)iadev->phy;
853      intr_status = suni->suni_rsop_status & 0xff;
854      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
855   }
856   if (iadev->carrier_detect)
857     printk("IA: SUNI carrier detected\n");
858   else
859     printk("IA: SUNI carrier lost signal\n"); 
860   return;
861 }
862
863 static void ia_mb25_init (IADEV *iadev)
864 {
865    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
866 #if 0
867    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
868 #endif
869    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
870    mb25->mb25_diag_control = 0;
871    /*
872     * Initialize carrier detect state
873     */
874    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
875    return;
876 }                   
877
878 static void ia_suni_pm7345_init (IADEV *iadev)
879 {
880    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
881    if (iadev->phy_type & FE_DS3_PHY)
882    {
883       iadev->carrier_detect = 
884           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); 
885       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
886       suni_pm7345->suni_ds3_frm_cfg = 1;
887       suni_pm7345->suni_ds3_tran_cfg = 1;
888       suni_pm7345->suni_config = 0;
889       suni_pm7345->suni_splr_cfg = 0;
890       suni_pm7345->suni_splt_cfg = 0;
891    }
892    else 
893    {
894       iadev->carrier_detect = 
895           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
896       suni_pm7345->suni_e3_frm_fram_options = 0x4;
897       suni_pm7345->suni_e3_frm_maint_options = 0x20;
898       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
899       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
900       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
901       suni_pm7345->suni_e3_tran_fram_options = 0x1;
902       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
903       suni_pm7345->suni_splr_cfg = 0x41;
904       suni_pm7345->suni_splt_cfg = 0x41;
905    } 
906    /*
907     * Enable RSOP loss of signal interrupt.
908     */
909    suni_pm7345->suni_intr_enbl = 0x28;
910  
911    /*
912     * Clear error counters
913     */
914    suni_pm7345->suni_id_reset = 0;
915
916    /*
917     * Clear "PMCTST" in master test register.
918     */
919    suni_pm7345->suni_master_test = 0;
920
921    suni_pm7345->suni_rxcp_ctrl = 0x2c;
922    suni_pm7345->suni_rxcp_fctrl = 0x81;
923  
924    suni_pm7345->suni_rxcp_idle_pat_h1 =
925         suni_pm7345->suni_rxcp_idle_pat_h2 =
926         suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
927    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
928  
929    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
930    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
931    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
932    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
933  
934    suni_pm7345->suni_rxcp_cell_pat_h1 =
935         suni_pm7345->suni_rxcp_cell_pat_h2 =
936         suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
937    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
938  
939    suni_pm7345->suni_rxcp_cell_mask_h1 =
940         suni_pm7345->suni_rxcp_cell_mask_h2 =
941         suni_pm7345->suni_rxcp_cell_mask_h3 =
942         suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
943  
944    suni_pm7345->suni_txcp_ctrl = 0xa4;
945    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
946    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
947  
948    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
949                                  SUNI_PM7345_CLB |
950                                  SUNI_PM7345_DLB |
951                                   SUNI_PM7345_PLB);
952 #ifdef __SNMP__
953    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
954 #endif /* __SNMP__ */
955    return;
956 }
957
958
959 /***************************** IA_LIB END *****************************/
960     
961 static int tcnter = 0;
962 static void xdump( u_char*  cp, int  length, char*  prefix )
963 {
964     int col, count;
965     u_char prntBuf[120];
966     u_char*  pBuf = prntBuf;
967     count = 0;
968     while(count < length){
969         pBuf += sprintf( pBuf, "%s", prefix );
970         for(col = 0;count + col < length && col < 16; col++){
971             if (col != 0 && (col % 4) == 0)
972                 pBuf += sprintf( pBuf, " " );
973             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
974         }
975         while(col++ < 16){      /* pad end of buffer with blanks */
976             if ((col % 4) == 0)
977                 pBuf += sprintf( pBuf, " " );
978             pBuf += sprintf( pBuf, "   " );
979         }
980         pBuf += sprintf( pBuf, "  " );
981         for(col = 0;count + col < length && col < 16; col++){
982             if (isprint((int)cp[count + col]))
983                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
984             else
985                 pBuf += sprintf( pBuf, "." );
986                 }
987         sprintf( pBuf, "\n" );
988         // SPrint(prntBuf);
989         printk("%s", prntBuf);
990         count += col;
991         pBuf = prntBuf;
992     }
993
994 }  /* close xdump(... */
995
996   
997 static struct atm_dev *ia_boards = NULL;  
998   
999 #define ACTUAL_RAM_BASE \
1000         RAM_BASE*((iadev->mem)/(128 * 1024))  
1001 #define ACTUAL_SEG_RAM_BASE \
1002         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1003 #define ACTUAL_REASS_RAM_BASE \
1004         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1005   
1006   
1007 /*-- some utilities and memory allocation stuff will come here -------------*/  
1008   
1009 static void desc_dbg(IADEV *iadev) {
1010
1011   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1012   u32 i;
1013   void __iomem *tmp;
1014   // regval = readl((u32)ia_cmds->maddr);
1015   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1016   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1017                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1018                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1019   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1020                    iadev->ffL.tcq_rd);
1021   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1022   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1023   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1024   i = 0;
1025   while (tcq_st_ptr != tcq_ed_ptr) {
1026       tmp = iadev->seg_ram+tcq_st_ptr;
1027       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1028       tcq_st_ptr += 2;
1029   }
1030   for(i=0; i <iadev->num_tx_desc; i++)
1031       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1032 }
1033   
1034   
1035 /*----------------------------- Receiving side stuff --------------------------*/  
1036  
1037 static void rx_excp_rcvd(struct atm_dev *dev)  
1038 {  
1039 #if 0 /* closing the receiving size will cause too many excp int */  
1040   IADEV *iadev;  
1041   u_short state;  
1042   u_short excpq_rd_ptr;  
1043   //u_short *ptr;  
1044   int vci, error = 1;  
1045   iadev = INPH_IA_DEV(dev);  
1046   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1047   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1048   { printk("state = %x \n", state); 
1049         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1050  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1051         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1052             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1053         // TODO: update exception stat
1054         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1055         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1056         // pwang_test
1057         excpq_rd_ptr += 4;  
1058         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1059             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1060         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1061         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1062   }  
1063 #endif
1064 }  
1065   
1066 static void free_desc(struct atm_dev *dev, int desc)  
1067 {  
1068         IADEV *iadev;  
1069         iadev = INPH_IA_DEV(dev);  
1070         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1071         iadev->rfL.fdq_wr +=2;
1072         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1073                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1074         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1075 }  
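
/*
 * Note (added for clarity): free_desc() returns a receive descriptor to
 * the adapter by writing its number into the free-buffer queue in
 * reassembly RAM, advancing the 2-byte-per-entry write pointer (wrapping
 * from rfL.fdq_ed back to rfL.fdq_st) and publishing the new position in
 * the FREEQ_WR_PTR register.
 */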
1076   
1077   
1078 static int rx_pkt(struct atm_dev *dev)  
1079 {  
1080         IADEV *iadev;  
1081         struct atm_vcc *vcc;  
1082         unsigned short status;  
1083         struct rx_buf_desc __iomem *buf_desc_ptr;  
1084         int desc;   
1085         struct dle* wr_ptr;  
1086         int len;  
1087         struct sk_buff *skb;  
1088         u_int buf_addr, dma_addr;  
1089
1090         iadev = INPH_IA_DEV(dev);  
1091         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1092         {  
1093             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1094             return -EINVAL;  
1095         }  
1096         /* mask 1st 3 bits to get the actual descno. */  
1097         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1098         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1099                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1100               printk(" pcq_wr_ptr = 0x%x\n",
1101                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1102         /* update the read pointer - maybe we should do this at the end */  
1103         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1104                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1105         else  
1106                 iadev->rfL.pcq_rd += 2;
1107         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1108   
1109         /* get the buffer desc entry.  
1110                 update stuff. - doesn't seem to be any update necessary  
1111         */  
1112         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1113         /* make the ptr point to the corresponding buffer desc entry */  
1114         buf_desc_ptr += desc;     
1115         if (!desc || (desc > iadev->num_rx_desc) || 
1116                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
1117             free_desc(dev, desc);
1118             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1119             return -1;
1120         }
1121         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1122         if (!vcc)  
1123         {      
1124                 free_desc(dev, desc); 
1125                 printk("IA: null vcc, drop PDU\n");  
1126                 return -1;  
1127         }  
1128           
1129   
1130         /* might want to check the status bits for errors */  
1131         status = (u_short) (buf_desc_ptr->desc_mode);  
1132         if (status & (RX_CER | RX_PTE | RX_OFL))  
1133         {  
1134                 atomic_inc(&vcc->stats->rx_err);
1135                 IF_ERR(printk("IA: bad packet, dropping it");)  
1136                 if (status & RX_CER) { 
1137                     IF_ERR(printk(" cause: packet CRC error\n");)
1138                 }
1139                 else if (status & RX_PTE) {
1140                     IF_ERR(printk(" cause: packet time out\n");)
1141                 }
1142                 else {
1143                     IF_ERR(printk(" cause: buffer over flow\n");)
1144                 }
1145                 goto out_free_desc;
1146         }  
1147   
1148         /*  
1149                 build DLE.        
1150         */  
1151   
1152         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1153         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1154         len = dma_addr - buf_addr;  
1155         if (len > iadev->rx_buf_sz) {
1156            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1157            atomic_inc(&vcc->stats->rx_err);
1158            goto out_free_desc;
1159         }
1160                   
1161         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1162            if (vcc->vci < 32)
1163               printk("Drop control packets\n");
1164            goto out_free_desc;
1165         }
1166         skb_put(skb,len);  
1167         // pwang_test
1168         ATM_SKB(skb)->vcc = vcc;
1169         ATM_DESC(skb) = desc;        
1170         skb_queue_tail(&iadev->rx_dma_q, skb);  
1171
1172         /* Build the DLE structure */  
1173         wr_ptr = iadev->rx_dle_q.write;  
1174         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1175                 len, PCI_DMA_FROMDEVICE);
1176         wr_ptr->local_pkt_addr = buf_addr;  
1177         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1178         wr_ptr->mode = DMA_INT_ENABLE;  
1179   
1180         /* should take care of wrap around here too. */  
1181         if(++wr_ptr == iadev->rx_dle_q.end)
1182              wr_ptr = iadev->rx_dle_q.start;
1183         iadev->rx_dle_q.write = wr_ptr;  
1184         udelay(1);  
1185         /* Increment transaction counter */  
1186         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1187 out:    return 0;  
1188 out_free_desc:
1189         free_desc(dev, desc);
1190         goto out;
1191 }  
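
/*
 * Note (added for clarity): rx_pkt() pops one completed descriptor from
 * the packet complete queue, validates it, looks up the owning VCC from
 * the buffer descriptor and allocates an skb.  The PDU length is taken as
 * dma_addr - buf_addr (the adapter's fill pointer minus the buffer start).
 * A DLE entry is then queued and the RX transaction counter is bumped so
 * the adapter DMAs the data from packet RAM into the skb; rx_dle_intr()
 * finishes the job when that DMA completes.
 */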
1192   
1193 static void rx_intr(struct atm_dev *dev)  
1194 {  
1195   IADEV *iadev;  
1196   u_short status;  
1197   u_short state, i;  
1198   
1199   iadev = INPH_IA_DEV(dev);  
1200   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1201   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1202   if (status & RX_PKT_RCVD)  
1203   {  
1204         /* do something */  
1205         /* Basically we received an interrupt for receiving a packet.  
1206         A descriptor would have been written to the packet complete   
1207         queue. Get all the descriptors and set up DMA to move the   
1208         packets till the packet complete queue is empty.  
1209         */  
1210         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1211         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1212         while(!(state & PCQ_EMPTY))  
1213         {  
1214              rx_pkt(dev);  
1215              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1216         }  
1217         iadev->rxing = 1;
1218   }  
1219   if (status & RX_FREEQ_EMPT)  
1220   {   
1221      if (iadev->rxing) {
1222         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1223         iadev->rx_tmp_jif = jiffies; 
1224         iadev->rxing = 0;
1225      } 
1226      else if (((jiffies - iadev->rx_tmp_jif) > 50) && 
1227                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1228         for (i = 1; i <= iadev->num_rx_desc; i++)
1229                free_desc(dev, i);
1230 printk("Test logic RUN!!!!\n");
1231         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1232         iadev->rxing = 1;
1233      }
1234      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1235   }  
1236
1237   if (status & RX_EXCP_RCVD)  
1238   {  
1239         /* probably need to handle the exception queue also. */  
1240         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1241         rx_excp_rcvd(dev);  
1242   }  
1243
1244
1245   if (status & RX_RAW_RCVD)  
1246   {  
1247         /* need to handle the raw incoming cells. This depends on   
1248         whether we have programmed to receive the raw cells or not.  
1249         Else ignore. */  
1250         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1251   }  
1252 }  
1253   
1254   
1255 static void rx_dle_intr(struct atm_dev *dev)  
1256 {  
1257   IADEV *iadev;  
1258   struct atm_vcc *vcc;   
1259   struct sk_buff *skb;  
1260   int desc;  
1261   u_short state;   
1262   struct dle *dle, *cur_dle;  
1263   u_int dle_lp;  
1264   int len;
1265   iadev = INPH_IA_DEV(dev);  
1266  
1267   /* free all the dles done, that is just update our own dle read pointer   
1268         - do we really need to do this. Think not. */  
1269   /* DMA is done, just get all the receive buffers from the rx dma queue  
1270         and push them up to the higher layer protocol. Also free the desc  
1271         associated with the buffer. */  
1272   dle = iadev->rx_dle_q.read;  
1273   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1274   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1275   while(dle != cur_dle)  
1276   {  
1277       /* free the DMAed skb */  
1278       skb = skb_dequeue(&iadev->rx_dma_q);  
1279       if (!skb)  
1280          goto INCR_DLE;
1281       desc = ATM_DESC(skb);
1282       free_desc(dev, desc);  
1283                
1284       if (!(len = skb->len))
1285       {  
1286           printk("rx_dle_intr: skb len 0\n");  
1287           dev_kfree_skb_any(skb);  
1288       }  
1289       else  
1290       {  
1291           struct cpcs_trailer *trailer;
1292           u_short length;
1293           struct ia_vcc *ia_vcc;
1294
1295           pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1296                 len, PCI_DMA_FROMDEVICE);
1297           /* no VCC related housekeeping done as yet. lets see */  
1298           vcc = ATM_SKB(skb)->vcc;
1299           if (!vcc) {
1300               printk("IA: null vcc\n");  
1301               dev_kfree_skb_any(skb);
1302               goto INCR_DLE;
1303           }
1304           ia_vcc = INPH_IA_VCC(vcc);
1305           if (ia_vcc == NULL)
1306           {
1307              atomic_inc(&vcc->stats->rx_err);
1308              dev_kfree_skb_any(skb);
1309              atm_return(vcc, atm_guess_pdu2truesize(len));
1310              goto INCR_DLE;
1311            }
1312           // get real pkt length  pwang_test
1313           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1314                                  skb->len - sizeof(*trailer));
1315           length =  swap(trailer->length);
1316           if ((length > iadev->rx_buf_sz) || (length > 
1317                               (skb->len - sizeof(struct cpcs_trailer))))
1318           {
1319              atomic_inc(&vcc->stats->rx_err);
1320              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1321                                                             length, skb->len);)
1322              dev_kfree_skb_any(skb);
1323              atm_return(vcc, atm_guess_pdu2truesize(len));
1324              goto INCR_DLE;
1325           }
1326           skb_trim(skb, length);
1327           
1328           /* Display the packet */  
1329           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1330           xdump(skb->data, skb->len, "RX: ");
1331           printk("\n");)
1332
1333           IF_RX(printk("rx_dle_intr: skb push");)  
1334           vcc->push(vcc,skb);  
1335           atomic_inc(&vcc->stats->rx);
1336           iadev->rx_pkt_cnt++;
1337       }  
1338 INCR_DLE:
1339       if (++dle == iadev->rx_dle_q.end)  
1340           dle = iadev->rx_dle_q.start;  
1341   }  
1342   iadev->rx_dle_q.read = dle;  
1343   
1344   /* if the interrupts are masked because there were no free desc available,  
1345                 unmask them now. */ 
1346   if (!iadev->rxing) {
1347      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1348      if (!(state & FREEQ_EMPTY)) {
1349         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1350         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1351                                       iadev->reass_reg+REASS_MASK_REG);
1352         iadev->rxing++; 
1353      }
1354   }
1355 }  
1356   
1357   
1358 static int open_rx(struct atm_vcc *vcc)  
1359 {  
1360         IADEV *iadev;  
1361         u_short __iomem *vc_table;  
1362         u_short __iomem *reass_ptr;  
1363         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1364
1365         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1366         iadev = INPH_IA_DEV(vcc->dev);  
1367         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1368            if (iadev->phy_type & FE_25MBIT_PHY) {
1369                printk("IA: ABR not supported\n");
1370                return -EINVAL; 
1371            }
1372         }
1373         /* Make only this VCI in the vc table valid and let all   
1374                 others be invalid entries */  
1375         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1376         vc_table += vcc->vci;
1377         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1378
1379         *vc_table = vcc->vci << 6;
1380         /* Also keep a list of open rx vcs so that we can attach them with  
1381                 incoming PDUs later. */  
1382         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1383                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1384         {  
1385                 srv_cls_param_t srv_p;
1386                 init_abr_vc(iadev, &srv_p);
1387                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1388         } 
1389         else {  /* for UBR  later may need to add CBR logic */
1390                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1391                 reass_ptr += vcc->vci;
1392                 *reass_ptr = NO_AAL5_PKT;
1393         }
1394         
1395         if (iadev->rx_open[vcc->vci])  
1396                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1397                         vcc->dev->number, vcc->vci);  
1398         iadev->rx_open[vcc->vci] = vcc;  
1399         return 0;  
1400 }  
1401   
1402 static int rx_init(struct atm_dev *dev)  
1403 {  
1404         IADEV *iadev;  
1405         struct rx_buf_desc __iomem *buf_desc_ptr;  
1406         unsigned long rx_pkt_start = 0;  
1407         void *dle_addr;  
1408         struct abr_vc_table  *abr_vc_table; 
1409         u16 *vc_table;  
1410         u16 *reass_table;  
1411         u16 *ptr16;
1412         int i,j, vcsize_sel;  
1413         u_short freeq_st_adr;  
1414         u_short *freeq_start;  
1415   
1416         iadev = INPH_IA_DEV(dev);  
1417   //    spin_lock_init(&iadev->rx_lock); 
1418   
1419         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1420         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1421                                         &iadev->rx_dle_dma);  
1422         if (!dle_addr)  {  
1423                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1424                 goto err_out;
1425         }
1426         iadev->rx_dle_q.start = (struct dle*)dle_addr;  
1427         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1428         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1429         iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1430         /* the end of the dle q points to the entry after the last  
1431         DLE that can be used. */  
1432   
1433         /* write the upper 20 bits of the start address to rx list address register */  
1434         writel(iadev->rx_dle_dma & 0xfffff000,
1435                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1436         IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n", 
1437                       (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR), 
1438                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));  
1439         printk("Rx Dle list addr: 0x%08x value: 0x%0x\n", 
1440                       (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR), 
1441                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)  
1442   
1443         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1444         writew(0, iadev->reass_reg+MODE_REG);  
1445         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1446   
1447         /* Receive side control memory map  
1448            -------------------------------  
1449   
1450                 Buffer descr    0x0000 (736 - 23K)  
1451                 VP Table        0x5c00 (256 - 512)  
1452                 Except q        0x5e00 (128 - 512)  
1453                 Free buffer q   0x6000 (1K - 2K)  
1454                 Packet comp q   0x6800 (1K - 2K)  
1455                 Reass Table     0x7000 (1K - 2K)  
1456                 VC Table        0x7800 (1K - 2K)  
1457                 ABR VC Table    0x8000 (1K - 32K)  
1458         */  
1459           
1460         /* Base address for Buffer Descriptor Table */  
1461         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1462         /* Set the buffer size register */  
1463         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1464   
1465         /* Initialize each entry in the Buffer Descriptor Table */  
1466         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1467         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1468         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1469         buf_desc_ptr++;  
1470         rx_pkt_start = iadev->rx_pkt_ram;  
1471         for(i=1; i<=iadev->num_rx_desc; i++)  
1472         {  
1473                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1474                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1475                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1476                 buf_desc_ptr++;           
1477                 rx_pkt_start += iadev->rx_buf_sz;  
1478         }  
1479         IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)  
1480         i = FREE_BUF_DESC_Q*iadev->memSize; 
1481         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1482         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1483         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1484                                          iadev->reass_reg+FREEQ_ED_ADR);
1485         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1486         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1487                                         iadev->reass_reg+FREEQ_WR_PTR);    
1488         /* Fill the FREEQ with all the free descriptors. */  
1489         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1490         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1491         for(i=1; i<=iadev->num_rx_desc; i++)  
1492         {  
1493                 *freeq_start = (u_short)i;  
1494                 freeq_start++;  
1495         }  
1496         IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)  
1497         /* Packet Complete Queue */
1498         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1499         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1500         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1501         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1502         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1503
1504         /* Exception Queue */
1505         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1506         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1507         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1508                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1509         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1510         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1511  
1512         /* Load local copy of FREEQ and PCQ ptrs */
1513         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1514         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1515         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1516         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1517         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1518         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1519         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1520         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1521         
1522         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1523               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1524               iadev->rfL.pcq_wr);)                
1525         /* just for check - no VP TBL */  
1526         /* VP Table */  
1527         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1528         /* initialize VP Table for invalid VPIs  
1529                 - I guess we can write all 1s or 0x000f in the entire memory  
1530                   space or something similar.  
1531         */  
1532   
1533         /* This seems to work and looks right to me too !!! */  
1534         i =  REASS_TABLE * iadev->memSize;
1535         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1536         /* initialize each Reassembly table entry to NO_AAL5_PKT */  
1537         reass_table = (u16 *)(iadev->reass_ram+i);  
1538         j = REASS_TABLE_SZ * iadev->memSize;
1539         for(i=0; i < j; i++)  
1540                 *reass_table++ = NO_AAL5_PKT;  
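        /* The loop below computes vcsize_sel = log2(8K / num_vc) (e.g. 3 for a
           1K-VC board); it is OR'ed into the low bits of VC_LKUP_BASE to encode
           the receive VC table size. */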
1541        i = 8*1024;
1542        vcsize_sel =  0;
1543        while (i != iadev->num_vc) {
1544           i /= 2;
1545           vcsize_sel++;
1546        }
1547        i = RX_VC_TABLE * iadev->memSize;
1548        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1549        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1550         j = RX_VC_TABLE_SZ * iadev->memSize;
1551         for(i = 0; i < j; i++)  
1552         {  
1553                 /* shift the reassembly pointer by 3 + lower 3 bits of   
1554                 vc_lkup_base register (=3 for 1K VCs) and the last byte   
1555                 is those low 3 bits.   
1556                 Shall program this later.  
1557                 */  
1558                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1559                 vc_table++;  
1560         }  
1561         /* ABR VC table */
1562         i =  ABR_VC_TABLE * iadev->memSize;
1563         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1564                    
1565         i = ABR_VC_TABLE * iadev->memSize;
1566         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1567         j = REASS_TABLE_SZ * iadev->memSize;
1568         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1569         for(i = 0; i < j; i++) {                
1570                 abr_vc_table->rdf = 0x0003;
1571                 abr_vc_table->air = 0x5eb1;
1572                 abr_vc_table++;         
1573         }  
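        /* 0x0003 / 0x5eb1 are the per-VC reset values for the RDF and AIR
           fields; ia_close() writes the same pair back when an ABR VC closes. */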
1574
1575         /* Initialize other registers */  
1576   
1577         /* VP Filter Register set for VC Reassembly only */  
1578         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1579         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1580         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1581
1582         /* Packet Timeout Count  related Registers : 
1583            Set packet timeout to occur in about 3 seconds
1584            Set Packet Aging Interval count register to overflow in about 4 us
1585         */  
1586         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1587         ptr16 = (u16*)j;
1588         i = ((u32)ptr16 >> 6) & 0xff;
1589         ptr16  += j - 1;
1590         i |=(((u32)ptr16 << 2) & 0xff00);
1591         writew(i, iadev->reass_reg+TMOUT_RANGE);
1592         /* initialize the desc_tbl */
1593         for(i=0; i<iadev->num_tx_desc;i++)
1594             iadev->desc_tbl[i].timestamp = 0;
1595
1596         /* to clear the interrupt status register - read it */  
1597         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1598   
1599         /* Mask Register - unmask only RX_FREEQ_EMPT and RX_PKT_RCVD */  
1600         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1601   
1602         skb_queue_head_init(&iadev->rx_dma_q);  
1603         iadev->rx_free_desc_qhead = NULL;   
1604         iadev->rx_open = kmalloc(4*iadev->num_vc,GFP_KERNEL);
1605         if (!iadev->rx_open)  
1606         {  
1607                 printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1608                 dev->number);  
1609                 goto err_free_dle;
1610         }  
1611         memset(iadev->rx_open, 0, 4*iadev->num_vc);  
1612         iadev->rxing = 1;
1613         iadev->rx_pkt_cnt = 0;
1614         /* Mode Register */  
1615         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1616         return 0;  
1617
1618 err_free_dle:
1619         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1620                             iadev->rx_dle_dma);  
1621 err_out:
1622         return -ENOMEM;
1623 }  
1624   
1625
1626 /*  
1627         The memory map suggested in appendix A and the coding for it.   
1628         Keeping it around just in case we change our mind later.  
1629   
1630                 Buffer descr    0x0000 (128 - 4K)  
1631                 UBR sched       0x1000 (1K - 4K)  
1632                 UBR Wait q      0x2000 (1K - 4K)  
1633                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1634                                         (128 - 256) each  
1635                 extended VC     0x4000 (1K - 8K)  
1636                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1637                 CBR sched       0x7000 (as needed)  
1638                 VC table        0x8000 (1K - 32K)  
1639 */  
1640   
1641 static void tx_intr(struct atm_dev *dev)  
1642 {  
1643         IADEV *iadev;  
1644         unsigned short status;  
1645         unsigned long flags;
1646
1647         iadev = INPH_IA_DEV(dev);  
1648   
1649         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1650         if (status & TRANSMIT_DONE){
1651
1652            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1653            spin_lock_irqsave(&iadev->tx_lock, flags);
1654            ia_tx_poll(iadev);
1655            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1656            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1657            if (iadev->close_pending)  
1658                wake_up(&iadev->close_wait);
1659         }         
1660         if (status & TCQ_NOT_EMPTY)  
1661         {  
1662             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1663         }  
1664 }  
1665   
1666 static void tx_dle_intr(struct atm_dev *dev)
1667 {
1668         IADEV *iadev;
1669         struct dle *dle, *cur_dle; 
1670         struct sk_buff *skb;
1671         struct atm_vcc *vcc;
1672         struct ia_vcc  *iavcc;
1673         u_int dle_lp;
1674         unsigned long flags;
1675
1676         iadev = INPH_IA_DEV(dev);
1677         spin_lock_irqsave(&iadev->tx_lock, flags);   
1678         dle = iadev->tx_dle_q.read;
1679         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1680                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1681         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
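        /* dle_lp is the byte offset of the hardware's current position in the
           DLE ring (low bits of the list address register); >> 4 turns it into
           a DLE index (struct dle is 16 bytes here).  Every entry between our
           read pointer and cur_dle has completed DMA and can be reaped. */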
1682         while (dle != cur_dle)
1683         {
1684             /* free the DMAed skb */ 
1685             skb = skb_dequeue(&iadev->tx_dma_q); 
1686             if (!skb) break;
1687
1688             /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1689             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1690                 pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1691                                  PCI_DMA_TODEVICE);
1692             }
1693             vcc = ATM_SKB(skb)->vcc;
1694             if (!vcc) {
1695                   printk("tx_dle_intr: vcc is null\n");
1696                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1697                   dev_kfree_skb_any(skb);
1698
1699                   return;
1700             }
1701             iavcc = INPH_IA_VCC(vcc);
1702             if (!iavcc) {
1703                   printk("tx_dle_intr: iavcc is null\n");
1704                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1705                   dev_kfree_skb_any(skb);
1706                   return;
1707             }
1708             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1709                if ((vcc->pop) && (skb->len != 0))
1710                {     
1711                  vcc->pop(vcc, skb);
1712                } 
1713                else {
1714                  dev_kfree_skb_any(skb);
1715                }
1716             }
1717             else { /* Hold the rate-limited skb for flow control */
1718                IA_SKB_STATE(skb) |= IA_DLED;
1719                skb_queue_tail(&iavcc->txing_skb, skb);
1720             }
1721             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
1722             if (++dle == iadev->tx_dle_q.end)
1723                  dle = iadev->tx_dle_q.start;
1724         }
1725         iadev->tx_dle_q.read = dle;
1726         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1727 }
1728   
1729 static int open_tx(struct atm_vcc *vcc)  
1730 {  
1731         struct ia_vcc *ia_vcc;  
1732         IADEV *iadev;  
1733         struct main_vc *vc;  
1734         struct ext_vc *evc;  
1735         int ret;
1736         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1737         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1738         iadev = INPH_IA_DEV(vcc->dev);  
1739         
1740         if (iadev->phy_type & FE_25MBIT_PHY) {
1741            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1742                printk("IA: ABR not supported\n");
1743                return -EINVAL; 
1744            }
1745           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1746                printk("IA: CBR not supported\n");
1747                return -EINVAL; 
1748           }
1749         }
1750         ia_vcc =  INPH_IA_VCC(vcc);
1751         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1752         if (vcc->qos.txtp.max_sdu > 
1753                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1754            printk("IA: SDU size %d exceeds the configured SDU size %d\n",
1755                   vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1756            vcc->dev_data = NULL;
1757            kfree(ia_vcc);
1758            return -EINVAL; 
1759         }
1760         ia_vcc->vc_desc_cnt = 0;
1761         ia_vcc->txing = 1;
1762
1763         /* find pcr */
1764         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1765            vcc->qos.txtp.pcr = iadev->LineRate;
1766         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1767            vcc->qos.txtp.pcr = iadev->LineRate;
1768         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1769            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1770         if (vcc->qos.txtp.pcr > iadev->LineRate)
1771              vcc->qos.txtp.pcr = iadev->LineRate;
1772         ia_vcc->pcr = vcc->qos.txtp.pcr;
1773
1774         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1775         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1776         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1777         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
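        /* ltimeout scales roughly inversely with the negotiated PCR: VCs above
           LineRate/6 get HZ/10, those above LineRate/130 get HZ, very slow VCs
           (<= 170 cells/s) get 16*HZ, and the rest get 2700*HZ / pcr. */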
1778         if (ia_vcc->pcr < iadev->rate_limit)
1779            skb_queue_head_init (&ia_vcc->txing_skb);
1780         if (ia_vcc->pcr < iadev->rate_limit) {
1781            if (vcc->qos.txtp.max_sdu != 0) {
1782                if (ia_vcc->pcr > 60000)
1783                   vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1784                else if (ia_vcc->pcr > 2000)
1785                   vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1786                else
1787                  vcc->sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1788            }
1789            else
1790              vcc->sk->sk_sndbuf = 24576;
1791         }
1792            
1793         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1794         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1795         vc += vcc->vci;  
1796         evc += vcc->vci;  
1797         memset((caddr_t)vc, 0, sizeof(*vc));  
1798         memset((caddr_t)evc, 0, sizeof(*evc));  
1799           
1800         /* store the most significant 4 bits of vci as the last 4 bits   
1801                 of first part of atm header.  
1802            store the last 12 bits of vci as first 12 bits of the second  
1803                 part of the atm header.  
1804         */  
1805         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1806         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
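        /* e.g. vci 0x1234 gives atm_hdr1 = 0x1 (top 4 bits) and
           atm_hdr2 = 0x2340 (low 12 bits shifted up by 4). */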
1807  
1808         /* check the following for different traffic classes */  
1809         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1810         {  
1811                 vc->type = UBR;  
1812                 vc->status = CRC_APPEND;
1813                 vc->acr = cellrate_to_float(iadev->LineRate);  
1814                 if (vcc->qos.txtp.pcr > 0) 
1815                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1816                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", 
1817                                              vcc->qos.txtp.max_pcr,vc->acr);)
1818         }  
1819         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1820         {       srv_cls_param_t srv_p;
1821                 IF_ABR(printk("Tx ABR VCC\n");)  
1822                 init_abr_vc(iadev, &srv_p);
1823                 if (vcc->qos.txtp.pcr > 0) 
1824                    srv_p.pcr = vcc->qos.txtp.pcr;
1825                 if (vcc->qos.txtp.min_pcr > 0) {
1826                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1827                    if (tmpsum > iadev->LineRate)
1828                        return -EBUSY;
1829                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1830                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1831                 } 
1832                 else srv_p.mcr = 0;
1833                 if (vcc->qos.txtp.icr)
1834                    srv_p.icr = vcc->qos.txtp.icr;
1835                 if (vcc->qos.txtp.tbe)
1836                    srv_p.tbe = vcc->qos.txtp.tbe;
1837                 if (vcc->qos.txtp.frtt)
1838                    srv_p.frtt = vcc->qos.txtp.frtt;
1839                 if (vcc->qos.txtp.rif)
1840                    srv_p.rif = vcc->qos.txtp.rif;
1841                 if (vcc->qos.txtp.rdf)
1842                    srv_p.rdf = vcc->qos.txtp.rdf;
1843                 if (vcc->qos.txtp.nrm_pres)
1844                    srv_p.nrm = vcc->qos.txtp.nrm;
1845                 if (vcc->qos.txtp.trm_pres)
1846                    srv_p.trm = vcc->qos.txtp.trm;
1847                 if (vcc->qos.txtp.adtf_pres)
1848                    srv_p.adtf = vcc->qos.txtp.adtf;
1849                 if (vcc->qos.txtp.cdf_pres)
1850                    srv_p.cdf = vcc->qos.txtp.cdf;    
1851                 if (srv_p.icr > srv_p.pcr)
1852                    srv_p.icr = srv_p.pcr;    
1853                 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n", 
1854                                                       srv_p.pcr, srv_p.mcr);)
1855                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1856         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1857                 if (iadev->phy_type & FE_25MBIT_PHY) {
1858                     printk("IA: CBR not supported\n");
1859                     return -EINVAL; 
1860                 }
1861                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1862                    IF_CBR(printk("PCR is not available\n");)
1863                    return -1;
1864                 }
1865                 vc->type = CBR;
1866                 vc->status = CRC_APPEND;
1867                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1868                     return ret;
1869                 }
1870        } 
1871         else  
1872            printk("iadev: Non UBR, ABR and CBR traffic not supported\n"); 
1873         
1874         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1875         IF_EVENT(printk("ia open_tx returning \n");)  
1876         return 0;  
1877 }  
1878   
1879   
1880 static int tx_init(struct atm_dev *dev)  
1881 {  
1882         IADEV *iadev;  
1883         struct tx_buf_desc *buf_desc_ptr;
1884         unsigned int tx_pkt_start;  
1885         void *dle_addr;  
1886         int i;  
1887         u_short tcq_st_adr;  
1888         u_short *tcq_start;  
1889         u_short prq_st_adr;  
1890         u_short *prq_start;  
1891         struct main_vc *vc;  
1892         struct ext_vc *evc;   
1893         u_short tmp16;
1894         u32 vcsize_sel;
1895  
1896         iadev = INPH_IA_DEV(dev);  
1897         spin_lock_init(&iadev->tx_lock);
1898  
1899         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1900                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1901
1902         /* Allocate 4k (boundary aligned) bytes */
1903         dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1904                                         &iadev->tx_dle_dma);  
1905         if (!dle_addr)  {
1906                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1907                 goto err_out;
1908         }
1909         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1910         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1911         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1912         iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);  
1913
1914         /* write the upper 20 bits of the start address to tx list address register */  
1915         writel(iadev->tx_dle_dma & 0xfffff000,
1916                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1917         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1918         writew(0, iadev->seg_reg+MODE_REG_0);  
1919         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1920         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1921         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1922         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1923   
1924         /*  
1925            Transmit side control memory map  
1926            --------------------------------    
1927          Buffer descr   0x0000 (128 - 4K)  
1928          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1929                                         (512 - 1K) each  
1930                                         TCQ - 4K, PRQ - 5K  
1931          CBR Table      0x1800 (as needed) - 6K  
1932          UBR Table      0x3000 (1K - 4K) - 12K  
1933          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1934          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1935                                 ABR Tbl - 20K, ABR Wq - 22K   
1936          extended VC    0x6000 (1K - 8K) - 24K  
1937          VC Table       0x8000 (1K - 32K) - 32K  
1938           
1939         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1940         and Wait q, which can be allotted later.  
1941         */  
1942      
1943         /* Buffer Descriptor Table Base address */  
1944         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1945   
1946         /* initialize each entry in the buffer descriptor table */  
1947         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1948         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1949         buf_desc_ptr++;  
1950         tx_pkt_start = TX_PACKET_RAM;  
1951         for(i=1; i<=iadev->num_tx_desc; i++)  
1952         {  
1953                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1954                 buf_desc_ptr->desc_mode = AAL5;  
1955                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1956                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1957                 buf_desc_ptr++;           
1958                 tx_pkt_start += iadev->tx_buf_sz;  
1959         }  
1960         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1961         if (!iadev->tx_buf) {
1962             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1963             goto err_free_dle;
1964         }
1965         for (i= 0; i< iadev->num_tx_desc; i++)
1966         {
1967             struct cpcs_trailer *cpcs;
1968  
1969             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1970             if(!cpcs) {                
1971                 printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); 
1972                 goto err_free_tx_bufs;
1973             }
1974             iadev->tx_buf[i].cpcs = cpcs;
1975             iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1976                 cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1977         }
1978         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1979                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1980         if (!iadev->desc_tbl) {
1981                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1982                 goto err_free_all_tx_bufs;
1983         }
1984   
1985         /* Communication Queues base address */  
1986         i = TX_COMP_Q * iadev->memSize;
1987         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
1988   
1989         /* Transmit Complete Queue */  
1990         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
1991         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
1992         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
1993         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1994         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
1995                                               iadev->seg_reg+TCQ_ED_ADR); 
1996         /* Fill the TCQ with all the free descriptors. */  
1997         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
1998         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
1999         for(i=1; i<=iadev->num_tx_desc; i++)  
2000         {  
2001                 *tcq_start = (u_short)i;  
2002                 tcq_start++;  
2003         }  
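        /* The TCQ spans 2*num_tx_desc entries but only the first num_tx_desc
           are pre-filled with free descriptors; host_tcq_wr is the host's
           cached copy of the write pointer. */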
2004   
2005         /* Packet Ready Queue */  
2006         i = PKT_RDY_Q * iadev->memSize; 
2007         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2008         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2009                                               iadev->seg_reg+PRQ_ED_ADR);
2010         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2011         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2012          
2013         /* Load local copy of PRQ and TCQ ptrs */
2014         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2015         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2016         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2017
2018         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2019         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2020         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2021
2022         /* Just for safety initializing the queue to have desc 1 always */  
2023         /* Fill the PRQ with all the free descriptors. */  
2024         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2025         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2026         for(i=1; i<=iadev->num_tx_desc; i++)  
2027         {  
2028                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2029                 prq_start++;  
2030         }  
2031         /* CBR Table */  
2032         IF_INIT(printk("Start CBR Init\n");)
2033 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2034         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2035 #else /* Charlie's logic is wrong ? */
2036         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2037         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2038         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2039 #endif
2040
2041         IF_INIT(printk("value in register = 0x%x\n",
2042                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2043         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2044         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2045         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2046                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2047         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2048         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2049         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2050         IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2051                (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2052         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2053           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2054           readw(iadev->seg_reg+CBR_TAB_END+1));)
2055
2056         /* Initialize the CBR Scheduling Table */
2057         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2058                                                           0, iadev->num_vc*6); 
2059         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2060         iadev->CbrEntryPt = 0;
2061         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2062         iadev->NumEnabledCBR = 0;
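        /* The CBR schedule holds num_vc*3 cell slots (num_vc*6 bytes), so each
           slot is worth MAX_ATM_155 / CbrTotEntries cells/s of line rate. */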
2063
2064         /* UBR scheduling Table and wait queue */  
2065         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2066                 - SCHEDSZ is 1K (# of entries).  
2067                 - UBR Table size is 4K  
2068                 - UBR wait queue is 4K  
2069            since the table and wait queues are contiguous, all the bytes   
2070            can be initialized by one memset.  
2071         */  
2072         
2073         vcsize_sel = 0;
2074         i = 8*1024;
2075         while (i != iadev->num_vc) {
2076           i /= 2;
2077           vcsize_sel++;
2078         }
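        /* Same computation as in rx_init(): vcsize_sel = log2(8K / num_vc),
           OR'ed into the low bits of VCT_BASE below, apparently encoding the
           transmit VC table size as on the receive side. */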
2079  
2080         i = MAIN_VC_TABLE * iadev->memSize;
2081         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2082         i =  EXT_VC_TABLE * iadev->memSize;
2083         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2084         i = UBR_SCHED_TABLE * iadev->memSize;
2085         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2086         i = UBR_WAIT_Q * iadev->memSize; 
2087         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2088         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2089                                                        0, iadev->num_vc*8);
2090         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2091         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2092                 - SCHEDSZ is 1K (# of entries).  
2093                 - ABR Table size is 2K  
2094                 - ABR wait queue is 2K  
2095            since the table and wait queues are contiguous, all the bytes   
2096            can be initialized by one memset.  
2097         */  
2098         i = ABR_SCHED_TABLE * iadev->memSize;
2099         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2100         i = ABR_WAIT_Q * iadev->memSize;
2101         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2102  
2103         i = ABR_SCHED_TABLE*iadev->memSize;
2104         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2105         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2106         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2107         iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); 
2108         if (!iadev->testTable) {
2109            printk("Get free page failed\n");
2110            goto err_free_desc_tbl;
2111         }
2112         for(i=0; i<iadev->num_vc; i++)  
2113         {  
2114                 memset((caddr_t)vc, 0, sizeof(*vc));  
2115                 memset((caddr_t)evc, 0, sizeof(*evc));  
2116                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2117                                                 GFP_KERNEL);
2118                 if (!iadev->testTable[i])
2119                         goto err_free_test_tables;
2120                 iadev->testTable[i]->lastTime = 0;
2121                 iadev->testTable[i]->fract = 0;
2122                 iadev->testTable[i]->vc_status = VC_UBR;
2123                 vc++;  
2124                 evc++;  
2125         }  
2126   
2127         /* Other Initialization */  
2128           
2129         /* Max Rate Register */  
2130         if (iadev->phy_type & FE_25MBIT_PHY) {
2131            writew(RATE25, iadev->seg_reg+MAXRATE);  
2132            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2133         }
2134         else {
2135            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2136            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2137         }
2138         /* Set Idle Header Registers to be sure */  
2139         writew(0, iadev->seg_reg+IDLEHEADHI);  
2140         writew(0, iadev->seg_reg+IDLEHEADLO);  
2141   
2142         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2143         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2144
2145         iadev->close_pending = 0;
2146         init_waitqueue_head(&iadev->close_wait);
2147         init_waitqueue_head(&iadev->timeout_wait);
2148         skb_queue_head_init(&iadev->tx_dma_q);  
2149         ia_init_rtn_q(&iadev->tx_return_q);  
2150
2151         /* RM Cell Protocol ID and Message Type */  
2152         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2153         skb_queue_head_init (&iadev->tx_backlog);
2154   
2155         /* Mode Register 1 */  
2156         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2157   
2158         /* Mode Register 0 */  
2159         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2160   
2161         /* Interrupt Status Register - read to clear */  
2162         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2163   
2164         /* Interrupt Mask Reg - unmask only TRANSMIT_DONE and TCQ_NOT_EMPTY */  
2165         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2166         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2167         iadev->tx_pkt_cnt = 0;
2168         iadev->rate_limit = iadev->LineRate / 3;
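        /* VCs whose PCR is below rate_limit are software flow-controlled:
           tx_dle_intr() keeps their skbs (IA_DLED) on the per-VC txing_skb
           queue instead of popping them right away, and open_tx() sizes
           sk_sndbuf for them accordingly. */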
2169   
2170         return 0;
2171
2172 err_free_test_tables:
2173         while (--i >= 0)
2174                 kfree(iadev->testTable[i]);
2175         kfree(iadev->testTable);
2176 err_free_desc_tbl:
2177         kfree(iadev->desc_tbl);
2178 err_free_all_tx_bufs:
2179         i = iadev->num_tx_desc;
2180 err_free_tx_bufs:
2181         while (--i >= 0) {
2182                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2183
2184                 pci_unmap_single(iadev->pci, desc->dma_addr,
2185                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2186                 kfree(desc->cpcs);
2187         }
2188         kfree(iadev->tx_buf);
2189 err_free_dle:
2190         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2191                             iadev->tx_dle_dma);  
2192 err_out:
2193         return -ENOMEM;
2194 }   
2195    
2196 static irqreturn_t ia_int(int irq, void *dev_id, struct pt_regs *regs)  
2197 {  
2198    struct atm_dev *dev;  
2199    IADEV *iadev;  
2200    unsigned int status;  
2201    int handled = 0;
2202
2203    dev = dev_id;  
2204    iadev = INPH_IA_DEV(dev);  
2205    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2206    { 
2207         handled = 1;
2208         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2209         if (status & STAT_REASSINT)  
2210         {  
2211            /* do something */  
2212            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2213            rx_intr(dev);  
2214         }  
2215         if (status & STAT_DLERINT)  
2216         {  
2217            /* Clear this bit by writing a 1 to it. */  
2218            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2219            rx_dle_intr(dev);  
2220         }  
2221         if (status & STAT_SEGINT)  
2222         {  
2223            /* do something */ 
2224            IF_EVENT(printk("IA: tx_intr \n");) 
2225            tx_intr(dev);  
2226         }  
2227         if (status & STAT_DLETINT)  
2228         {  
2229            *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;  
2230            tx_dle_intr(dev);  
2231         }  
2232         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2233         {  
2234            if (status & STAT_FEINT) 
2235                IaFrontEndIntr(iadev);
2236         }  
2237    }
2238    return IRQ_RETVAL(handled);
2239 }  
2240           
2241           
2242           
2243 /*----------------------------- entries --------------------------------*/  
2244 static int get_esi(struct atm_dev *dev)  
2245 {  
2246         IADEV *iadev;  
2247         int i;  
2248         u32 mac1;  
2249         u16 mac2;  
2250           
2251         iadev = INPH_IA_DEV(dev);  
2252         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2253                                 iadev->reg+IPHASE5575_MAC1)));  
2254         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2255         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2256         for (i=0; i<MAC1_LEN; i++)  
2257                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2258           
2259         for (i=0; i<MAC2_LEN; i++)  
2260                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2261         return 0;  
2262 }  
2263           
2264 static int reset_sar(struct atm_dev *dev)  
2265 {  
2266         IADEV *iadev;  
2267         int i, error = 1;  
2268         unsigned int pci[64];  
2269           
2270         iadev = INPH_IA_DEV(dev);  
2271         for(i=0; i<64; i++)  
2272           if ((error = pci_read_config_dword(iadev->pci,  
2273                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2274               return error;  
2275         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2276         for(i=0; i<64; i++)  
2277           if ((error = pci_write_config_dword(iadev->pci,  
2278                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2279             return error;  
2280         udelay(5);  
2281         return 0;  
2282 }  
2283           
2284           
2285 static int __init ia_init(struct atm_dev *dev)
2286 {  
2287         IADEV *iadev;  
2288         unsigned long real_base;
2289         void __iomem *base;
2290         unsigned short command;  
2291         unsigned char revision;  
2292         int error, i; 
2293           
2294         /* The device has been identified and registered. Now we read   
2295            necessary configuration info like memory base address,   
2296            interrupt number etc */  
2297           
2298         IF_INIT(printk(">ia_init\n");)  
2299         dev->ci_range.vpi_bits = 0;  
2300         dev->ci_range.vci_bits = NR_VCI_LD;  
2301
2302         iadev = INPH_IA_DEV(dev);  
2303         real_base = pci_resource_start (iadev->pci, 0);
2304         iadev->irq = iadev->pci->irq;
2305                   
2306         if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND,&command))   
2307                     || (error = pci_read_config_byte(iadev->pci,   
2308                                 PCI_REVISION_ID,&revision)))   
2309         {  
2310                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2311                                 dev->number,error);  
2312                 return -EINVAL;  
2313         }  
2314         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2315                         dev->number, revision, real_base, iadev->irq);)  
2316           
2317         /* find mapping size of board */  
2318           
2319         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2320
2321         if (iadev->pci_map_size == 0x100000){
2322           iadev->num_vc = 4096;
2323           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2324           iadev->memSize = 4;
2325         }
2326         else if (iadev->pci_map_size == 0x40000) {
2327           iadev->num_vc = 1024;
2328           iadev->memSize = 1;
2329         }
2330         else {
2331            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2332            return -EINVAL;
2333         }
2334         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2335           
2336         /* enable bus mastering */
2337         pci_set_master(iadev->pci);
2338
2339         /*  
2340          * Delay at least 1us before doing any mem accesses (how 'bout 10?)  
2341          */  
2342         udelay(10);  
2343           
2344         /* mapping the physical address to a virtual address in address space */  
2345         base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */  
2346           
2347         if (!base)  
2348         {  
2349                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2350                             dev->number);  
2351                 return -ENOMEM;  
2352         }  
2353         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2354                         dev->number, revision, base, iadev->irq);)  
2355           
2356         /* filling the iphase dev structure */  
2357         iadev->mem = iadev->pci_map_size /2;  
2358         iadev->real_base = real_base;  
2359         iadev->base = base;  
2360                   
2361         /* Bus Interface Control Registers */  
2362         iadev->reg = base + REG_BASE;
2363         /* Segmentation Control Registers */  
2364         iadev->seg_reg = base + SEG_BASE;
2365         /* Reassembly Control Registers */  
2366         iadev->reass_reg = base + REASS_BASE;  
2367         /* Front end/ DMA control registers */  
2368         iadev->phy = base + PHY_BASE;  
2369         iadev->dma = base + PHY_BASE;  
2370         /* RAM - Segmentation RAm and Reassembly RAM */  
2371         iadev->ram = base + ACTUAL_RAM_BASE;  
2372         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2373         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2374   
2375         /* let's print out the above */  
2376         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2377           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2378           iadev->phy, iadev->ram, iadev->seg_ram, 
2379           iadev->reass_ram);) 
2380           
2381         /* let's try reading the MAC address */  
2382         error = get_esi(dev);  
2383         if (error) {
2384           iounmap(iadev->base);
2385           return error;  
2386         }
2387         printk("IA: ");
2388         for (i=0; i < ESI_LEN; i++)  
2389                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2390         printk("\n");  
2391   
2392         /* reset SAR */  
2393         if (reset_sar(dev)) {
2394            iounmap(iadev->base);
2395            printk("IA: reset SAR failed, please try again\n");
2396            return 1;
2397         }
2398         return 0;  
2399 }  
2400
2401 static void ia_update_stats(IADEV *iadev) {
2402     if (!iadev->carrier_detect)
2403         return;
2404     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2405     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2406     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2407     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2408     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2409     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2410     return;
2411 }
2412   
2413 static void ia_led_timer(unsigned long arg) {
2414         unsigned long flags;
2415         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2416         u_char i;
2417         static u32 ctrl_reg; 
2418         for (i = 0; i < iadev_count; i++) {
2419            if (ia_dev[i]) {
2420               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2421               if (blinking[i] == 0) {
2422                  blinking[i]++;
2423                  ctrl_reg &= (~CTRL_LED);
2424                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2425                  ia_update_stats(ia_dev[i]);
2426               }
2427               else {
2428                  blinking[i] = 0;
2429                  ctrl_reg |= CTRL_LED;
2430                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2431                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2432                  if (ia_dev[i]->close_pending)  
2433                     wake_up(&ia_dev[i]->close_wait);
2434                  ia_tx_poll(ia_dev[i]);
2435                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2436               }
2437            }
2438         }
2439         mod_timer(&ia_timer, jiffies + HZ / 4);
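        /* re-arm: LED blink and stats/tx poll run every quarter second */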
2440         return;
2441 }
2442
2443 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2444         unsigned long addr)  
2445 {  
2446         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2447 }  
2448   
2449 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2450 {  
2451         return readl(INPH_IA_DEV(dev)->phy+addr);  
2452 }  
2453
2454 static void ia_free_tx(IADEV *iadev)
2455 {
2456         int i;
2457
2458         kfree(iadev->desc_tbl);
2459         for (i = 0; i < iadev->num_vc; i++)
2460                 kfree(iadev->testTable[i]);
2461         kfree(iadev->testTable);
2462         for (i = 0; i < iadev->num_tx_desc; i++) {
2463                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2464
2465                 pci_unmap_single(iadev->pci, desc->dma_addr,
2466                         sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2467                 kfree(desc->cpcs);
2468         }
2469         kfree(iadev->tx_buf);
2470         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2471                             iadev->tx_dle_dma);  
2472 }
2473
2474 static void ia_free_rx(IADEV *iadev)
2475 {
2476         kfree(iadev->rx_open);
2477         pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2478                           iadev->rx_dle_dma);  
2479 }
2480
2481 static int __init ia_start(struct atm_dev *dev)
2482 {  
2483         IADEV *iadev;  
2484         int error;  
2485         unsigned char phy;  
2486         u32 ctrl_reg;  
2487         IF_EVENT(printk(">ia_start\n");)  
2488         iadev = INPH_IA_DEV(dev);  
2489         if (request_irq(iadev->irq, &ia_int, SA_SHIRQ, DEV_LABEL, dev)) {  
2490                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2491                     dev->number, iadev->irq);  
2492                 error = -EAGAIN;
2493                 goto err_out;
2494         }  
2495         /* @@@ should release IRQ on error */  
2496         /* enabling memory + master */  
2497         if ((error = pci_write_config_word(iadev->pci,   
2498                                 PCI_COMMAND,   
2499                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2500         {  
2501                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2502                     "master (0x%x)\n",dev->number, error);  
2503                 error = -EIO;  
2504                 goto err_free_irq;
2505         }  
2506         udelay(10);  
2507   
2508         /* Maybe we should reset the front end, initialize Bus Interface Control   
2509                 Registers and see. */  
2510   
2511         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2512                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2513         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2514         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2515                         | CTRL_B8  
2516                         | CTRL_B16  
2517                         | CTRL_B32  
2518                         | CTRL_B48  
2519                         | CTRL_B64  
2520                         | CTRL_B128  
2521                         | CTRL_ERRMASK  
2522                         | CTRL_DLETMASK         /* should be removed later */  
2523                         | CTRL_DLERMASK  
2524                         | CTRL_SEGMASK  
2525                         | CTRL_REASSMASK          
2526                         | CTRL_FEMASK  
2527                         | CTRL_CSPREEMPT;  
2528   
2529        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2530   
2531         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2532                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2533            printk("Bus status reg after init: %08x\n", 
2534                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2535     
2536         ia_hw_type(iadev); 
2537         error = tx_init(dev);  
2538         if (error)
2539                 goto err_free_irq;
2540         error = rx_init(dev);  
2541         if (error)
2542                 goto err_free_tx;
2543   
2544         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2545         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2546         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2547                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2548         phy = 0; /* resolve compiler complaint */
2549         IF_INIT ( 
2550         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2551                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2552         else  
2553                 printk("IA: utopia,rev.%0x\n",phy);) 
2554
2555         if (iadev->phy_type &  FE_25MBIT_PHY)
2556            ia_mb25_init(iadev);
2557         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2558            ia_suni_pm7345_init(iadev);
2559         else {
2560                 error = suni_init(dev);
2561                 if (error)
2562                         goto err_free_rx;
2563                 /* 
2564                  * Enable interrupt on loss of signal
2565                  * SUNI_RSOP_CIE - 0x10
2566                  * SUNI_RSOP_CIE_LOSE - 0x04
2567                  */
2568                 ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2569 #ifndef MODULE
2570                 error = dev->phy->start(dev);
2571                 if (error)
2572                         goto err_free_rx;
2573 #endif
2574                 /* Get iadev->carrier_detect status */
2575                 IaFrontEndIntr(iadev);
2576         }
2577         return 0;
2578
2579 err_free_rx:
2580         ia_free_rx(iadev);
2581 err_free_tx:
2582         ia_free_tx(iadev);
2583 err_free_irq:
2584         free_irq(iadev->irq, dev);  
2585 err_out:
2586         return error;
2587 }  
2588   
2589 static void ia_close(struct atm_vcc *vcc)  
2590 {
2591         DEFINE_WAIT(wait);
2592         u16 *vc_table;
2593         IADEV *iadev;
2594         struct ia_vcc *ia_vcc;
2595         struct sk_buff *skb = NULL;
2596         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2597         unsigned long closetime, flags;
2598
2599         iadev = INPH_IA_DEV(vcc->dev);
2600         ia_vcc = INPH_IA_VCC(vcc);
2601         if (!ia_vcc) return;  
2602
2603         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2604                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2605         clear_bit(ATM_VF_READY,&vcc->flags);
2606         skb_queue_head_init (&tmp_tx_backlog);
2607         skb_queue_head_init (&tmp_vcc_backlog); 
2608         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2609            iadev->close_pending++;
2610            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2611            schedule_timeout(50);
2612            finish_wait(&iadev->timeout_wait, &wait);
2613            spin_lock_irqsave(&iadev->tx_lock, flags); 
2614            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2615               if (ATM_SKB(skb)->vcc == vcc){ 
2616                  if (vcc->pop) vcc->pop(vcc, skb);
2617                  else dev_kfree_skb_any(skb);
2618               }
2619               else 
2620                  skb_queue_tail(&tmp_tx_backlog, skb);
2621            } 
2622            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2623              skb_queue_tail(&iadev->tx_backlog, skb);
2624            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2625            closetime = 300000 / ia_vcc->pcr;
2626            if (closetime == 0)
2627               closetime = 1;
2628            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2629            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2630            spin_lock_irqsave(&iadev->tx_lock, flags);
2631            iadev->close_pending--;
2632            iadev->testTable[vcc->vci]->lastTime = 0;
2633            iadev->testTable[vcc->vci]->fract = 0; 
2634            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2635            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2636               if (vcc->qos.txtp.min_pcr > 0)
2637                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2638            }
2639            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2640               ia_vcc = INPH_IA_VCC(vcc); 
2641               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2642               ia_cbrVc_close (vcc);
2643            }
2644            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2645         }
2646         
2647         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2648            // reset reass table
2649            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2650            vc_table += vcc->vci; 
2651            *vc_table = NO_AAL5_PKT;
2652            // reset vc table
2653            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2654            vc_table += vcc->vci;
2655            *vc_table = (vcc->vci << 6) | 15;
2656            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2657               struct abr_vc_table __iomem *abr_vc_table = 
2658                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2659               abr_vc_table +=  vcc->vci;
2660               abr_vc_table->rdf = 0x0003;
2661               abr_vc_table->air = 0x5eb1;
2662            }                                 
2663            // Drain the packets
2664            rx_dle_intr(vcc->dev); 
2665            iadev->rx_open[vcc->vci] = NULL;
2666         }
2667         kfree(INPH_IA_VCC(vcc));  
2668         ia_vcc = NULL;
2669         vcc->dev_data = NULL;
2670         clear_bit(ATM_VF_ADDR,&vcc->flags);
2671         return;        
2672 }  
2673   
2674 static int ia_open(struct atm_vcc *vcc)
2675 {  
2676         IADEV *iadev;  
2677         struct ia_vcc *ia_vcc;  
2678         int error;  
2679         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2680         {  
2681                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2682                 vcc->dev_data = NULL;
2683         }  
2684         iadev = INPH_IA_DEV(vcc->dev);  
2685         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2686         {  
2687                 IF_EVENT(printk("iphase open: unspec part\n");)  
2688                 set_bit(ATM_VF_ADDR,&vcc->flags);
2689         }  
2690         if (vcc->qos.aal != ATM_AAL5)  
2691                 return -EINVAL;  
2692         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2693                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2694   
2695         /* Device dependent initialization */  
2696         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2697         if (!ia_vcc) return -ENOMEM;  
2698         vcc->dev_data = ia_vcc;
2699   
2700         if ((error = open_rx(vcc)))  
2701         {  
2702                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2703                 ia_close(vcc);  
2704                 return error;  
2705         }  
2706   
2707         if ((error = open_tx(vcc)))  
2708         {  
2709                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2710                 ia_close(vcc);  
2711                 return error;  
2712         }  
2713   
2714         set_bit(ATM_VF_READY,&vcc->flags);
2715
2716 #if 0
2717         {
2718            static u8 first = 1; 
2719            if (first) {
2720               ia_timer.expires = jiffies + 3*HZ;
2721               add_timer(&ia_timer);
2722               first = 0;
2723            }           
2724         }
2725 #endif
2726         IF_EVENT(printk("ia open returning\n");)  
2727         return 0;  
2728 }  
2729   
2730 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2731 {  
2732         IF_EVENT(printk(">ia_change_qos\n");)  
2733         return 0;  
2734 }  
2735   
2736 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2737 {  
2738    IA_CMDBUF ia_cmds;
2739    IADEV *iadev;
2740    int i, board;
2741    u16 __user *tmps;
2742    IF_EVENT(printk(">ia_ioctl\n");)  
2743    if (cmd != IA_CMD) {
2744       if (!dev->phy->ioctl) return -EINVAL;
2745       return dev->phy->ioctl(dev,cmd,arg);
2746    }
2747    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2748    board = ia_cmds.status;
2749    if ((board < 0) || (board >= iadev_count))
2750          board = 0;    
2751    iadev = ia_dev[board];
2752    switch (ia_cmds.cmd) {
2753    case MEMDUMP:
2754    {
2755         switch (ia_cmds.sub_cmd) {
2756           case MEMDUMP_DEV:     
2757              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2758              if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2759                 return -EFAULT;
2760              ia_cmds.status = 0;
2761              break;
2762           case MEMDUMP_SEGREG:
2763              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2764              tmps = (u16 __user *)ia_cmds.buf;
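             /* the loop below dumps the low 16 bits of each segmentation
                register: 64 halfword values covering 0x80 bytes of the
                register space */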
2765              for(i=0; i<0x80; i+=2, tmps++)
2766                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2767              ia_cmds.status = 0;
2768              ia_cmds.len = 0x80;
2769              break;
2770           case MEMDUMP_REASSREG:
2771              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2772              tmps = (u16 __user *)ia_cmds.buf;
2773              for(i=0; i<0x80; i+=2, tmps++)
2774                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2775              ia_cmds.status = 0;
2776              ia_cmds.len = 0x80;
2777              break;
2778           case MEMDUMP_FFL:
2779           {  
2780              ia_regs_t       *regs_local;
2781              ffredn_t        *ffL;
2782              rfredn_t        *rfL;
2783                      
2784              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2785              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2786              if (!regs_local) return -ENOMEM;
2787              ffL = &regs_local->ffredn;
2788              rfL = &regs_local->rfredn;
2789              /* Copy real rfred registers into the local copy */
2790              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2791                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2792              /* Copy real ffred registers into the local copy */
2793              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2794                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2795
2796              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2797                 kfree(regs_local);
2798                 return -EFAULT;
2799              }
2800              kfree(regs_local);
2801              printk("Board %d registers dumped\n", board);
2802              ia_cmds.status = 0;                  
2803          }      
2804              break;        
2805          case READ_REG:
2806          {  
2807              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2808              desc_dbg(iadev); 
2809              ia_cmds.status = 0; 
2810          }
2811              break;
2812          case 0x6:
2813          {  
2814              ia_cmds.status = 0; 
2815              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2816              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2817          }
2818              break;
2819          case 0x8:
2820          {
2821              struct k_sonet_stats *stats;
2822              stats = &PRIV(_ia_dev[board])->sonet_stats;
2823              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2824              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2825              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2826              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2827              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2828              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2829              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2830              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2831              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2832          }
2833             ia_cmds.status = 0;
2834             break;
2835          case 0x9:
2836             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2837             for (i = 1; i <= iadev->num_rx_desc; i++)
2838                free_desc(_ia_dev[board], i);
2839             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2840                                             iadev->reass_reg+REASS_MASK_REG);
2841             iadev->rxing = 1;
2842             
2843             ia_cmds.status = 0;
2844             break;
2845
2846          case 0xb:
2847             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2848             IaFrontEndIntr(iadev);
2849             break;
2850          case 0xa:
2851             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2852          {  
2853              ia_cmds.status = 0; 
2854              IADebugFlag = ia_cmds.maddr;
2855              printk("New debug option loaded\n");
2856          }
2857              break;
2858          default:
2859              ia_cmds.status = 0;
2860              break;
2861       } 
2862    }
2863       break;
2864    default:
2865       break;
2866
2867    }    
2868    return 0;  
2869 }  
2870   
2871 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2872         void __user *optval, int optlen)  
2873 {  
2874         IF_EVENT(printk(">ia_getsockopt\n");)  
2875         return -EINVAL;  
2876 }  
2877   
2878 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2879         void __user *optval, int optlen)  
2880 {  
2881         IF_EVENT(printk(">ia_setsockopt\n");)  
2882         return -EINVAL;  
2883 }  
2884   
2885 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2886         IADEV *iadev;
2887         struct dle *wr_ptr;
2888         struct tx_buf_desc __iomem *buf_desc_ptr;
2889         int desc;
2890         int comp_code;
2891         int total_len;
2892         struct cpcs_trailer *trailer;
2893         struct ia_vcc *iavcc;
2894
2895         iadev = INPH_IA_DEV(vcc->dev);  
2896         iavcc = INPH_IA_VCC(vcc);
2897         if (!iavcc->txing) {
2898            printk("discard packet on closed VC\n");
2899            if (vcc->pop)
2900                 vcc->pop(vcc, skb);
2901            else
2902                 dev_kfree_skb_any(skb);
2903            return 0;
2904         }
2905
2906         if (skb->len > iadev->tx_buf_sz - 8) {   /* 8 = room for the CPCS trailer */
2907            printk("Transmit size over tx buffer size\n");
2908            if (vcc->pop)
2909                  vcc->pop(vcc, skb);
2910            else
2911                  dev_kfree_skb_any(skb);
2912           return 0;
2913         }
2914         if ((u32)skb->data & 3) {
2915            printk("Misaligned SKB\n");
2916            if (vcc->pop)
2917                  vcc->pop(vcc, skb);
2918            else
2919                  dev_kfree_skb_any(skb);
2920            return 0;
2921         }       
2922         /* Get a descriptor number from our free descriptor queue.
2923            The descriptor number is taken from the TCQ, which is used
2924            here as a free-buffer queue: initially the TCQ is filled
2925            with all the descriptors and is therefore full.
2926         */
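        /* Note: the 16-bit value returned by get_desc() packs a 3-bit
           completion code in the top bits and the 13-bit descriptor number
           in the low bits; the shift and mask below separate the two. */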
2927         desc = get_desc (iadev, iavcc);
2928         if (desc == 0xffff) 
2929             return 1;
2930         comp_code = desc >> 13;  
2931         desc &= 0x1fff;  
2932   
2933         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2934         {  
2935                 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
2936                 atomic_inc(&vcc->stats->tx);
2937                 if (vcc->pop)   
2938                     vcc->pop(vcc, skb);   
2939                 else  
2940                     dev_kfree_skb_any(skb);
2941                 return 0;   /* return SUCCESS */
2942         }  
2943   
2944         if (comp_code)  
2945         {  
2946             IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", 
2947                                                             desc, comp_code);)  
2948         }  
2949        
2950         /* remember the desc and vcc mapping */
2951         iavcc->vc_desc_cnt++;
2952         iadev->desc_tbl[desc-1].iavcc = iavcc;
2953         iadev->desc_tbl[desc-1].txskb = skb;
2954         IA_SKB_STATE(skb) = 0;
2955
2956         iadev->ffL.tcq_rd += 2;
2957         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2958                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2959         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2960   
2961         /* Put the descriptor number in the packet ready queue  
2962                 and put the updated write pointer in the DLE field   
2963         */   
2964         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2965
2966         iadev->ffL.prq_wr += 2;
2967         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2968                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2969           
2970         /* Figure out the exact length of the packet and the padding
2971            required to align it on a 48-byte boundary.  */
2972         total_len = skb->len + sizeof(struct cpcs_trailer);  
2973         total_len = ((total_len + 47) / 48) * 48;
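        /* e.g. a 40-byte SDU plus the 8-byte trailer is exactly 48 and needs
           no padding, while a 100-byte SDU (108 with trailer) rounds up to
           144, i.e. three 48-byte AAL5 cell payloads */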
2974         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2975  
2976         /* Put the packet in a tx buffer */   
2977         trailer = iadev->tx_buf[desc-1].cpcs;
2978         IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
2979                   (u32)skb, (u32)skb->data, skb->len, desc);)
2980         trailer->control = 0; 
2981         /*big endian*/ 
2982         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
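        /* e.g. an skb->len of 0x0123 is stored in the trailer as 0x2301 */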
2983         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2984
2985         /* Display the packet */  
2986         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2987                                                         skb->len, tcnter++);  
2988         xdump(skb->data, skb->len, "TX: ");
2989         printk("\n");)
2990
2991         /* Build the buffer descriptor */  
2992         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2993         buf_desc_ptr += desc;   /* points to the corresponding entry */  
2994         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
2995         /* Huh ? p.115 of users guide describes this as a read-only register */
2996         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2997         buf_desc_ptr->vc_index = vcc->vci;
2998         buf_desc_ptr->bytes = total_len;  
2999
3000         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3001            clear_lockup (vcc, iadev);
3002
3003         /* Build the DLE structure */  
3004         wr_ptr = iadev->tx_dle_q.write;  
3005         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3006         wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3007                 skb->len, PCI_DMA_TODEVICE);
3008         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3009                                                   buf_desc_ptr->buf_start_lo;  
3010         /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */  
3011         wr_ptr->bytes = skb->len;  
3012
3013         /* hw bug - DLE byte counts 0x2d, 0x2e, 0x2f cause DMA lockup; the test below rounds the whole 0x2c-0x2f range up to 0x30 */
3014         if ((wr_ptr->bytes >> 2) == 0xb)
3015            wr_ptr->bytes = 0x30;
3016
3017         wr_ptr->mode = TX_DLE_PSI; 
3018         wr_ptr->prq_wr_ptr_data = 0;
3019   
3020         /* end is not to be used for the DLE q */  
3021         if (++wr_ptr == iadev->tx_dle_q.end)  
3022                 wr_ptr = iadev->tx_dle_q.start;  
3023         
3024         /* Build trailer dle */
3025         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3026         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3027           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3028
3029         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3030         wr_ptr->mode = DMA_INT_ENABLE; 
3031         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3032         
3033         /* end is not to be used for the DLE q */
3034         if (++wr_ptr == iadev->tx_dle_q.end)  
3035                 wr_ptr = iadev->tx_dle_q.start;
3036
3037         iadev->tx_dle_q.write = wr_ptr;  
3038         ATM_DESC(skb) = vcc->vci;
3039         skb_queue_tail(&iadev->tx_dma_q, skb);
3040
3041         atomic_inc(&vcc->stats->tx);
3042         iadev->tx_pkt_cnt++;
3043         /* Increment transaction counter */  
3044         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
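        /* the value 2 appears to correspond to the two DLEs (packet and
           trailer) queued above */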
3045         
3046 #if 0        
3047         /* add flow control logic */ 
3048         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3049           if (iavcc->vc_desc_cnt > 10) {
3050              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3051             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3052               iavcc->flow_inc = -1;
3053               iavcc->saved_tx_quota = vcc->tx_quota;
3054            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3055              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3056              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3057               iavcc->flow_inc = 0;
3058            }
3059         }
3060 #endif
3061         IF_TX(printk("ia send done\n");)  
3062         return 0;  
3063 }  
3064
3065 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3066 {
3067         IADEV *iadev; 
3068         struct ia_vcc *iavcc;
3069         unsigned long flags;
3070
3071         iadev = INPH_IA_DEV(vcc->dev);
3072         iavcc = INPH_IA_VCC(vcc); 
3073         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3074         {
3075             if (!skb)
3076                 printk(KERN_CRIT "null skb in ia_send\n");
3077             else dev_kfree_skb_any(skb);
3078             return -EINVAL;
3079         }                         
3080         spin_lock_irqsave(&iadev->tx_lock, flags); 
3081         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3082             dev_kfree_skb_any(skb);
3083             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3084             return -EINVAL; 
3085         }
3086         ATM_SKB(skb)->vcc = vcc;
3087  
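        /* preserve ordering: if packets are already backlogged, queue behind
           them; otherwise try to transmit now and fall back to the backlog
           on failure */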
3088         if (skb_peek(&iadev->tx_backlog)) {
3089            skb_queue_tail(&iadev->tx_backlog, skb);
3090         }
3091         else {
3092            if (ia_pkt_tx (vcc, skb)) {
3093               skb_queue_tail(&iadev->tx_backlog, skb);
3094            }
3095         }
3096         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3097         return 0;
3098
3099 }
3100
3101 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3102 {
3103   int   left = *pos, n;   
3104   char  *tmpPtr;
3105   IADEV *iadev = INPH_IA_DEV(dev);
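  /* *pos selects the output line: 0 produces the board-type line and
     1 produces the counter block below */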
3106   if(!left--) {
3107      if (iadev->phy_type == FE_25MBIT_PHY) {
3108        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3109        return n;
3110      }
3111      if (iadev->phy_type == FE_DS3_PHY)
3112         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3113      else if (iadev->phy_type == FE_E3_PHY)
3114         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3115      else if (iadev->phy_type == FE_UTP_OPTION)
3116          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3117      else
3118         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3119      tmpPtr = page + n;
3120      if (iadev->pci_map_size == 0x40000)
3121         n += sprintf(tmpPtr, "-1KVC-");
3122      else
3123         n += sprintf(tmpPtr, "-4KVC-");  
3124      tmpPtr = page + n; 
3125      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3126         n += sprintf(tmpPtr, "1M  \n");
3127      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3128         n += sprintf(tmpPtr, "512K\n");
3129      else
3130        n += sprintf(tmpPtr, "128K\n");
3131      return n;
3132   }
3133   if (!left) {
3134      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3135                            "  Size of Tx Buffer  :  %u\n"
3136                            "  Number of Rx Buffer:  %u\n"
3137                            "  Size of Rx Buffer  :  %u\n"
3138                            "  Packets Received   :  %u\n"
3139                            "  Packets Transmitted:  %u\n"
3140                            "  Cells Received     :  %u\n"
3141                            "  Cells Transmitted  :  %u\n"
3142                            "  Board Dropped Cells:  %u\n"
3143                            "  Board Dropped Pkts :  %u\n",
3144                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3145                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3146                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3147                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3148                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3149   }
3150   return 0;
3151 }
3152   
3153 static const struct atmdev_ops ops = {  
3154         .open           = ia_open,  
3155         .close          = ia_close,  
3156         .ioctl          = ia_ioctl,  
3157         .getsockopt     = ia_getsockopt,  
3158         .setsockopt     = ia_setsockopt,  
3159         .send           = ia_send,  
3160         .phy_put        = ia_phy_put,  
3161         .phy_get        = ia_phy_get,  
3162         .change_qos     = ia_change_qos,  
3163         .proc_read      = ia_proc_read,
3164         .owner          = THIS_MODULE,
3165 };  
3166           
3167 static int __devinit ia_init_one(struct pci_dev *pdev,
3168                                  const struct pci_device_id *ent)
3169 {  
3170         struct atm_dev *dev;  
3171         IADEV *iadev;  
3172         unsigned long flags;
3173         int ret;
3174
3175         iadev = kmalloc(sizeof(*iadev), GFP_KERNEL); 
3176         if (!iadev) {
3177                 ret = -ENOMEM;
3178                 goto err_out;
3179         }
3180         memset(iadev, 0, sizeof(*iadev));
3181         iadev->pci = pdev;
3182
3183         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3184                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3185         if (pci_enable_device(pdev)) {
3186                 ret = -ENODEV;
3187                 goto err_out_free_iadev;
3188         }
3189         dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3190         if (!dev) {
3191                 ret = -ENOMEM;
3192                 goto err_out_disable_dev;
3193         }
3194         dev->dev_data = iadev;
3195         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3196         IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
3197                 iadev->LineRate);)
3198
3199         ia_dev[iadev_count] = iadev;
3200         _ia_dev[iadev_count] = dev;
3201         iadev_count++;
3202         spin_lock_init(&iadev->misc_lock);
3203         /* First fixes first. I don't want to think about this now. */
3204         spin_lock_irqsave(&iadev->misc_lock, flags); 
3205         if (ia_init(dev) || ia_start(dev)) {  
3206                 IF_INIT(printk("IA register failed!\n");)
3207                 iadev_count--;
3208                 ia_dev[iadev_count] = NULL;
3209                 _ia_dev[iadev_count] = NULL;
3210                 spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3211                 ret = -EINVAL;
3212                 goto err_out_deregister_dev;
3213         }
3214         spin_unlock_irqrestore(&iadev->misc_lock, flags); 
3215         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3216
3217         iadev->next_board = ia_boards;  
3218         ia_boards = dev;  
3219
3220         pci_set_drvdata(pdev, dev);
3221
3222         return 0;
3223
3224 err_out_deregister_dev:
3225         atm_dev_deregister(dev);  
3226 err_out_disable_dev:
3227         pci_disable_device(pdev);
3228 err_out_free_iadev:
3229         kfree(iadev);
3230 err_out:
3231         return ret;
3232 }
3233
3234 static void __devexit ia_remove_one(struct pci_dev *pdev)
3235 {
3236         struct atm_dev *dev = pci_get_drvdata(pdev);
3237         IADEV *iadev = INPH_IA_DEV(dev);
3238
3239         ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10); 
3240         udelay(1);
3241
3242         /* De-register device */  
3243         free_irq(iadev->irq, dev);
3244         iadev_count--;
3245         ia_dev[iadev_count] = NULL;
3246         _ia_dev[iadev_count] = NULL;
3247         atm_dev_deregister(dev);
3248         IF_EVENT(printk("ia deregistered at (itf:%d)\n", dev->number);)
3249
3250         iounmap(iadev->base);  
3251         pci_disable_device(pdev);
3252
3253         ia_free_rx(iadev);
3254         ia_free_tx(iadev);
3255
3256         kfree(iadev);
3257 }
3258
3259 static struct pci_device_id ia_pci_tbl[] = {
3260         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3261         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3262         { 0,}
3263 };
3264 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3265
3266 static struct pci_driver ia_driver = {
3267         .name =         DEV_LABEL,
3268         .id_table =     ia_pci_tbl,
3269         .probe =        ia_init_one,
3270         .remove =       __devexit_p(ia_remove_one),
3271 };
3272
3273 static int __init ia_module_init(void)
3274 {
3275         int ret;
3276
3277         ret = pci_module_init(&ia_driver);
3278         if (ret >= 0) {
3279                 ia_timer.expires = jiffies + 3*HZ;
3280                 add_timer(&ia_timer); 
3281         } else
3282                 printk(KERN_ERR DEV_LABEL ": no adapter found\n");  
3283         return ret;
3284 }
3285
3286 static void __exit ia_module_exit(void)
3287 {
3288         pci_unregister_driver(&ia_driver);
3289
3290         del_timer(&ia_timer);
3291 }
3292
3293 module_init(ia_module_init);
3294 module_exit(ia_module_exit);