vserver 1.9.3
[linux-2.6.git] / drivers / atm / he.c
1 /* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
2
3 /*
4
5   he.c
6
7   ForeRunnerHE ATM Adapter driver for ATM on Linux
8   Copyright (C) 1999-2001  Naval Research Laboratory
9
10   This library is free software; you can redistribute it and/or
11   modify it under the terms of the GNU Lesser General Public
12   License as published by the Free Software Foundation; either
13   version 2.1 of the License, or (at your option) any later version.
14
15   This library is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18   Lesser General Public License for more details.
19
20   You should have received a copy of the GNU Lesser General Public
21   License along with this library; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23
24 */
25
26 /*
27
28   he.c
29
30   ForeRunnerHE ATM Adapter driver for ATM on Linux
31   Copyright (C) 1999-2001  Naval Research Laboratory
32
33   Permission to use, copy, modify and distribute this software and its
34   documentation is hereby granted, provided that both the copyright
35   notice and this permission notice appear in all copies of the software,
36   derivative works or modified versions, and any portions thereof, and
37   that both notices appear in supporting documentation.
38
39   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
40   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
41   RESULTING FROM THE USE OF THIS SOFTWARE.
42
43   This driver was written using the "Programmer's Reference Manual for
44   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45
46   AUTHORS:
47         chas williams <chas@cmf.nrl.navy.mil>
48         eric kinzie <ekinzie@cmf.nrl.navy.mil>
49
50   NOTES:
51         4096 supported 'connections'
52         group 0 is used for all traffic
53         interrupt queue 0 is used for all interrupts
54         aal0 support (based on work from ulrich.u.muller@nokia.com)
55
56  */
57
58 #include <linux/config.h>
59 #include <linux/module.h>
60 #include <linux/version.h>
61 #include <linux/kernel.h>
62 #include <linux/skbuff.h>
63 #include <linux/pci.h>
64 #include <linux/errno.h>
65 #include <linux/types.h>
66 #include <linux/string.h>
67 #include <linux/delay.h>
68 #include <linux/init.h>
69 #include <linux/mm.h>
70 #include <linux/sched.h>
71 #include <linux/timer.h>
72 #include <linux/interrupt.h>
73 #include <asm/io.h>
74 #include <asm/byteorder.h>
75 #include <asm/uaccess.h>
76
77 #include <linux/atmdev.h>
78 #include <linux/atm.h>
79 #include <linux/sonet.h>
80
81 #define USE_TASKLET
82 #undef USE_SCATTERGATHER
83 #undef USE_CHECKSUM_HW                  /* still confused about this */
84 #define USE_RBPS
85 #undef USE_RBPS_POOL                    /* if memory is tight try this */
86 #undef USE_RBPL_POOL                    /* if memory is tight try this */
87 #define USE_TPD_POOL
88 /* #undef CONFIG_ATM_HE_USE_SUNI */
89
90 /* compatibility */
91
92 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
93 typedef void irqreturn_t;
94 #define IRQ_NONE
95 #define IRQ_HANDLED
96 #define IRQ_RETVAL(x)
97 #endif
98
99 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,9)
100 #define __devexit_p(func)               func
101 #endif
102
103 #ifndef MODULE_LICENSE
104 #define MODULE_LICENSE(x)
105 #endif
106
107 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
108 #define pci_set_drvdata(pci_dev, data)  (pci_dev)->driver_data = (data)
109 #define pci_get_drvdata(pci_dev)        (pci_dev)->driver_data
110 #endif
111
112 #include "he.h"
113
114 #include "suni.h"
115
116 #include <linux/atm_he.h>
117
118 #define hprintk(fmt,args...)    printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
119
120 #undef DEBUG
121 #ifdef DEBUG
122 #define HPRINTK(fmt,args...)    printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
123 #else
124 #define HPRINTK(fmt,args...)    do { } while (0)
125 #endif /* DEBUG */
126
127
128 /* version definition */
129
130 static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
131
132 /* declarations */
133
134 static int he_open(struct atm_vcc *vcc);
135 static void he_close(struct atm_vcc *vcc);
136 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
137 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
138 static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
139 static void he_tasklet(unsigned long data);
140 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
141 static int he_start(struct atm_dev *dev);
142 static void he_stop(struct he_dev *dev);
143 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
144 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
145
146 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
147
/* globals */

struct he_dev *he_devs = NULL;          /* head of singly-linked list of probed adapters (->next) */
static short disable64 = -1;            /* tuning knob; -1 appears to mean "unset" -- consumer not visible in this chunk */
static short nvpibits = -1;             /* presumably number of vpi bits to configure; -1 = driver default -- TODO confirm */
static short nvcibits = -1;             /* presumably number of vci bits to configure; -1 = driver default -- TODO confirm */
static short rx_skb_reserve = 16;       /* presumably headroom reserved in receive skbs -- TODO confirm at use site */
static short irq_coalesce = 1;          /* nonzero presumably enables interrupt coalescing -- TODO confirm */
static short sdh = 0;                   /* sonet vs. sdh framing selector passed to the phy -- TODO confirm */

/* device operation table handed to the atm layer when the adapter is
 * registered (see atm_dev_register in he_init_one) */
static struct atmdev_ops he_ops =
{
        .open =         he_open,
        .close =        he_close,       
        .ioctl =        he_ioctl,       
        .send =         he_send,
        .phy_put =      he_phy_put,
        .phy_get =      he_phy_get,
        .proc_read =    he_proc_read,
        .owner =        THIS_MODULE
};
169
170 #define he_writel(dev, val, reg)        do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
171 #define he_readl(dev, reg)              readl((dev)->membase + (reg))
172
173 /* section 2.12 connection memory access */
174
/* Write 'val' to adapter-internal connection memory at word address
 * 'addr'; 'flags' selects the target space (RCM/TCM/mailbox, see the
 * wrapper macros below).  Section 2.12: the data register must be
 * loaded before the control word kicks off the internal cycle, and the
 * dummy read of CON_DAT flushes the posted write to keep that order. */
static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
                                                                unsigned flags)
{
        he_writel(he_dev, val, CON_DAT);
        (void) he_readl(he_dev, CON_DAT);               /* flush posted writes */
        he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
        /* NOTE(review): unbounded busy-wait -- spins forever if the
         * controller never clears BUSY */
        while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}
184
185 #define he_writel_rcm(dev, val, reg)                            \
186                         he_writel_internal(dev, val, reg, CON_CTL_RCM)
187
188 #define he_writel_tcm(dev, val, reg)                            \
189                         he_writel_internal(dev, val, reg, CON_CTL_TCM)
190
191 #define he_writel_mbox(dev, val, reg)                           \
192                         he_writel_internal(dev, val, reg, CON_CTL_MBOX)
193
/* Read one word of adapter-internal connection memory at word address
 * 'addr'; 'flags' selects the target space (RCM/TCM/mailbox, see the
 * wrapper macros below).  Issues the read command, busy-waits for the
 * cycle to finish, then returns the data register contents. */
static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
        he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
        /* NOTE(review): unbounded busy-wait, same caveat as the write path */
        while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
        return he_readl(he_dev, CON_DAT);
}
201
202 #define he_readl_rcm(dev, reg) \
203                         he_readl_internal(dev, reg, CON_CTL_RCM)
204
205 #define he_readl_tcm(dev, reg) \
206                         he_readl_internal(dev, reg, CON_CTL_TCM)
207
208 #define he_readl_mbox(dev, reg) \
209                         he_readl_internal(dev, reg, CON_CTL_MBOX)
210
211
/* figure 2.2 connection id: vpi occupies the bits above the vcibits-wide
 * vci field, masked to the 13-bit cid space.  All arguments are fully
 * parenthesized: the original expansion applied '<<' directly to the
 * unparenthesized vpi/vci tokens, which mis-associates for expression
 * arguments (e.g. a '&' in vpi would bind after the shift). */

#define he_mkcid(dev, vpi, vci)         ((((vpi) << (dev)->vcibits) | (vci)) & 0x1fff)
215
216 /* 2.5.1 per connection transmit state registers */
217
218 #define he_writel_tsr0(dev, val, cid) \
219                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
220 #define he_readl_tsr0(dev, cid) \
221                 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
222
223 #define he_writel_tsr1(dev, val, cid) \
224                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
225
226 #define he_writel_tsr2(dev, val, cid) \
227                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
228
229 #define he_writel_tsr3(dev, val, cid) \
230                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
231
232 #define he_writel_tsr4(dev, val, cid) \
233                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
234
235         /* from page 2-20
236          *
237          * NOTE While the transmit connection is active, bits 23 through 0
238          *      of this register must not be written by the host.  Byte
239          *      enables should be used during normal operation when writing
240          *      the most significant byte.
241          */
242
243 #define he_writel_tsr4_upper(dev, val, cid) \
244                 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
245                                                         CON_CTL_TCM \
246                                                         | CON_BYTE_DISABLE_2 \
247                                                         | CON_BYTE_DISABLE_1 \
248                                                         | CON_BYTE_DISABLE_0)
249
250 #define he_readl_tsr4(dev, cid) \
251                 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
252
253 #define he_writel_tsr5(dev, val, cid) \
254                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
255
256 #define he_writel_tsr6(dev, val, cid) \
257                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
258
259 #define he_writel_tsr7(dev, val, cid) \
260                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
261
262
263 #define he_writel_tsr8(dev, val, cid) \
264                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
265
266 #define he_writel_tsr9(dev, val, cid) \
267                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
268
269 #define he_writel_tsr10(dev, val, cid) \
270                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
271
272 #define he_writel_tsr11(dev, val, cid) \
273                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
274
275
276 #define he_writel_tsr12(dev, val, cid) \
277                 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
278
279 #define he_writel_tsr13(dev, val, cid) \
280                 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
281
282
283 #define he_writel_tsr14(dev, val, cid) \
284                 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
285
286 #define he_writel_tsr14_upper(dev, val, cid) \
287                 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
288                                                         CON_CTL_TCM \
289                                                         | CON_BYTE_DISABLE_2 \
290                                                         | CON_BYTE_DISABLE_1 \
291                                                         | CON_BYTE_DISABLE_0)
292
293 /* 2.7.1 per connection receive state registers */
294
295 #define he_writel_rsr0(dev, val, cid) \
296                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
297 #define he_readl_rsr0(dev, cid) \
298                 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
299
300 #define he_writel_rsr1(dev, val, cid) \
301                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
302
303 #define he_writel_rsr2(dev, val, cid) \
304                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
305
306 #define he_writel_rsr3(dev, val, cid) \
307                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
308
309 #define he_writel_rsr4(dev, val, cid) \
310                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
311
312 #define he_writel_rsr5(dev, val, cid) \
313                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
314
315 #define he_writel_rsr6(dev, val, cid) \
316                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
317
318 #define he_writel_rsr7(dev, val, cid) \
319                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
320
321 static __inline__ struct atm_vcc*
322 __find_vcc(struct he_dev *he_dev, unsigned cid)
323 {
324         struct hlist_head *head;
325         struct atm_vcc *vcc;
326         struct hlist_node *node;
327         struct sock *s;
328         short vpi;
329         int vci;
330
331         vpi = cid >> he_dev->vcibits;
332         vci = cid & ((1 << he_dev->vcibits) - 1);
333         head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
334
335         sk_for_each(s, node, head) {
336                 vcc = atm_sk(s);
337                 if (vcc->dev == he_dev->atm_dev &&
338                     vcc->vci == vci && vcc->vpi == vpi &&
339                     vcc->qos.rxtp.traffic_class != ATM_NONE) {
340                                 return vcc;
341                 }
342         }
343         return NULL;
344 }
345
346 static int __devinit
347 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
348 {
349         struct atm_dev *atm_dev = NULL;
350         struct he_dev *he_dev = NULL;
351         int err = 0;
352
353         printk(KERN_INFO "he: %s\n", version);
354
355         if (pci_enable_device(pci_dev))
356                 return -EIO;
357         if (pci_set_dma_mask(pci_dev, HE_DMA_MASK) != 0) {
358                 printk(KERN_WARNING "he: no suitable dma available\n");
359                 err = -EIO;
360                 goto init_one_failure;
361         }
362
363         atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
364         if (!atm_dev) {
365                 err = -ENODEV;
366                 goto init_one_failure;
367         }
368         pci_set_drvdata(pci_dev, atm_dev);
369
370         he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
371                                                         GFP_KERNEL);
372         if (!he_dev) {
373                 err = -ENOMEM;
374                 goto init_one_failure;
375         }
376         memset(he_dev, 0, sizeof(struct he_dev));
377
378         he_dev->pci_dev = pci_dev;
379         he_dev->atm_dev = atm_dev;
380         he_dev->atm_dev->dev_data = he_dev;
381         atm_dev->dev_data = he_dev;
382         he_dev->number = atm_dev->number;
383         if (he_start(atm_dev)) {
384                 he_stop(he_dev);
385                 err = -ENODEV;
386                 goto init_one_failure;
387         }
388         he_dev->next = NULL;
389         if (he_devs)
390                 he_dev->next = he_devs;
391         he_devs = he_dev;
392         return 0;
393
394 init_one_failure:
395         if (atm_dev)
396                 atm_dev_deregister(atm_dev);
397         if (he_dev)
398                 kfree(he_dev);
399         pci_disable_device(pci_dev);
400         return err;
401 }
402
403 static void __devexit
404 he_remove_one (struct pci_dev *pci_dev)
405 {
406         struct atm_dev *atm_dev;
407         struct he_dev *he_dev;
408
409         atm_dev = pci_get_drvdata(pci_dev);
410         he_dev = HE_DEV(atm_dev);
411
412         /* need to remove from he_devs */
413
414         he_stop(he_dev);
415         atm_dev_deregister(atm_dev);
416         kfree(he_dev);
417
418         pci_set_drvdata(pci_dev, NULL);
419         pci_disable_device(pci_dev);
420 }
421
422
/* Convert a cell rate in cells-per-second into the atm forum
 * floating-point encoding: a nonzero flag (bit 14), a 5-bit exponent
 * (bits 13..9) and a 9-bit mantissa (bits 8..0).  Zero encodes as zero. */
static unsigned
rate_to_atmf(unsigned rate)             /* cps to atm forum format */
{
#define NONZERO (1 << 14)

        unsigned exponent;

        if (!rate)
                return 0;

        /* scale up so the mantissa carries 9 fractional bits, then
         * normalize into the 10-bit window by bumping the exponent */
        rate <<= 9;
        for (exponent = 0; rate > 0x3ff; ++exponent)
                rate >>= 1;

        return NONZERO | (exponent << 9) | (rate & 0x1ff);
}
441
/* Build the free-buffer descriptor list for receive local-buffer pool 0
 * in the adapter's receive connection memory (RCM), starting at the
 * base read from RCMLBM_BA.  Pool-0 descriptors use even indices
 * (lbufd_index steps by 2) and each RCM entry pair is 4 words apart;
 * entries are (buffer address, next descriptor index).  Buffer
 * addresses are divided by 32 -- presumably the hardware addresses
 * local buffers in 32-byte units (TODO confirm against the manual).
 * Finally the head (RLBF0_H), tail (RLBF0_T) and count (RLBF0_C)
 * registers are programmed. */
static void __init
he_init_rx_lbfp0(struct he_dev *he_dev)
{
        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
        unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
        
        lbufd_index = 0;
        lbm_offset = he_readl(he_dev, RCMLBM_BA);

        he_writel(he_dev, lbufd_index, RLBF0_H);

        for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
                lbufd_index += 2;
                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

                /* wrap to the next memory row once a row's worth of
                 * lbufs has been laid down */
                if (++lbuf_count == lbufs_per_row) {
                        lbuf_count = 0;
                        row_offset += he_dev->bytes_per_row;
                }
                lbm_offset += 4;
        }
                
        /* tail = index of the last descriptor actually written */
        he_writel(he_dev, lbufd_index - 2, RLBF0_T);
        he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
472
/* Build the free-buffer descriptor list for receive local-buffer pool 1,
 * mirroring he_init_rx_lbfp0 but using odd descriptor indices (starts
 * at 1, steps by 2) so the two pools interleave in RCM, and pool-1's
 * own row range (r1_startrow/r1_numbuffs) and head/tail/count
 * registers (RLBF1_H / RLBF1_T / RLBF1_C). */
static void __init
he_init_rx_lbfp1(struct he_dev *he_dev)
{
        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
        unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
        
        lbufd_index = 1;
        /* skip past pool-0's first entry: 2 words per descriptor */
        lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

        he_writel(he_dev, lbufd_index, RLBF1_H);

        for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
                lbufd_index += 2;
                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

                if (++lbuf_count == lbufs_per_row) {
                        lbuf_count = 0;
                        row_offset += he_dev->bytes_per_row;
                }
                lbm_offset += 4;
        }
                
        he_writel(he_dev, lbufd_index - 2, RLBF1_T);
        he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}
503
/* Build the transmit free-buffer descriptor list, placed in RCM
 * immediately after both receive pools (indices start at
 * r0_numbuffs + r1_numbuffs).  Unlike the receive pools, transmit
 * descriptors are contiguous (index steps by 1, entries 2 words apart).
 * Programs the head (TLBF_H) and tail (TLBF_T) registers; there is no
 * count register for the transmit pool. */
static void __init
he_init_tx_lbfp(struct he_dev *he_dev)
{
        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
        unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
        
        lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
        lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

        he_writel(he_dev, lbufd_index, TLBF_H);

        for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
                lbufd_index += 1;
                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

                if (++lbuf_count == lbufs_per_row) {
                        lbuf_count = 0;
                        row_offset += he_dev->bytes_per_row;
                }
                lbm_offset += 2;
        }
                
        he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
533
/* Allocate the transmit packet descriptor ready queue (TPDRQ): a
 * DMA-coherent, zeroed ring of CONFIG_TPDRQ_SIZE he_tpdrq entries.
 * Tells the adapter the ring's bus address (TPDRQ_B_H), initial tail
 * (TPDRQ_T) and size-minus-one (TPDRQ_S), and points the driver's
 * head/tail cursors at the base.  Returns 0 or -ENOMEM. */
static int __init
he_init_tpdrq(struct he_dev *he_dev)
{
        he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
                CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
        if (he_dev->tpdrq_base == NULL) {
                hprintk("failed to alloc tpdrq\n");
                return -ENOMEM;
        }
        memset(he_dev->tpdrq_base, 0,
                                CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

        he_dev->tpdrq_tail = he_dev->tpdrq_base;
        he_dev->tpdrq_head = he_dev->tpdrq_base;

        he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
        he_writel(he_dev, 0, TPDRQ_T);  
        he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

        return 0;
}
555
/* Initialize the cell scheduler (CS) block over the mailbox interface.
 * The hex constants come straight from the tables of the ForeRunnerHE
 * manual cited inline; 622 Mb/s and 155 Mb/s adapters take different
 * table values.  Do not "simplify" these writes -- both the values and
 * their order are dictated by the hardware. */
static void __init
he_init_cs_block(struct he_dev *he_dev)
{
        unsigned clock, rate, delta;
        int reg;

        /* 5.1.7 cs block initialization */

        /* clear the scheduler timer state registers */
        for (reg = 0; reg < 0x20; ++reg)
                he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

        /* rate grid timer reload values */

        /* core clock depends on the adapter speed */
        clock = he_is622(he_dev) ? 66667000 : 50000000;
        rate = he_dev->atm_dev->link_rate;
        delta = rate / 16 / 2;

        for (reg = 0; reg < 0x10; ++reg) {
                /* 2.4 internal transmit function
                 *
                 * we initialize the first row in the rate grid.
                 * values are period (in clock cycles) of timer
                 */
                unsigned period = clock / rate;

                he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
                rate -= delta;
        }

        if (he_is622(he_dev)) {
                /* table 5.2 (4 cells per lbuf) */
                he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
                he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
                he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
                he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
                he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

                /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
                he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
                he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
                he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
                he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
                he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
                he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

                he_writel_mbox(he_dev, 0x4680, CS_RTATR);

                /* table 5.8 */
                he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
                he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
                he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
                he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
                he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
                he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

                /* table 5.9 */
                he_writel_mbox(he_dev, 0x5, CS_OTPPER);
                he_writel_mbox(he_dev, 0x14, CS_OTWPER);
        } else {
                /* table 5.1 (4 cells per lbuf) */
                he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
                he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
                he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
                he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
                he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

                /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
                he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
                he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
                he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
                he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
                he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
                he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

                he_writel_mbox(he_dev, 0x4680, CS_RTATR);

                /* table 5.8 */
                he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
                he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
                he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
                he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
                he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
                he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

                /* table 5.9 */
                he_writel_mbox(he_dev, 0x6, CS_OTPPER);
                he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
        }

        he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

        /* clear the host group rate registers */
        for (reg = 0; reg < 0x8; ++reg)
                he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
651
/* Initialize the CS block's tables that live in receive connection
 * memory (RCM at CONFIG_RCMABR): clear the rate-grid group and rate
 * controller group tables, then build the rate-to-group lookup table
 * that maps the top 10 bits of an atm forum rate (5 exponent + 5
 * mantissa bits) to a rate-grid index plus a buffer limit.  A host-side
 * copy of the 16x16 scheduler rate grid is built first to search
 * against.  Returns 0, or -ENOMEM if the scratch grid can't be
 * allocated. */
static int __init
he_init_cs_block_rcm(struct he_dev *he_dev)
{
        unsigned (*rategrid)[16][16];
        unsigned rate, delta;
        int i, j, reg;

        unsigned rate_atmf, exp, man;
        unsigned long long rate_cps;
        int mult, buf, buf_limit = 4;

        rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
        if (!rategrid)
                return -ENOMEM;

        /* initialize rate grid group table */

        /* NOTE(review): '< 0xff' clears entries 0x00..0xfe only --
         * looks like an off-by-one that skips the last entry; same for
         * '< 0x1ff' below.  TODO confirm table sizes against the manual */
        for (reg = 0x0; reg < 0xff; ++reg)
                he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

        /* initialize rate controller groups */

        for (reg = 0x100; reg < 0x1ff; ++reg)
                he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
        
        /* initialize tNrm lookup table */

        /* the manual makes reference to a routine in a sample driver
           for proper configuration; fortunately, we only need this
           in order to support abr connection */
        
        /* initialize rate to group table */

        rate = he_dev->atm_dev->link_rate;
        delta = rate / 32;

        /*
         * 2.4 transmit internal functions
         * 
         * we construct a copy of the rate grid used by the scheduler
         * in order to construct the rate to group table below
         */

        /* row 0 spans link_rate down in steps of link_rate/32 */
        for (j = 0; j < 16; j++) {
                (*rategrid)[0][j] = rate;
                rate -= delta;
        }

        /* each later row halves the previous (quarters for the last) */
        for (i = 1; i < 16; i++)
                for (j = 0; j < 16; j++)
                        if (i > 14)
                                (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
                        else
                                (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

        /*
         * 2.4 transmit internal function
         *
         * this table maps the upper 5 bits of exponent and mantissa
         * of the atm forum representation of the rate into an index
         * on rate grid  
         */

        rate_atmf = 0;
        while (rate_atmf < 0x400) {
                /* decode: 5 mantissa bits widened to 9, 5 exponent bits */
                man = (rate_atmf & 0x1f) << 4;
                exp = rate_atmf >> 5;

                /* 
                        instead of '/ 512', use '>> 9' to prevent a call
                        to divdu3 on x86 platforms
                */
                /* rate in cps: 2^exp * (1 + man/512) */
                rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

                if (rate_cps < 10)
                        rate_cps = 10;  /* 2.2.1 minimum payload rate is 10 cps */

                /* find the slowest grid entry still >= rate_cps */
                for (i = 255; i > 0; i--)
                        if ((*rategrid)[i/16][i%16] >= rate_cps)
                                break;   /* pick nearest rate instead? */

                /*
                 * each table entry is 16 bits: (rate grid index (8 bits)
                 * and a buffer limit (8 bits)
                 * there are two table entries in each 32-bit register
                 */

#ifdef notdef
                buf = rate_cps * he_dev->tx_numbuffs /
                                (he_dev->atm_dev->link_rate * 2);
#else
                /* this is pretty, but avoids _divdu3 and is mostly correct */
                mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
                if (rate_cps > (272 * mult))
                        buf = 4;
                else if (rate_cps > (204 * mult))
                        buf = 3;
                else if (rate_cps > (136 * mult))
                        buf = 2;
                else if (rate_cps > (68 * mult))
                        buf = 1;
                else
                        buf = 0;
#endif
                /* accumulate two 16-bit entries per 32-bit word: the
                 * even entry is shifted up when the odd one arrives,
                 * and the pair is written on odd rate_atmf values */
                if (buf > buf_limit)
                        buf = buf_limit;
                reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400
          
                if (rate_atmf & 0x1)
                        he_writel_rcm(he_dev, reg,
                                CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

                ++rate_atmf;
        }

        kfree(rategrid);
        return 0;
}
772
773 static int __init
774 he_init_group(struct he_dev *he_dev, int group)
775 {
776         int i;
777
778 #ifdef USE_RBPS
779         /* small buffer pool */
780 #ifdef USE_RBPS_POOL
781         he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
782                         CONFIG_RBPS_BUFSIZE, 8, 0);
783         if (he_dev->rbps_pool == NULL) {
784                 hprintk("unable to create rbps pages\n");
785                 return -ENOMEM;
786         }
787 #else /* !USE_RBPS_POOL */
788         he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
789                 CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
790         if (he_dev->rbps_pages == NULL) {
791                 hprintk("unable to create rbps page pool\n");
792                 return -ENOMEM;
793         }
794 #endif /* USE_RBPS_POOL */
795
796         he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
797                 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
798         if (he_dev->rbps_base == NULL) {
799                 hprintk("failed to alloc rbps\n");
800                 return -ENOMEM;
801         }
802         memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
803         he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
804
805         for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
806                 dma_addr_t dma_handle;
807                 void *cpuaddr;
808
809 #ifdef USE_RBPS_POOL 
810                 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
811                 if (cpuaddr == NULL)
812                         return -ENOMEM;
813 #else
814                 cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
815                 dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
816 #endif
817
818                 he_dev->rbps_virt[i].virt = cpuaddr;
819                 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
820                 he_dev->rbps_base[i].phys = dma_handle;
821
822         }
823         he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
824
825         he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
826         he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
827                                                 G0_RBPS_T + (group * 32));
828         he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
829                                                 G0_RBPS_BS + (group * 32));
830         he_writel(he_dev,
831                         RBP_THRESH(CONFIG_RBPS_THRESH) |
832                         RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
833                         RBP_INT_ENB,
834                                                 G0_RBPS_QI + (group * 32));
835 #else /* !USE_RBPS */
836         he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
837         he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
838         he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
839         he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
840                                                 G0_RBPS_BS + (group * 32));
841 #endif /* USE_RBPS */
842
843         /* large buffer pool */
844 #ifdef USE_RBPL_POOL
845         he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
846                         CONFIG_RBPL_BUFSIZE, 8, 0);
847         if (he_dev->rbpl_pool == NULL) {
848                 hprintk("unable to create rbpl pool\n");
849                 return -ENOMEM;
850         }
851 #else /* !USE_RBPL_POOL */
852         he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
853                 CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
854         if (he_dev->rbpl_pages == NULL) {
855                 hprintk("unable to create rbpl pages\n");
856                 return -ENOMEM;
857         }
858 #endif /* USE_RBPL_POOL */
859
860         he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
861                 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
862         if (he_dev->rbpl_base == NULL) {
863                 hprintk("failed to alloc rbpl\n");
864                 return -ENOMEM;
865         }
866         memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
867         he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
868
869         for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
870                 dma_addr_t dma_handle;
871                 void *cpuaddr;
872
873 #ifdef USE_RBPL_POOL
874                 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
875                 if (cpuaddr == NULL)
876                         return -ENOMEM;
877 #else
878                 cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
879                 dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
880 #endif
881
882                 he_dev->rbpl_virt[i].virt = cpuaddr;
883                 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
884                 he_dev->rbpl_base[i].phys = dma_handle;
885         }
886         he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
887
888         he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
889         he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
890                                                 G0_RBPL_T + (group * 32));
891         he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
892                                                 G0_RBPL_BS + (group * 32));
893         he_writel(he_dev,
894                         RBP_THRESH(CONFIG_RBPL_THRESH) |
895                         RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
896                         RBP_INT_ENB,
897                                                 G0_RBPL_QI + (group * 32));
898
899         /* rx buffer ready queue */
900
901         he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
902                 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
903         if (he_dev->rbrq_base == NULL) {
904                 hprintk("failed to allocate rbrq\n");
905                 return -ENOMEM;
906         }
907         memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
908
909         he_dev->rbrq_head = he_dev->rbrq_base;
910         he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
911         he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
912         he_writel(he_dev,
913                 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
914                                                 G0_RBRQ_Q + (group * 16));
915         if (irq_coalesce) {
916                 hprintk("coalescing interrupts\n");
917                 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
918                                                 G0_RBRQ_I + (group * 16));
919         } else
920                 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
921                                                 G0_RBRQ_I + (group * 16));
922
923         /* tx buffer ready queue */
924
925         he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
926                 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
927         if (he_dev->tbrq_base == NULL) {
928                 hprintk("failed to allocate tbrq\n");
929                 return -ENOMEM;
930         }
931         memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
932
933         he_dev->tbrq_head = he_dev->tbrq_base;
934
935         he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
936         he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
937         he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
938         he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
939
940         return 0;
941 }
942
/*
 * Allocate and program the adapter's interrupt queue (IRQ0 only; IRQ1-3
 * are explicitly disabled), then hook up the host interrupt line.
 *
 * Returns 0 on success, -ENOMEM if the DMA-coherent queue allocation
 * fails, or -EINVAL if request_irq() fails.  On failure the caller is
 * expected to clean up via he_stop(), which frees irq_base / the irq
 * if they were set up.
 */
static int __init
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	/* one extra he_irq entry holds the hardware-written tail offset */
	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark every slot empty so stale entries are never consumed */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	/* program queue 0: base address, size/threshold, line-mode on INTA */
	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* queues 1-3 are unused; zero their registers */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	/* map all connection groups to interrupt queue 0 */
	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}   

	/* remember the line so he_stop() knows to free_irq() it */
	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
1004
/*
 * Bring the adapter to an operational state: configure the PCI bus
 * interface, reset the card, read the serial PROM, set up the local
 * (cell) buffer memory layout and connection memory, initialize the
 * host queues for group 0 (disabling groups 1..HE_NUM_GROUPS-1), and
 * finally enable transmit and receive.
 *
 * The numbered comments (4.x / 5.1.x) refer to sections of the
 * ForeRunnerHE Programmer's Reference Manual this sequence follows;
 * the order of the register writes is part of that sequence and must
 * not be changed.
 *
 * Returns 0 on success or a negative errno.  On failure, partially
 * acquired resources (membase mapping, irq, DMA memory) are left for
 * the caller to release via he_stop().
 */
static int __init
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;
	
	/* NOTE(review): err is unsigned but receives he_init_irq()'s
	   negative errno; the value survives the round-trip back to the
	   caller's int, but a plain int would be clearer */
	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization 
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	/* enable memory-space decoding, bus mastering and
	   memory-write-invalidate */
	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	/* bump a too-small cache line size; failure to write is only
	   logged, not fatal */
	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 * 
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */ 
#define LAT_TIMER 209
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	/* NOTE(review): 16000us is long for udelay(); some architectures
	   cap udelay at a few ms -- msleep(16) would be the usual choice
	   in process context.  Confirm before changing. */
	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	/* module parameter override: force 32-bit transfers */
	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	/* end-system identifier (MAC address) from the PROM */
	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
					he_dev->media & 0x40 ? "SM" : "MM",
						dev->esi[0],
						dev->esi[1],
						dev->esi[2],
						dev->esi[3],
						dev->esi[4],
						dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *                    
	 *             HE155                          HE622
	 *                                                      
	 *        0 ____________1023 bytes  0 _______________________2047 bytes
	 *         |            |            |                   |   |
	 *         |  utility   |            |        rx0        |   |
	 *        5|____________|         255|___________________| u |
	 *        6|            |         256|                   | t |
	 *         |            |            |                   | i |
	 *         |    rx0     |     row    |        tx         | l |
	 *         |            |            |                   | i |
	 *         |            |         767|___________________| t |
	 *      517|____________|         768|                   | y |
	 * row  518|            |            |        rx1        |   |
	 *         |            |        1023|___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	/* nvpibits/nvcibits are module parameters (-1 = unset); together
	   they must partition the HE_MAXCIDBITS connection-id space */
	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	/* cell buffer memory geometry per the map above (622 vs 155) */
	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	/* derive buffer counts; caps (2560/5120) bound each free pool */
	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev, 
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev, 
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			 RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs),		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	/* clear transmit and receive connection memory before use */
	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |             rx0/1 |
	 *             |       LBM         |   link lists of local
	 *             |             tx    |   buffer memory 
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH); 	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);
	
	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */
	
	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;		
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	/* static TPD array: one DMA-coherent allocation, index encoded
	   in each descriptor's status word */
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}
		
	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	/* only group 0 carries traffic; groups 1..HE_NUM_GROUPS-1 get
	   their queue registers zeroed/disabled below */
	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	/* sdh module parameter: switch the SUNI to SDH framing */
	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	/* reset the CBR rate-table bookkeeping (pcr == -1 means free) */
	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	/* statistics exported via /proc */
	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}
1583
1584 static void
1585 he_stop(struct he_dev *he_dev)
1586 {
1587         u16 command;
1588         u32 gen_cntl_0, reg;
1589         struct pci_dev *pci_dev;
1590
1591         pci_dev = he_dev->pci_dev;
1592
1593         /* disable interrupts */
1594
1595         if (he_dev->membase) {
1596                 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1597                 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1598                 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1599
1600 #ifdef USE_TASKLET
1601                 tasklet_disable(&he_dev->tasklet);
1602 #endif
1603
1604                 /* disable recv and transmit */
1605
1606                 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1607                 reg &= ~(TX_ENABLE|ER_ENABLE);
1608                 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1609
1610                 reg = he_readl(he_dev, RC_CONFIG);
1611                 reg &= ~(RX_ENABLE);
1612                 he_writel(he_dev, reg, RC_CONFIG);
1613         }
1614
1615 #ifdef CONFIG_ATM_HE_USE_SUNI
1616         if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1617                 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1618 #endif /* CONFIG_ATM_HE_USE_SUNI */
1619
1620         if (he_dev->irq)
1621                 free_irq(he_dev->irq, he_dev);
1622
1623         if (he_dev->irq_base)
1624                 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1625                         * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1626
1627         if (he_dev->hsp)
1628                 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1629                                                 he_dev->hsp, he_dev->hsp_phys);
1630
1631         if (he_dev->rbpl_base) {
1632 #ifdef USE_RBPL_POOL
1633                 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1634                         void *cpuaddr = he_dev->rbpl_virt[i].virt;
1635                         dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1636
1637                         pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1638                 }
1639 #else
1640                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1641                         * CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
1642 #endif
1643                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1644                         * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1645         }
1646
1647 #ifdef USE_RBPL_POOL
1648         if (he_dev->rbpl_pool)
1649                 pci_pool_destroy(he_dev->rbpl_pool);
1650 #endif
1651
1652 #ifdef USE_RBPS
1653         if (he_dev->rbps_base) {
1654 #ifdef USE_RBPS_POOL
1655                 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1656                         void *cpuaddr = he_dev->rbps_virt[i].virt;
1657                         dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1658
1659                         pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1660                 }
1661 #else
1662                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1663                         * CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
1664 #endif
1665                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1666                         * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1667         }
1668
1669 #ifdef USE_RBPS_POOL
1670         if (he_dev->rbps_pool)
1671                 pci_pool_destroy(he_dev->rbps_pool);
1672 #endif
1673
1674 #endif /* USE_RBPS */
1675
1676         if (he_dev->rbrq_base)
1677                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1678                                                         he_dev->rbrq_base, he_dev->rbrq_phys);
1679
1680         if (he_dev->tbrq_base)
1681                 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1682                                                         he_dev->tbrq_base, he_dev->tbrq_phys);
1683
1684         if (he_dev->tpdrq_base)
1685                 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1686                                                         he_dev->tpdrq_base, he_dev->tpdrq_phys);
1687
1688 #ifdef USE_TPD_POOL
1689         if (he_dev->tpd_pool)
1690                 pci_pool_destroy(he_dev->tpd_pool);
1691 #else
1692         if (he_dev->tpd_base)
1693                 pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
1694                                                         he_dev->tpd_base, he_dev->tpd_base_phys);
1695 #endif
1696
1697         if (he_dev->pci_dev) {
1698                 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1699                 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1700                 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1701         }
1702         
1703         if (he_dev->membase)
1704                 iounmap((void *) he_dev->membase);
1705 }
1706
1707 static struct he_tpd *
1708 __alloc_tpd(struct he_dev *he_dev)
1709 {
1710 #ifdef USE_TPD_POOL
1711         struct he_tpd *tpd;
1712         dma_addr_t dma_handle; 
1713
1714         tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);              
1715         if (tpd == NULL)
1716                 return NULL;
1717                         
1718         tpd->status = TPD_ADDR(dma_handle);
1719         tpd->reserved = 0; 
1720         tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1721         tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1722         tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1723
1724         return tpd;
1725 #else
1726         int i;
1727
1728         for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1729                 ++he_dev->tpd_head;
1730                 if (he_dev->tpd_head > he_dev->tpd_end) {
1731                         he_dev->tpd_head = he_dev->tpd_base;
1732                 }
1733
1734                 if (!he_dev->tpd_head->inuse) {
1735                         he_dev->tpd_head->inuse = 1;
1736                         he_dev->tpd_head->status &= TPD_MASK;
1737                         he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1738                         he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1739                         he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1740                         return he_dev->tpd_head;
1741                 }
1742         }
1743         hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1744         return NULL;
1745 #endif
1746 }
1747
/* pull the 16-bit length field out of an aal5 trailer (last 8 bytes
 * of the reassembled pdu: ..., CPCS-UU, CPI, length[2], crc[4]) */
#define AAL5_LEN(buf,len)						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

/* NB: every use of the macro argument is parenthesized on its own; the
 * old expansion indexed with [(len-1)], which mis-binds when the `len`
 * argument is built from operators of lower precedence than `-`
 * (e.g. shifts). */
#define TCP_CKSUM(buf,len)						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len)-1]))
1761
/*
 * he_service_rbrq -- drain the receive buffer return queue for <group>.
 *
 * Walks the rbrq from the driver's head to the adapter's tail (published
 * in the host status page), chaining each returned host buffer onto the
 * owning vcc's iovec list.  When an END_PDU entry arrives the buffers
 * are copied into a freshly charged skb and pushed up the atm stack;
 * buffers are then handed back to the rbp rings by clearing RBP_LOANED.
 *
 * Returns the number of pdus reassembled, so the caller knows whether
 * the buffer pools need replenishing.  Runs in irq/tasklet context and
 * holds vcc_sklist_lock for reading throughout.
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	/* adapter-side tail, read once from the host status page */
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;	/* cache the last cid -> vcc lookup */
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* locate the rbp entry the returned dma address belongs to;
		   the RBP_SMALLBUF bit in the address selects the pool */
#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;	/* buflen is in words */
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			/* no owner: return the buffer straight to its pool */
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
				atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* append this buffer to the vcc's partial-pdu iovec list */
		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			/* connection teardown completed; invalidate the
			   cached lookup and wake whoever is in he_close */
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		/* whole pdu collected: charge the socket and build the skb */
		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		do_gettimeofday(&skb->stamp);

		/* copy each borrowed host buffer into the skb */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb->tail = skb->data + skb->len;
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				/* trim to the length field in the aal5 trailer */
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb->tail = skb->data + skb->len;
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_HW;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		vcc->push(vcc, skb);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		/* give every buffer of this (partial) pdu back to its pool */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		/* advance, wrapping inside the ring */
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* tell the adapter how far we have consumed */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1956
1957 static void
1958 he_service_tbrq(struct he_dev *he_dev, int group)
1959 {
1960         struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1961                                 ((unsigned long)he_dev->tbrq_base |
1962                                         he_dev->hsp->group[group].tbrq_tail);
1963         struct he_tpd *tpd;
1964         int slot, updated = 0;
1965 #ifdef USE_TPD_POOL
1966         struct he_tpd *__tpd;
1967 #endif
1968
1969         /* 2.1.6 transmit buffer return queue */
1970
1971         while (he_dev->tbrq_head != tbrq_tail) {
1972                 ++updated;
1973
1974                 HPRINTK("tbrq%d 0x%x%s%s\n",
1975                         group,
1976                         TBRQ_TPD(he_dev->tbrq_head), 
1977                         TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1978                         TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1979 #ifdef USE_TPD_POOL
1980                 tpd = NULL;
1981                 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1982                         if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1983                                 tpd = __tpd;
1984                                 list_del(&__tpd->entry);
1985                                 break;
1986                         }
1987                 }
1988
1989                 if (tpd == NULL) {
1990                         hprintk("unable to locate tpd for dma buffer %x\n",
1991                                                 TBRQ_TPD(he_dev->tbrq_head));
1992                         goto next_tbrq_entry;
1993                 }
1994 #else
1995                 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
1996 #endif
1997
1998                 if (TBRQ_EOS(he_dev->tbrq_head)) {
1999                         HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2000                                 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2001                         if (tpd->vcc)
2002                                 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2003
2004                         goto next_tbrq_entry;
2005                 }
2006
2007                 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2008                         if (tpd->iovec[slot].addr)
2009                                 pci_unmap_single(he_dev->pci_dev,
2010                                         tpd->iovec[slot].addr,
2011                                         tpd->iovec[slot].len & TPD_LEN_MASK,
2012                                                         PCI_DMA_TODEVICE);
2013                         if (tpd->iovec[slot].len & TPD_LST)
2014                                 break;
2015                                 
2016                 }
2017
2018                 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2019                         if (tpd->vcc && tpd->vcc->pop)
2020                                 tpd->vcc->pop(tpd->vcc, tpd->skb);
2021                         else
2022                                 dev_kfree_skb_any(tpd->skb);
2023                 }
2024
2025 next_tbrq_entry:
2026 #ifdef USE_TPD_POOL
2027                 if (tpd)
2028                         pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2029 #else
2030                 tpd->inuse = 0;
2031 #endif
2032                 he_dev->tbrq_head = (struct he_tbrq *)
2033                                 ((unsigned long) he_dev->tbrq_base |
2034                                         TBRQ_MASK(++he_dev->tbrq_head));
2035         }
2036
2037         if (updated) {
2038                 if (updated > he_dev->tbrq_peak)
2039                         he_dev->tbrq_peak = updated;
2040
2041                 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2042                                                 G0_TBRQ_H + (group * 16));
2043         }
2044 }
2045
2046
2047 static void
2048 he_service_rbpl(struct he_dev *he_dev, int group)
2049 {
2050         struct he_rbp *newtail;
2051         struct he_rbp *rbpl_head;
2052         int moved = 0;
2053
2054         rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2055                                         RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2056
2057         for (;;) {
2058                 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2059                                                 RBPL_MASK(he_dev->rbpl_tail+1));
2060
2061                 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2062                 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2063                         break;
2064
2065                 newtail->status |= RBP_LOANED;
2066                 he_dev->rbpl_tail = newtail;
2067                 ++moved;
2068         } 
2069
2070         if (moved)
2071                 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2072 }
2073
#ifdef USE_RBPS
/*
 * he_service_rbps -- small-buffer pool twin of he_service_rbpl:
 * re-loan freed small buffers by advancing the tail toward the
 * hardware head, then publish the new tail.
 */
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *hw_head;
	struct he_rbp *candidate;
	int loaned = 0;

	/* hardware's current position in the ring */
	hw_head = (struct he_rbp *) ((unsigned long) he_dev->rbps_base |
					RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	while (1) {
		candidate = (struct he_rbp *) ((unsigned long) he_dev->rbps_base |
						RBPS_MASK(he_dev->rbps_tail + 1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head */
		if (candidate == hw_head)
			break;
		/* stop at the first buffer still out on loan */
		if (candidate->status & RBP_LOANED)
			break;

		candidate->status |= RBP_LOANED;
		he_dev->rbps_tail = candidate;
		++loaned;
	}

	if (loaned)
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */
2102
/*
 * he_tasklet -- interrupt bottom half: walk the interrupt event queue
 * and dispatch each event to the matching service routine.  When
 * USE_TASKLET is defined this runs as a tasklet and takes global_lock
 * itself; otherwise he_irq_handler calls it directly with global_lock
 * already held.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
	spin_lock_irqsave(&he_dev->global_lock, flags);
#endif

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				/* only replenish the buffer pools when
				   buffers were actually consumed */
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* drop the lock around the phy callback,
				 * which may sleep or re-enter the driver.
				 * NOTE(review): when USE_TASKLET is undefined
				 * `flags` was never written by this function
				 * (the lock was taken in he_irq_handler), so
				 * this restore uses an indeterminate value --
				 * confirm against the intended locking scheme */
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				/* for ITYPE_OTHER the group bits refine the
				   event code rather than naming a group */
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
				he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark the slot consumed so a stale event is detectable */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		/* acknowledge consumption up to the current tail */
		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
#ifdef USE_TASKLET
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
#endif
}
2202
/*
 * he_irq_handler -- top-half interrupt handler.  Reads the adapter's
 * published irq-queue tail offset (with a register fallback for a
 * hardware erratum where the host copy lags), then either schedules
 * the tasklet or, without USE_TASKLET, processes the queue inline
 * while holding global_lock.  Returns IRQ_HANDLED only when new
 * events were found, keeping shared-interrupt accounting honest.
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* the adapter dma's its tail offset into host memory; the low bits
	   select the entry (<< 2 converts the word offset to bytes) */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* host copy not updated yet: fall back to the register */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		/* no tasklet: drain the event queue right here, still
		   holding global_lock */
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2244
/*
 * __enqueue_tpd -- place a transmit packet descriptor on the adapter's
 * tpd ready queue for connection <cid>.  If the ring turns out to be
 * genuinely full even after refreshing the head from the adapter, the
 * pdu is dropped and the tpd recycled (see FIXME below).
 * NOTE(review): callers appear to be expected to hold global_lock --
 * confirm at the call sites.
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			/* ring really is full */
			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
#ifdef USE_TPD_POOL
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
			tpd->inuse = 0;
#endif
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
	/* track the tpd so he_service_tbrq can find it by dma address */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	/* ensure the descriptor entry is globally visible before the tail
	   pointer advances and the adapter is told about it */
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}
2309
2310 static int
2311 he_open(struct atm_vcc *vcc)
2312 {
2313         unsigned long flags;
2314         struct he_dev *he_dev = HE_DEV(vcc->dev);
2315         struct he_vcc *he_vcc;
2316         int err = 0;
2317         unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2318         short vpi = vcc->vpi;
2319         int vci = vcc->vci;
2320
2321         if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2322                 return 0;
2323
2324         HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2325
2326         set_bit(ATM_VF_ADDR, &vcc->flags);
2327
2328         cid = he_mkcid(he_dev, vpi, vci);
2329
2330         he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2331         if (he_vcc == NULL) {
2332                 hprintk("unable to allocate he_vcc during open\n");
2333                 return -ENOMEM;
2334         }
2335
2336         he_vcc->iov_tail = he_vcc->iov_head;
2337         he_vcc->pdu_len = 0;
2338         he_vcc->rc_index = -1;
2339
2340         init_waitqueue_head(&he_vcc->rx_waitq);
2341         init_waitqueue_head(&he_vcc->tx_waitq);
2342
2343         vcc->dev_data = he_vcc;
2344
2345         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2346                 int pcr_goal;
2347
2348                 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2349                 if (pcr_goal == 0)
2350                         pcr_goal = he_dev->atm_dev->link_rate;
2351                 if (pcr_goal < 0)       /* means round down, technically */
2352                         pcr_goal = -pcr_goal;
2353
2354                 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2355
2356                 switch (vcc->qos.aal) {
2357                         case ATM_AAL5:
2358                                 tsr0_aal = TSR0_AAL5;
2359                                 tsr4 = TSR4_AAL5;
2360                                 break;
2361                         case ATM_AAL0:
2362                                 tsr0_aal = TSR0_AAL0_SDU;
2363                                 tsr4 = TSR4_AAL0_SDU;
2364                                 break;
2365                         default:
2366                                 err = -EINVAL;
2367                                 goto open_failed;
2368                 }
2369
2370                 spin_lock_irqsave(&he_dev->global_lock, flags);
2371                 tsr0 = he_readl_tsr0(he_dev, cid);
2372                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2373
2374                 if (TSR0_CONN_STATE(tsr0) != 0) {
2375                         hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2376                         err = -EBUSY;
2377                         goto open_failed;
2378                 }
2379
2380                 switch (vcc->qos.txtp.traffic_class) {
2381                         case ATM_UBR:
2382                                 /* 2.3.3.1 open connection ubr */
2383
2384                                 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2385                                         TSR0_USE_WMIN | TSR0_UPDATE_GER;
2386                                 break;
2387
2388                         case ATM_CBR:
2389                                 /* 2.3.3.2 open connection cbr */
2390
2391                                 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2392                                 if ((he_dev->total_bw + pcr_goal)
2393                                         > (he_dev->atm_dev->link_rate * 9 / 10))
2394                                 {
2395                                         err = -EBUSY;
2396                                         goto open_failed;
2397                                 }
2398
2399                                 spin_lock_irqsave(&he_dev->global_lock, flags);                 /* also protects he_dev->cs_stper[] */
2400
2401                                 /* find an unused cs_stper register */
2402                                 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2403                                         if (he_dev->cs_stper[reg].inuse == 0 || 
2404                                             he_dev->cs_stper[reg].pcr == pcr_goal)
2405                                                         break;
2406
2407                                 if (reg == HE_NUM_CS_STPER) {
2408                                         err = -EBUSY;
2409                                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2410                                         goto open_failed;
2411                                 }
2412
2413                                 he_dev->total_bw += pcr_goal;
2414
2415                                 he_vcc->rc_index = reg;
2416                                 ++he_dev->cs_stper[reg].inuse;
2417                                 he_dev->cs_stper[reg].pcr = pcr_goal;
2418
2419                                 clock = he_is622(he_dev) ? 66667000 : 50000000;
2420                                 period = clock / pcr_goal;
2421                                 
2422                                 HPRINTK("rc_index = %d period = %d\n",
2423                                                                 reg, period);
2424
2425                                 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2426                                                         CS_STPER0 + reg);
2427                                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2428
2429                                 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2430                                                         TSR0_RC_INDEX(reg);
2431
2432                                 break;
2433                         default:
2434                                 err = -EINVAL;
2435                                 goto open_failed;
2436                 }
2437
2438                 spin_lock_irqsave(&he_dev->global_lock, flags);
2439
2440                 he_writel_tsr0(he_dev, tsr0, cid);
2441                 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2442                 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2443                                         TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2444                 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2445                 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2446
2447                 he_writel_tsr3(he_dev, 0x0, cid);
2448                 he_writel_tsr5(he_dev, 0x0, cid);
2449                 he_writel_tsr6(he_dev, 0x0, cid);
2450                 he_writel_tsr7(he_dev, 0x0, cid);
2451                 he_writel_tsr8(he_dev, 0x0, cid);
2452                 he_writel_tsr10(he_dev, 0x0, cid);
2453                 he_writel_tsr11(he_dev, 0x0, cid);
2454                 he_writel_tsr12(he_dev, 0x0, cid);
2455                 he_writel_tsr13(he_dev, 0x0, cid);
2456                 he_writel_tsr14(he_dev, 0x0, cid);
2457                 (void) he_readl_tsr0(he_dev, cid);              /* flush posted writes */
2458                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2459         }
2460
2461         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2462                 unsigned aal;
2463
2464                 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2465                                                 &HE_VCC(vcc)->rx_waitq);
2466
2467                 switch (vcc->qos.aal) {
2468                         case ATM_AAL5:
2469                                 aal = RSR0_AAL5;
2470                                 break;
2471                         case ATM_AAL0:
2472                                 aal = RSR0_RAWCELL;
2473                                 break;
2474                         default:
2475                                 err = -EINVAL;
2476                                 goto open_failed;
2477                 }
2478
2479                 spin_lock_irqsave(&he_dev->global_lock, flags);
2480
2481                 rsr0 = he_readl_rsr0(he_dev, cid);
2482                 if (rsr0 & RSR0_OPEN_CONN) {
2483                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2484
2485                         hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2486                         err = -EBUSY;
2487                         goto open_failed;
2488                 }
2489
2490 #ifdef USE_RBPS
2491                 rsr1 = RSR1_GROUP(0);
2492                 rsr4 = RSR4_GROUP(0);
2493 #else /* !USE_RBPS */
2494                 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2495                 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2496 #endif /* USE_RBPS */
2497                 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
2498                                 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2499
2500 #ifdef USE_CHECKSUM_HW
2501                 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2502                         rsr0 |= RSR0_TCP_CKSUM;
2503 #endif
2504
2505                 he_writel_rsr4(he_dev, rsr4, cid);
2506                 he_writel_rsr1(he_dev, rsr1, cid);
2507                 /* 5.1.11 last parameter initialized should be
2508                           the open/closed indication in rsr0 */
2509                 he_writel_rsr0(he_dev,
2510                         rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2511                 (void) he_readl_rsr0(he_dev, cid);              /* flush posted writes */
2512
2513                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2514         }
2515
2516 open_failed:
2517
2518         if (err) {
2519                 if (he_vcc)
2520                         kfree(he_vcc);
2521                 clear_bit(ATM_VF_ADDR, &vcc->flags);
2522         }
2523         else
2524                 set_bit(ATM_VF_READY, &vcc->flags);
2525
2526         return err;
2527 }
2528
/*
 * he_close() -- tear down an open virtual connection.
 *
 * Closes the receive side first (manual 2.7.2.2), then drains
 * outstanding transmit buffers and issues a flush/close on the
 * transmit side (2.1.2 and 2.3.1.1), releasing any CBR rate
 * controller reservation made in he_open().  Sleeps while waiting
 * for the adapter to acknowledge each close, so this must run in
 * process context.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* arm the waitqueue BEFORE issuing the close so the
		   completion interrupt cannot be missed */
		add_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);
		
		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* poll with exponential backoff (1ms .. 250ms, up to
		   MAX_RETRY tries) until the socket's tx buffer
		   accounting drains to zero */
		while (((tx_inuse = atomic_read(&vcc->sk->sk_wmem_alloc)) > 0) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev, 
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		/* queue an empty end-of-session TPD so the adapter
		   interrupts us when the connection has fully drained */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		add_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		/* busy-wait for the hardware to report session end and
		   an idle connection state */
		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/* release the CBR rate-controller reservation taken in
		   he_open(); NOTE(review): total_bw is decremented even
		   when inuse was already 0 -- TODO confirm intended */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2686
/*
 * he_send() -- transmit one packet on an open vcc.
 *
 * Builds a transmit packet descriptor (TPD) for the skb, DMA-maps
 * the data (one iovec per fragment when USE_SCATTERGATHER is set,
 * chaining additional TPDs if a packet has more than TPD_MAXIOV
 * fragments) and queues it on the TPDRQ under the global lock.
 *
 * The skb is released here (via vcc->pop or dev_kfree_skb_any) only
 * on error; on success ownership passes to the completion path via
 * tpd->skb.  Returns 0 on success, -EINVAL or -ENOMEM on failure.
 */
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	/* reject packets larger than one TPD can describe, and AAL0
	   packets that are not exactly one cell (ATM_AAL0_SDU) */
	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		/* AAL0: extract pti/clp from byte 3 of the raw cell
		   header supplied by the caller, then strip the header
		   down to the cell payload */
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	/* slot 0 carries the linear part of the skb */
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb->len - skb->data_len, PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb->len - skb->data_len;
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;

	}

	/* mark the final iovec so the adapter knows the packet ends here */
	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;

	/* tpd->skb non-NULL marks this as the last TPD of the packet;
	   the completion handler frees the skb from it */
	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
2809
2810 static int
2811 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2812 {
2813         unsigned long flags;
2814         struct he_dev *he_dev = HE_DEV(atm_dev);
2815         struct he_ioctl_reg reg;
2816         int err = 0;
2817
2818         switch (cmd) {
2819                 case HE_GET_REG:
2820                         if (!capable(CAP_NET_ADMIN))
2821                                 return -EPERM;
2822
2823                         if (copy_from_user(&reg, arg,
2824                                            sizeof(struct he_ioctl_reg)))
2825                                 return -EFAULT;
2826                         
2827                         spin_lock_irqsave(&he_dev->global_lock, flags);
2828                         switch (reg.type) {
2829                                 case HE_REGTYPE_PCI:
2830                                         reg.val = he_readl(he_dev, reg.addr);
2831                                         break;
2832                                 case HE_REGTYPE_RCM:
2833                                         reg.val =
2834                                                 he_readl_rcm(he_dev, reg.addr);
2835                                         break;
2836                                 case HE_REGTYPE_TCM:
2837                                         reg.val =
2838                                                 he_readl_tcm(he_dev, reg.addr);
2839                                         break;
2840                                 case HE_REGTYPE_MBOX:
2841                                         reg.val =
2842                                                 he_readl_mbox(he_dev, reg.addr);
2843                                         break;
2844                                 default:
2845                                         err = -EINVAL;
2846                                         break;
2847                         }
2848                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2849                         if (err == 0)
2850                                 if (copy_to_user(arg, &reg,
2851                                                         sizeof(struct he_ioctl_reg)))
2852                                         return -EFAULT;
2853                         break;
2854                 default:
2855 #ifdef CONFIG_ATM_HE_USE_SUNI
2856                         if (atm_dev->phy && atm_dev->phy->ioctl)
2857                                 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2858 #else /* CONFIG_ATM_HE_USE_SUNI */
2859                         err = -EINVAL;
2860 #endif /* CONFIG_ATM_HE_USE_SUNI */
2861                         break;
2862         }
2863
2864         return err;
2865 }
2866
2867 static void
2868 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2869 {
2870         unsigned long flags;
2871         struct he_dev *he_dev = HE_DEV(atm_dev);
2872
2873         HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2874
2875         spin_lock_irqsave(&he_dev->global_lock, flags);
2876         he_writel(he_dev, val, FRAMER + (addr*4));
2877         (void) he_readl(he_dev, FRAMER + (addr*4));             /* flush posted writes */
2878         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2879 }
2880  
2881         
2882 static unsigned char
2883 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2884
2885         unsigned long flags;
2886         struct he_dev *he_dev = HE_DEV(atm_dev);
2887         unsigned reg;
2888
2889         spin_lock_irqsave(&he_dev->global_lock, flags);
2890         reg = he_readl(he_dev, FRAMER + (addr*4));
2891         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2892
2893         HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2894         return reg;
2895 }
2896
2897 static int
2898 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2899 {
2900         unsigned long flags;
2901         struct he_dev *he_dev = HE_DEV(dev);
2902         int left, i;
2903 #ifdef notdef
2904         struct he_rbrq *rbrq_tail;
2905         struct he_tpdrq *tpdrq_head;
2906         int rbpl_head, rbpl_tail;
2907 #endif
2908         static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2909
2910
2911         left = *pos;
2912         if (!left--)
2913                 return sprintf(page, "%s\n", version);
2914
2915         if (!left--)
2916                 return sprintf(page, "%s%s\n\n",
2917                         he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2918
2919         if (!left--)
2920                 return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");
2921
2922         spin_lock_irqsave(&he_dev->global_lock, flags);
2923         mcc += he_readl(he_dev, MCC);
2924         oec += he_readl(he_dev, OEC);
2925         dcc += he_readl(he_dev, DCC);
2926         cec += he_readl(he_dev, CEC);
2927         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2928
2929         if (!left--)
2930                 return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
2931                                                         mcc, oec, dcc, cec);
2932
2933         if (!left--)
2934                 return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
2935                                 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2936
2937         if (!left--)
2938                 return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
2939                                                 CONFIG_TPDRQ_SIZE);
2940
2941         if (!left--)
2942                 return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
2943                                 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2944
2945         if (!left--)
2946                 return sprintf(page, "tbrq_size = %d  peak = %d\n",
2947                                         CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2948
2949
2950 #ifdef notdef
2951         rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2952         rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2953
2954         inuse = rbpl_head - rbpl_tail;
2955         if (inuse < 0)
2956                 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2957         inuse /= sizeof(struct he_rbp);
2958
2959         if (!left--)
2960                 return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
2961                                                 CONFIG_RBPL_SIZE, inuse);
2962 #endif
2963
2964         if (!left--)
2965                 return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");
2966
2967         for (i = 0; i < HE_NUM_CS_STPER; ++i)
2968                 if (!left--)
2969                         return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
2970                                                 he_dev->cs_stper[i].pcr,
2971                                                 he_dev->cs_stper[i].inuse);
2972
2973         if (!left--)
2974                 return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
2975                         he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2976
2977         return 0;
2978 }
2979
2980 /* eeprom routines  -- see 4.7 */
2981
/* eeprom read -- see section 4.7 of the manual.
 *
 * read_prom_byte() bit-bangs the serial EEPROM through the
 * HOST_CNTL register: send the READ opcode (readtab[]), clock out
 * the 8-bit address MSB first, then clock in the 8 data bits.
 * Every transition is followed by an EEPROM_DELAY to respect the
 * part's timing.  Returns the byte read.
 */
u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	/* preserve HOST_CNTL, clearing the EEPROM control bits
	   (NOTE(review): mask 0xFFFFE0FF clears bits 8-12 -- assumed
	   to be the serial-EEPROM interface bits, per manual 4.7) */
	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;
       
	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);
       
	/* Send READ instruction */
	for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	/* Next, we need to send the byte address to read from */
	/* (MSB first; each address bit is presented on bit 9 for two
	   successive clock phases from clocktab[]) */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);
       
	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	/* deselect the chip when done */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
3031
/* Module metadata and load-time parameters.  Each parameter's
 * meaning is given by its MODULE_PARM_DESC string; values are read
 * once when the module initializes. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
MODULE_PARM(disable64, "h");
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
MODULE_PARM(nvpibits, "i");
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
MODULE_PARM(nvcibits, "i");
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
MODULE_PARM(rx_skb_reserve, "i");
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
MODULE_PARM(irq_coalesce, "i");
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
MODULE_PARM(sdh, "i");
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3047
/* PCI ids this driver binds to: any FORE HE adapter. */
static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }			/* terminating entry */
};
3053
/* PCI driver glue: probe/remove hooks plus the device id table. */
static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};
3060
3061 static int __init he_init(void)
3062 {
3063         return pci_module_init(&he_driver);
3064 }
3065
3066 static void __exit he_cleanup(void)
3067 {
3068         pci_unregister_driver(&he_driver);
3069 }
3070
module_init(he_init);		/* called at module load */
module_exit(he_cleanup);	/* called at module unload */