patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / drivers / atm / he.c
1 /* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
2
3 /*
4
5   he.c
6
7   ForeRunnerHE ATM Adapter driver for ATM on Linux
8   Copyright (C) 1999-2001  Naval Research Laboratory
9
10   This library is free software; you can redistribute it and/or
11   modify it under the terms of the GNU Lesser General Public
12   License as published by the Free Software Foundation; either
13   version 2.1 of the License, or (at your option) any later version.
14
15   This library is distributed in the hope that it will be useful,
16   but WITHOUT ANY WARRANTY; without even the implied warranty of
17   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18   Lesser General Public License for more details.
19
20   You should have received a copy of the GNU Lesser General Public
21   License along with this library; if not, write to the Free Software
22   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23
24 */
25
26 /*
27
28   he.c
29
30   ForeRunnerHE ATM Adapter driver for ATM on Linux
31   Copyright (C) 1999-2001  Naval Research Laboratory
32
33   Permission to use, copy, modify and distribute this software and its
34   documentation is hereby granted, provided that both the copyright
35   notice and this permission notice appear in all copies of the software,
36   derivative works or modified versions, and any portions thereof, and
37   that both notices appear in supporting documentation.
38
39   NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
40   DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
41   RESULTING FROM THE USE OF THIS SOFTWARE.
42
43   This driver was written using the "Programmer's Reference Manual for
44   ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45
46   AUTHORS:
47         chas williams <chas@cmf.nrl.navy.mil>
48         eric kinzie <ekinzie@cmf.nrl.navy.mil>
49
50   NOTES:
51         4096 supported 'connections'
52         group 0 is used for all traffic
53         interrupt queue 0 is used for all interrupts
54         aal0 support (based on work from ulrich.u.muller@nokia.com)
55
56  */
57
58 #include <linux/config.h>
59 #include <linux/module.h>
60 #include <linux/version.h>
61 #include <linux/kernel.h>
62 #include <linux/skbuff.h>
63 #include <linux/pci.h>
64 #include <linux/errno.h>
65 #include <linux/types.h>
66 #include <linux/string.h>
67 #include <linux/delay.h>
68 #include <linux/init.h>
69 #include <linux/mm.h>
70 #include <linux/sched.h>
71 #include <linux/timer.h>
72 #include <linux/interrupt.h>
73 #include <asm/io.h>
74 #include <asm/byteorder.h>
75 #include <asm/uaccess.h>
76
77 #include <linux/atmdev.h>
78 #include <linux/atm.h>
79 #include <linux/sonet.h>
80
81 #define USE_TASKLET
82 #undef USE_SCATTERGATHER
83 #undef USE_CHECKSUM_HW                  /* still confused about this */
84 #define USE_RBPS
85 #undef USE_RBPS_POOL                    /* if memory is tight try this */
86 #undef USE_RBPL_POOL                    /* if memory is tight try this */
87 #define USE_TPD_POOL
88 /* #undef CONFIG_ATM_HE_USE_SUNI */
89
90 /* compatibility */
91
92 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
93 typedef void irqreturn_t;
94 #define IRQ_NONE
95 #define IRQ_HANDLED
96 #define IRQ_RETVAL(x)
97 #endif
98
99 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,9)
100 #define __devexit_p(func)               func
101 #endif
102
103 #ifndef MODULE_LICENSE
104 #define MODULE_LICENSE(x)
105 #endif
106
107 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
108 #define pci_set_drvdata(pci_dev, data)  (pci_dev)->driver_data = (data)
109 #define pci_get_drvdata(pci_dev)        (pci_dev)->driver_data
110 #endif
111
112 #include "he.h"
113
114 #include "suni.h"
115
116 #include <linux/atm_he.h>
117
118 #define hprintk(fmt,args...)    printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
119
120 #undef DEBUG
121 #ifdef DEBUG
122 #define HPRINTK(fmt,args...)    printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
123 #else
124 #define HPRINTK(fmt,args...)    do { } while (0)
125 #endif /* DEBUG */
126
127
128 /* version definition */
129
130 static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
131
132 /* declarations */
133
134 static int he_open(struct atm_vcc *vcc);
135 static void he_close(struct atm_vcc *vcc);
136 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
137 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
138 static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
139 static void he_tasklet(unsigned long data);
140 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
141 static int he_start(struct atm_dev *dev);
142 static void he_stop(struct he_dev *dev);
143 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
144 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
145
146 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
147
148 /* globals */
149
150 struct he_dev *he_devs = NULL;
151 static short disable64 = -1;
152 static short nvpibits = -1;
153 static short nvcibits = -1;
154 static short rx_skb_reserve = 16;
155 static short irq_coalesce = 1;
156 static short sdh = 0;
157
158 static struct atmdev_ops he_ops =
159 {
160         .open =         he_open,
161         .close =        he_close,       
162         .ioctl =        he_ioctl,       
163         .send =         he_send,
164         .phy_put =      he_phy_put,
165         .phy_get =      he_phy_get,
166         .proc_read =    he_proc_read,
167         .owner =        THIS_MODULE
168 };
169
170 #define he_writel(dev, val, reg)        do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
171 #define he_readl(dev, reg)              readl((dev)->membase + (reg))
172
173 /* section 2.12 connection memory access */
174
175 static __inline__ void
176 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
177                                                                 unsigned flags)
178 {
179         he_writel(he_dev, val, CON_DAT);
180         (void) he_readl(he_dev, CON_DAT);               /* flush posted writes */
181         he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
182         while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
183 }
184
185 #define he_writel_rcm(dev, val, reg)                            \
186                         he_writel_internal(dev, val, reg, CON_CTL_RCM)
187
188 #define he_writel_tcm(dev, val, reg)                            \
189                         he_writel_internal(dev, val, reg, CON_CTL_TCM)
190
191 #define he_writel_mbox(dev, val, reg)                           \
192                         he_writel_internal(dev, val, reg, CON_CTL_MBOX)
193
194 static unsigned
195 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
196 {
197         he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
198         while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
199         return he_readl(he_dev, CON_DAT);
200 }
201
202 #define he_readl_rcm(dev, reg) \
203                         he_readl_internal(dev, reg, CON_CTL_RCM)
204
205 #define he_readl_tcm(dev, reg) \
206                         he_readl_internal(dev, reg, CON_CTL_TCM)
207
208 #define he_readl_mbox(dev, reg) \
209                         he_readl_internal(dev, reg, CON_CTL_MBOX)
210
211
212 /* figure 2.2 connection id */
213
214 #define he_mkcid(dev, vpi, vci)         (((vpi << (dev)->vcibits) | vci) & 0x1fff)
215
216 /* 2.5.1 per connection transmit state registers */
217
218 #define he_writel_tsr0(dev, val, cid) \
219                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
220 #define he_readl_tsr0(dev, cid) \
221                 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
222
223 #define he_writel_tsr1(dev, val, cid) \
224                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
225
226 #define he_writel_tsr2(dev, val, cid) \
227                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
228
229 #define he_writel_tsr3(dev, val, cid) \
230                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
231
232 #define he_writel_tsr4(dev, val, cid) \
233                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
234
235         /* from page 2-20
236          *
237          * NOTE While the transmit connection is active, bits 23 through 0
238          *      of this register must not be written by the host.  Byte
239          *      enables should be used during normal operation when writing
240          *      the most significant byte.
241          */
242
243 #define he_writel_tsr4_upper(dev, val, cid) \
244                 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
245                                                         CON_CTL_TCM \
246                                                         | CON_BYTE_DISABLE_2 \
247                                                         | CON_BYTE_DISABLE_1 \
248                                                         | CON_BYTE_DISABLE_0)
249
250 #define he_readl_tsr4(dev, cid) \
251                 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
252
253 #define he_writel_tsr5(dev, val, cid) \
254                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
255
256 #define he_writel_tsr6(dev, val, cid) \
257                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
258
259 #define he_writel_tsr7(dev, val, cid) \
260                 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
261
262
263 #define he_writel_tsr8(dev, val, cid) \
264                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
265
266 #define he_writel_tsr9(dev, val, cid) \
267                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
268
269 #define he_writel_tsr10(dev, val, cid) \
270                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
271
272 #define he_writel_tsr11(dev, val, cid) \
273                 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
274
275
276 #define he_writel_tsr12(dev, val, cid) \
277                 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
278
279 #define he_writel_tsr13(dev, val, cid) \
280                 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
281
282
283 #define he_writel_tsr14(dev, val, cid) \
284                 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
285
286 #define he_writel_tsr14_upper(dev, val, cid) \
287                 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
288                                                         CON_CTL_TCM \
289                                                         | CON_BYTE_DISABLE_2 \
290                                                         | CON_BYTE_DISABLE_1 \
291                                                         | CON_BYTE_DISABLE_0)
292
293 /* 2.7.1 per connection receive state registers */
294
295 #define he_writel_rsr0(dev, val, cid) \
296                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
297 #define he_readl_rsr0(dev, cid) \
298                 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
299
300 #define he_writel_rsr1(dev, val, cid) \
301                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
302
303 #define he_writel_rsr2(dev, val, cid) \
304                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
305
306 #define he_writel_rsr3(dev, val, cid) \
307                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
308
309 #define he_writel_rsr4(dev, val, cid) \
310                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
311
312 #define he_writel_rsr5(dev, val, cid) \
313                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
314
315 #define he_writel_rsr6(dev, val, cid) \
316                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
317
318 #define he_writel_rsr7(dev, val, cid) \
319                 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
320
321 static __inline__ struct atm_vcc*
322 __find_vcc(struct he_dev *he_dev, unsigned cid)
323 {
324         struct hlist_head *head;
325         struct atm_vcc *vcc;
326         struct hlist_node *node;
327         struct sock *s;
328         short vpi;
329         int vci;
330
331         vpi = cid >> he_dev->vcibits;
332         vci = cid & ((1 << he_dev->vcibits) - 1);
333         head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
334
335         sk_for_each(s, node, head) {
336                 vcc = atm_sk(s);
337                 if (vcc->dev == he_dev->atm_dev &&
338                     vcc->vci == vci && vcc->vpi == vpi &&
339                     vcc->qos.rxtp.traffic_class != ATM_NONE) {
340                                 return vcc;
341                 }
342         }
343         return NULL;
344 }
345
346 static int __devinit
347 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
348 {
349         struct atm_dev *atm_dev = NULL;
350         struct he_dev *he_dev = NULL;
351         int err = 0;
352
353         printk(KERN_INFO "he: %s\n", version);
354
355         if (pci_enable_device(pci_dev))
356                 return -EIO;
357         if (pci_set_dma_mask(pci_dev, HE_DMA_MASK) != 0) {
358                 printk(KERN_WARNING "he: no suitable dma available\n");
359                 err = -EIO;
360                 goto init_one_failure;
361         }
362
363         atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, 0);
364         if (!atm_dev) {
365                 err = -ENODEV;
366                 goto init_one_failure;
367         }
368         pci_set_drvdata(pci_dev, atm_dev);
369
370         he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
371                                                         GFP_KERNEL);
372         if (!he_dev) {
373                 err = -ENOMEM;
374                 goto init_one_failure;
375         }
376         memset(he_dev, 0, sizeof(struct he_dev));
377
378         he_dev->pci_dev = pci_dev;
379         he_dev->atm_dev = atm_dev;
380         he_dev->atm_dev->dev_data = he_dev;
381         atm_dev->dev_data = he_dev;
382         he_dev->number = atm_dev->number;
383         if (he_start(atm_dev)) {
384                 he_stop(he_dev);
385                 err = -ENODEV;
386                 goto init_one_failure;
387         }
388         he_dev->next = NULL;
389         if (he_devs)
390                 he_dev->next = he_devs;
391         he_devs = he_dev;
392         return 0;
393
394 init_one_failure:
395         if (atm_dev)
396                 atm_dev_deregister(atm_dev);
397         if (he_dev)
398                 kfree(he_dev);
399         pci_disable_device(pci_dev);
400         return err;
401 }
402
403 static void __devexit
404 he_remove_one (struct pci_dev *pci_dev)
405 {
406         struct atm_dev *atm_dev;
407         struct he_dev *he_dev;
408
409         atm_dev = pci_get_drvdata(pci_dev);
410         he_dev = HE_DEV(atm_dev);
411
412         /* need to remove from he_devs */
413
414         he_stop(he_dev);
415         atm_dev_deregister(atm_dev);
416         kfree(he_dev);
417
418         pci_set_drvdata(pci_dev, NULL);
419         pci_disable_device(pci_dev);
420 }
421
422
/*
 * Convert a cell rate in cells-per-second to the atm forum
 * floating-point format: bit 14 = nonzero flag, bits 13..9 = exponent,
 * bits 8..0 = mantissa.  A rate of 0 encodes as 0.
 */
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp;

	if (rate == 0)
		return 0;

	/* scale so the mantissa occupies 9 bits, then normalize by
	 * shifting right until the value fits in 10 bits, counting the
	 * exponent as we go */
	rate <<= 9;
	for (exp = 0; rate > 0x3ff; ++exp)
		rate >>= 1;

	return NONZERO | (exp << 9) | (rate & 0x1ff);
}
441
442 static void __init
443 he_init_rx_lbfp0(struct he_dev *he_dev)
444 {
445         unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
446         unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
447         unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
448         unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
449         
450         lbufd_index = 0;
451         lbm_offset = he_readl(he_dev, RCMLBM_BA);
452
453         he_writel(he_dev, lbufd_index, RLBF0_H);
454
455         for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
456                 lbufd_index += 2;
457                 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
458
459                 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
460                 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
461
462                 if (++lbuf_count == lbufs_per_row) {
463                         lbuf_count = 0;
464                         row_offset += he_dev->bytes_per_row;
465                 }
466                 lbm_offset += 4;
467         }
468                 
469         he_writel(he_dev, lbufd_index - 2, RLBF0_T);
470         he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
471 }
472
473 static void __init
474 he_init_rx_lbfp1(struct he_dev *he_dev)
475 {
476         unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
477         unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
478         unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
479         unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
480         
481         lbufd_index = 1;
482         lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
483
484         he_writel(he_dev, lbufd_index, RLBF1_H);
485
486         for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
487                 lbufd_index += 2;
488                 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
489
490                 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
491                 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
492
493                 if (++lbuf_count == lbufs_per_row) {
494                         lbuf_count = 0;
495                         row_offset += he_dev->bytes_per_row;
496                 }
497                 lbm_offset += 4;
498         }
499                 
500         he_writel(he_dev, lbufd_index - 2, RLBF1_T);
501         he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
502 }
503
504 static void __init
505 he_init_tx_lbfp(struct he_dev *he_dev)
506 {
507         unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508         unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509         unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510         unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
511         
512         lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513         lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
514
515         he_writel(he_dev, lbufd_index, TLBF_H);
516
517         for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
518                 lbufd_index += 1;
519                 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
520
521                 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522                 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
523
524                 if (++lbuf_count == lbufs_per_row) {
525                         lbuf_count = 0;
526                         row_offset += he_dev->bytes_per_row;
527                 }
528                 lbm_offset += 2;
529         }
530                 
531         he_writel(he_dev, lbufd_index - 1, TLBF_T);
532 }
533
534 static int __init
535 he_init_tpdrq(struct he_dev *he_dev)
536 {
537         he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
538                 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
539         if (he_dev->tpdrq_base == NULL) {
540                 hprintk("failed to alloc tpdrq\n");
541                 return -ENOMEM;
542         }
543         memset(he_dev->tpdrq_base, 0,
544                                 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
545
546         he_dev->tpdrq_tail = he_dev->tpdrq_base;
547         he_dev->tpdrq_head = he_dev->tpdrq_base;
548
549         he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
550         he_writel(he_dev, 0, TPDRQ_T);  
551         he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
552
553         return 0;
554 }
555
556 static void __init
557 he_init_cs_block(struct he_dev *he_dev)
558 {
559         unsigned clock, rate, delta;
560         int reg;
561
562         /* 5.1.7 cs block initialization */
563
564         for (reg = 0; reg < 0x20; ++reg)
565                 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
566
567         /* rate grid timer reload values */
568
569         clock = he_is622(he_dev) ? 66667000 : 50000000;
570         rate = he_dev->atm_dev->link_rate;
571         delta = rate / 16 / 2;
572
573         for (reg = 0; reg < 0x10; ++reg) {
574                 /* 2.4 internal transmit function
575                  *
576                  * we initialize the first row in the rate grid.
577                  * values are period (in clock cycles) of timer
578                  */
579                 unsigned period = clock / rate;
580
581                 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
582                 rate -= delta;
583         }
584
585         if (he_is622(he_dev)) {
586                 /* table 5.2 (4 cells per lbuf) */
587                 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
588                 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
589                 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
590                 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
591                 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
592
593                 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
594                 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
595                 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
596                 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
597                 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
598                 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
599                 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
600
601                 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
602
603                 /* table 5.8 */
604                 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
605                 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
606                 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
607                 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
608                 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
609                 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
610
611                 /* table 5.9 */
612                 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
613                 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
614         } else {
615                 /* table 5.1 (4 cells per lbuf) */
616                 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
617                 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
618                 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
619                 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
620                 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
621
622                 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
623                 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
624                 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
625                 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
626                 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
627                 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
628                 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
629
630                 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
631
632                 /* table 5.8 */
633                 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
634                 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
635                 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
636                 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
637                 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
638                 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
639
640                 /* table 5.9 */
641                 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
642                 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
643         }
644
645         he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
646
647         for (reg = 0; reg < 0x8; ++reg)
648                 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
649
650 }
651
652 static int __init
653 he_init_cs_block_rcm(struct he_dev *he_dev)
654 {
655         unsigned (*rategrid)[16][16];
656         unsigned rate, delta;
657         int i, j, reg;
658
659         unsigned rate_atmf, exp, man;
660         unsigned long long rate_cps;
661         int mult, buf, buf_limit = 4;
662
663         rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
664         if (!rategrid)
665                 return -ENOMEM;
666
667         /* initialize rate grid group table */
668
669         for (reg = 0x0; reg < 0xff; ++reg)
670                 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
671
672         /* initialize rate controller groups */
673
674         for (reg = 0x100; reg < 0x1ff; ++reg)
675                 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
676         
677         /* initialize tNrm lookup table */
678
679         /* the manual makes reference to a routine in a sample driver
680            for proper configuration; fortunately, we only need this
681            in order to support abr connection */
682         
683         /* initialize rate to group table */
684
685         rate = he_dev->atm_dev->link_rate;
686         delta = rate / 32;
687
688         /*
689          * 2.4 transmit internal functions
690          * 
691          * we construct a copy of the rate grid used by the scheduler
692          * in order to construct the rate to group table below
693          */
694
695         for (j = 0; j < 16; j++) {
696                 (*rategrid)[0][j] = rate;
697                 rate -= delta;
698         }
699
700         for (i = 1; i < 16; i++)
701                 for (j = 0; j < 16; j++)
702                         if (i > 14)
703                                 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
704                         else
705                                 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
706
707         /*
708          * 2.4 transmit internal function
709          *
710          * this table maps the upper 5 bits of exponent and mantissa
711          * of the atm forum representation of the rate into an index
712          * on rate grid  
713          */
714
715         rate_atmf = 0;
716         while (rate_atmf < 0x400) {
717                 man = (rate_atmf & 0x1f) << 4;
718                 exp = rate_atmf >> 5;
719
720                 /* 
721                         instead of '/ 512', use '>> 9' to prevent a call
722                         to divdu3 on x86 platforms
723                 */
724                 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
725
726                 if (rate_cps < 10)
727                         rate_cps = 10;  /* 2.2.1 minimum payload rate is 10 cps */
728
729                 for (i = 255; i > 0; i--)
730                         if ((*rategrid)[i/16][i%16] >= rate_cps)
731                                 break;   /* pick nearest rate instead? */
732
733                 /*
734                  * each table entry is 16 bits: (rate grid index (8 bits)
735                  * and a buffer limit (8 bits)
736                  * there are two table entries in each 32-bit register
737                  */
738
739 #ifdef notdef
740                 buf = rate_cps * he_dev->tx_numbuffs /
741                                 (he_dev->atm_dev->link_rate * 2);
742 #else
743                 /* this is pretty, but avoids _divdu3 and is mostly correct */
744                 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
745                 if (rate_cps > (272 * mult))
746                         buf = 4;
747                 else if (rate_cps > (204 * mult))
748                         buf = 3;
749                 else if (rate_cps > (136 * mult))
750                         buf = 2;
751                 else if (rate_cps > (68 * mult))
752                         buf = 1;
753                 else
754                         buf = 0;
755 #endif
756                 if (buf > buf_limit)
757                         buf = buf_limit;
758                 reg = (reg << 16) | ((i << 8) | buf);
759
760 #define RTGTBL_OFFSET 0x400
761           
762                 if (rate_atmf & 0x1)
763                         he_writel_rcm(he_dev, reg,
764                                 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
765
766                 ++rate_atmf;
767         }
768
769         kfree(rategrid);
770         return 0;
771 }
772
773 static int __init
774 he_init_group(struct he_dev *he_dev, int group)
775 {
776         int i;
777
778 #ifdef USE_RBPS
779         /* small buffer pool */
780 #ifdef USE_RBPS_POOL
781         he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
782                         CONFIG_RBPS_BUFSIZE, 8, 0);
783         if (he_dev->rbps_pool == NULL) {
784                 hprintk("unable to create rbps pages\n");
785                 return -ENOMEM;
786         }
787 #else /* !USE_RBPS_POOL */
788         he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
789                 CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
790         if (he_dev->rbps_pages == NULL) {
791                 hprintk("unable to create rbps page pool\n");
792                 return -ENOMEM;
793         }
794 #endif /* USE_RBPS_POOL */
795
796         he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
797                 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
798         if (he_dev->rbps_base == NULL) {
799                 hprintk("failed to alloc rbps\n");
800                 return -ENOMEM;
801         }
802         memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
803         he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
804
805         for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
806                 dma_addr_t dma_handle;
807                 void *cpuaddr;
808
809 #ifdef USE_RBPS_POOL 
810                 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
811                 if (cpuaddr == NULL)
812                         return -ENOMEM;
813 #else
814                 cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
815                 dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
816 #endif
817
818                 he_dev->rbps_virt[i].virt = cpuaddr;
819                 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
820                 he_dev->rbps_base[i].phys = dma_handle;
821
822         }
823         he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
824
825         he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
826         he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
827                                                 G0_RBPS_T + (group * 32));
828         he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
829                                                 G0_RBPS_BS + (group * 32));
830         he_writel(he_dev,
831                         RBP_THRESH(CONFIG_RBPS_THRESH) |
832                         RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
833                         RBP_INT_ENB,
834                                                 G0_RBPS_QI + (group * 32));
835 #else /* !USE_RBPS */
836         he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
837         he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
838         he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
839         he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
840                                                 G0_RBPS_BS + (group * 32));
841 #endif /* USE_RBPS */
842
843         /* large buffer pool */
844 #ifdef USE_RBPL_POOL
845         he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
846                         CONFIG_RBPL_BUFSIZE, 8, 0);
847         if (he_dev->rbpl_pool == NULL) {
848                 hprintk("unable to create rbpl pool\n");
849                 return -ENOMEM;
850         }
851 #else /* !USE_RBPL_POOL */
852         he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
853                 CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
854         if (he_dev->rbpl_pages == NULL) {
855                 hprintk("unable to create rbpl pages\n");
856                 return -ENOMEM;
857         }
858 #endif /* USE_RBPL_POOL */
859
860         he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
861                 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
862         if (he_dev->rbpl_base == NULL) {
863                 hprintk("failed to alloc rbpl\n");
864                 return -ENOMEM;
865         }
866         memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
867         he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
868
869         for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
870                 dma_addr_t dma_handle;
871                 void *cpuaddr;
872
873 #ifdef USE_RBPL_POOL
874                 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
875                 if (cpuaddr == NULL)
876                         return -ENOMEM;
877 #else
878                 cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
879                 dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
880 #endif
881
882                 he_dev->rbpl_virt[i].virt = cpuaddr;
883                 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
884                 he_dev->rbpl_base[i].phys = dma_handle;
885         }
886         he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
887
888         he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
889         he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
890                                                 G0_RBPL_T + (group * 32));
891         he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
892                                                 G0_RBPL_BS + (group * 32));
893         he_writel(he_dev,
894                         RBP_THRESH(CONFIG_RBPL_THRESH) |
895                         RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
896                         RBP_INT_ENB,
897                                                 G0_RBPL_QI + (group * 32));
898
899         /* rx buffer ready queue */
900
901         he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
902                 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
903         if (he_dev->rbrq_base == NULL) {
904                 hprintk("failed to allocate rbrq\n");
905                 return -ENOMEM;
906         }
907         memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
908
909         he_dev->rbrq_head = he_dev->rbrq_base;
910         he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
911         he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
912         he_writel(he_dev,
913                 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
914                                                 G0_RBRQ_Q + (group * 16));
915         if (irq_coalesce) {
916                 hprintk("coalescing interrupts\n");
917                 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
918                                                 G0_RBRQ_I + (group * 16));
919         } else
920                 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
921                                                 G0_RBRQ_I + (group * 16));
922
923         /* tx buffer ready queue */
924
925         he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
926                 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
927         if (he_dev->tbrq_base == NULL) {
928                 hprintk("failed to allocate tbrq\n");
929                 return -ENOMEM;
930         }
931         memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
932
933         he_dev->tbrq_head = he_dev->tbrq_base;
934
935         he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
936         he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
937         he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
938         he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
939
940         return 0;
941 }
942
/*
 * he_init_irq() - allocate the host interrupt (status) queue, point the
 * adapter's IRQ group 0 registers at it, park IRQ groups 1-3, and claim
 * the PCI interrupt line.
 *
 * Returns 0 on success, -ENOMEM if the queue cannot be allocated, or
 * -EINVAL if the interrupt line cannot be requested.  On failure the
 * irq queue is NOT freed here; he_stop() releases irq_base when it is
 * non-NULL.
 */
943 static int __init
944 he_init_irq(struct he_dev *he_dev)
945 {
946         int i;
947
948         /* 2.9.3.5  tail offset for each interrupt queue is located after the
949                     end of the interrupt queue */
950
            /* CONFIG_IRQ_SIZE+1 entries: the extra slot past the end of the
               queue holds the tail-offset word set up just below */
951         he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
952                         (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
953         if (he_dev->irq_base == NULL) {
954                 hprintk("failed to allocate irq\n");
955                 return -ENOMEM;
956         }
957         he_dev->irq_tailoffset = (unsigned *)
958                                         &he_dev->irq_base[CONFIG_IRQ_SIZE];
959         *he_dev->irq_tailoffset = 0;
960         he_dev->irq_head = he_dev->irq_base;
961         he_dev->irq_tail = he_dev->irq_base;
962
            /* mark every slot empty so stale entries are never processed */
963         for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
964                 he_dev->irq_base[i].isw = ITYPE_INVALID;
965
            /* hand queue 0 to the adapter: base address, size/threshold,
               line-triggered interrupt A */
966         he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
967         he_writel(he_dev,
968                 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
969                                                                 IRQ0_HEAD);
970         he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
971         he_writel(he_dev, 0x0, IRQ0_DATA);
972
            /* interrupt queues 1-3 are unused: zero their registers */
973         he_writel(he_dev, 0x0, IRQ1_BASE);
974         he_writel(he_dev, 0x0, IRQ1_HEAD);
975         he_writel(he_dev, 0x0, IRQ1_CNTL);
976         he_writel(he_dev, 0x0, IRQ1_DATA);
977
978         he_writel(he_dev, 0x0, IRQ2_BASE);
979         he_writel(he_dev, 0x0, IRQ2_HEAD);
980         he_writel(he_dev, 0x0, IRQ2_CNTL);
981         he_writel(he_dev, 0x0, IRQ2_DATA);
982
983         he_writel(he_dev, 0x0, IRQ3_BASE);
984         he_writel(he_dev, 0x0, IRQ3_HEAD);
985         he_writel(he_dev, 0x0, IRQ3_CNTL);
986         he_writel(he_dev, 0x0, IRQ3_DATA);
987
988         /* 2.9.3.2 interrupt queue mapping registers */
989
            /* route all connection groups to interrupt queue 0 */
990         he_writel(he_dev, 0x0, GRP_10_MAP);
991         he_writel(he_dev, 0x0, GRP_32_MAP);
992         he_writel(he_dev, 0x0, GRP_54_MAP);
993         he_writel(he_dev, 0x0, GRP_76_MAP);
994
            /* shared, "fast" (SA_INTERRUPT) handler on the PCI line */
995         if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
996                 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
997                 return -EINVAL;
998         }   
999
            /* non-zero he_dev->irq tells he_stop() to free_irq() later */
1000         he_dev->irq = he_dev->pci_dev->irq;
1001
1002         return 0;
1003 }
1004
/*
 * he_start() - bring the adapter to an operational state: PCI bus
 * controller setup, card reset, PROM read, endianness/SDRAM setup,
 * interrupt and connection-memory initialization, host queue
 * allocation, framer start, and finally transmit/receive enable.
 * Section numbers in the comments refer to the "Programmer's Reference
 * Manual for ForeRunnerHE" (see file header).
 *
 * The register writes below are strictly ordered per the manual; do
 * not reorder them.
 *
 * Returns 0 on success or a negative errno.  On failure, resources
 * already allocated are left for he_stop() to release (it checks each
 * pointer before freeing) -- presumably the probe path calls it;
 * confirm against the caller.
 */
1005 static int __init
1006 he_start(struct atm_dev *dev)
1007 {
1008         struct he_dev *he_dev;
1009         struct pci_dev *pci_dev;
1010
1011         u16 command;
1012         u32 gen_cntl_0, host_cntl, lb_swap;
1013         u8 cache_size, timer;
1014         
             /* NOTE(review): err is declared unsigned yet receives the
                (negative) return of he_init_irq() and is returned as int;
                the value round-trips correctly but the type is misleading */
1015         unsigned err;
1016         unsigned int status, reg;
1017         int i, group;
1018
1019         he_dev = HE_DEV(dev);
1020         pci_dev = he_dev->pci_dev;
1021
             /* physical BAR0 address for now; overwritten below with the
                ioremap()ed virtual address */
1022         he_dev->membase = pci_dev->resource[0].start;
1023         HPRINTK("membase = 0x%lx  irq = %d.\n", he_dev->membase, pci_dev->irq);
1024
1025         /*
1026          * pci bus controller initialization 
1027          */
1028
1029         /* 4.3 pci bus controller-specific initialization */
1030         if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1031                 hprintk("can't read GEN_CNTL_0\n");
1032                 return -EINVAL;
1033         }
1034         gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1035         if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1036                 hprintk("can't write GEN_CNTL_0.\n");
1037                 return -EINVAL;
1038         }
1039
1040         if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1041                 hprintk("can't read PCI_COMMAND.\n");
1042                 return -EINVAL;
1043         }
1044
             /* enable memory decoding, bus mastering and mem-write-invalidate */
1045         command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1046         if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1047                 hprintk("can't enable memory.\n");
1048                 return -EINVAL;
1049         }
1050
1051         if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1052                 hprintk("can't read cache line size?\n");
1053                 return -EINVAL;
1054         }
1055
             /* raise the cache line size to a minimum of 16; a failed write
                here is only logged, not fatal */
1056         if (cache_size < 16) {
1057                 cache_size = 16;
1058                 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1059                         hprintk("can't set cache line size to %d\n", cache_size);
1060         }
1061
1062         if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1063                 hprintk("can't read latency timer?\n");
1064                 return -EINVAL;
1065         }
1066
1067         /* from table 3.9
1068          *
1069          * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1070          * 
1071          * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1072          * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1073          *
1074          */ 
1075 #define LAT_TIMER 209
1076         if (timer < LAT_TIMER) {
1077                 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1078                 timer = LAT_TIMER;
1079                 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1080                         hprintk("can't set latency timer to %d\n", timer);
1081         }
1082
             /* map the register window; membase now holds a virtual address */
1083         if (!(he_dev->membase = (unsigned long) ioremap(he_dev->membase, HE_REGMAP_SIZE))) {
1084                 hprintk("can't set up page mapping\n");
1085                 return -EINVAL;
1086         }
1087
1088         /* 4.4 card reset */
1089         he_writel(he_dev, 0x0, RESET_CNTL);
1090         he_writel(he_dev, 0xff, RESET_CNTL);
1091
1092         udelay(16*1000);        /* 16 ms */
1093         status = he_readl(he_dev, RESET_CNTL);
1094         if ((status & BOARD_RST_STATUS) == 0) {
1095                 hprintk("reset failed\n");
1096                 return -EINVAL;
1097         }
1098
1099         /* 4.5 set bus width */
1100         host_cntl = he_readl(he_dev, HOST_CNTL);
1101         if (host_cntl & PCI_BUS_SIZE64)
1102                 gen_cntl_0 |= ENBL_64;
1103         else
1104                 gen_cntl_0 &= ~ENBL_64;
1105
             /* "disable64" module parameter forces 32-bit transfers */
1106         if (disable64 == 1) {
1107                 hprintk("disabling 64-bit pci bus transfers\n");
1108                 gen_cntl_0 &= ~ENBL_64;
1109         }
1110
1111         if (gen_cntl_0 & ENBL_64)
1112                 hprintk("64-bit transfers enabled\n");
1113
1114         pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1115
1116         /* 4.7 read prom contents */
1117         for (i = 0; i < PROD_ID_LEN; ++i)
1118                 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1119
1120         he_dev->media = read_prom_byte(he_dev, MEDIA);
1121
1122         for (i = 0; i < 6; ++i)
1123                 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1124
             /* NOTE(review): "%x" prints the ESI bytes without zero padding
                (e.g. "0:3:..." rather than "00:03:...") */
1125         hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1126                                 he_dev->prod_id,
1127                                         he_dev->media & 0x40 ? "SM" : "MM",
1128                                                 dev->esi[0],
1129                                                 dev->esi[1],
1130                                                 dev->esi[2],
1131                                                 dev->esi[3],
1132                                                 dev->esi[4],
1133                                                 dev->esi[5]);
1134         he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1135                                                 ATM_OC12_PCR : ATM_OC3_PCR;
1136
1137         /* 4.6 set host endianess */
1138         lb_swap = he_readl(he_dev, LB_SWAP);
1139         if (he_is622(he_dev))
1140                 lb_swap &= ~XFER_SIZE;          /* 4 cells */
1141         else
1142                 lb_swap |= XFER_SIZE;           /* 8 cells */
1143 #ifdef __BIG_ENDIAN
1144         lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1145 #else
1146         lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1147                         DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1148 #endif /* __BIG_ENDIAN */
1149         he_writel(he_dev, lb_swap, LB_SWAP);
1150
1151         /* 4.8 sdram controller initialization */
1152         he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1153
1154         /* 4.9 initialize rnum value */
             /* LB_SWAP is intentionally written a second time, now with the
                rnum field added on top of the value set in 4.6 */
1155         lb_swap |= SWAP_RNUM_MAX(0xf);
1156         he_writel(he_dev, lb_swap, LB_SWAP);
1157
1158         /* 4.10 initialize the interrupt queues */
1159         if ((err = he_init_irq(he_dev)) != 0)
1160                 return err;
1161
1162 #ifdef USE_TASKLET
1163         tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
1164 #endif
1165         spin_lock_init(&he_dev->global_lock);
1166
1167         /* 4.11 enable pci bus controller state machines */
1168         host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1169                                 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1170         he_writel(he_dev, host_cntl, HOST_CNTL);
1171
1172         gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1173         pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1174
1175         /*
1176          * atm network controller initialization
1177          */
1178
1179         /* 5.1.1 generic configuration state */
1180
1181         /*
1182          *              local (cell) buffer memory map
1183          *                    
1184          *             HE155                          HE622
1185          *                                                      
1186          *        0 ____________1023 bytes  0 _______________________2047 bytes
1187          *         |            |            |                   |   |
1188          *         |  utility   |            |        rx0        |   |
1189          *        5|____________|         255|___________________| u |
1190          *        6|            |         256|                   | t |
1191          *         |            |            |                   | i |
1192          *         |    rx0     |     row    |        tx         | l |
1193          *         |            |            |                   | i |
1194          *         |            |         767|___________________| t |
1195          *      517|____________|         768|                   | y |
1196          * row  518|            |            |        rx1        |   |
1197          *         |            |        1023|___________________|___|
1198          *         |            |
1199          *         |    tx      |
1200          *         |            |
1201          *         |            |
1202          *     1535|____________|
1203          *     1536|            |
1204          *         |    rx1     |
1205          *     2047|____________|
1206          *
1207          */
1208
1209         /* total 4096 connections */
1210         he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1211         he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1212
             /* nvpibits/nvcibits module parameters may override the default
                VPI/VCI split, but together they must sum to HE_MAXCIDBITS */
1213         if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1214                 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1215                 return -ENODEV;
1216         }
1217
1218         if (nvpibits != -1) {
1219                 he_dev->vpibits = nvpibits;
1220                 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1221         }
1222
1223         if (nvcibits != -1) {
1224                 he_dev->vcibits = nvcibits;
1225                 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1226         }
1227
1228
             /* per-model local-buffer geometry; see the memory map above */
1229         if (he_is622(he_dev)) {
1230                 he_dev->cells_per_row = 40;
1231                 he_dev->bytes_per_row = 2048;
1232                 he_dev->r0_numrows = 256;
1233                 he_dev->tx_numrows = 512;
1234                 he_dev->r1_numrows = 256;
1235                 he_dev->r0_startrow = 0;
1236                 he_dev->tx_startrow = 256;
1237                 he_dev->r1_startrow = 768;
1238         } else {
1239                 he_dev->cells_per_row = 20;
1240                 he_dev->bytes_per_row = 1024;
1241                 he_dev->r0_numrows = 512;
1242                 he_dev->tx_numrows = 1018;
1243                 he_dev->r1_numrows = 512;
1244                 he_dev->r0_startrow = 6;
1245                 he_dev->tx_startrow = 518;
1246                 he_dev->r1_startrow = 1536;
1247         }
1248
             /* derive buffer counts from row geometry, capped at 2560 (rx)
                and 5120 (tx) buffers */
1249         he_dev->cells_per_lbuf = 4;
1250         he_dev->buffer_limit = 4;
1251         he_dev->r0_numbuffs = he_dev->r0_numrows *
1252                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1253         if (he_dev->r0_numbuffs > 2560)
1254                 he_dev->r0_numbuffs = 2560;
1255
1256         he_dev->r1_numbuffs = he_dev->r1_numrows *
1257                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1258         if (he_dev->r1_numbuffs > 2560)
1259                 he_dev->r1_numbuffs = 2560;
1260
1261         he_dev->tx_numbuffs = he_dev->tx_numrows *
1262                                 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1263         if (he_dev->tx_numbuffs > 5120)
1264                 he_dev->tx_numbuffs = 5120;
1265
1266         /* 5.1.2 configure hardware dependent registers */
1267
1268         he_writel(he_dev, 
1269                 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1270                 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1271                 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1272                 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1273                                                                 LBARB);
1274
1275         he_writel(he_dev, BANK_ON |
1276                 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1277                                                                 SDRAMCON);
1278
1279         he_writel(he_dev,
1280                 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1281                                                 RM_RW_WAIT(1), RCMCONFIG);
1282         he_writel(he_dev,
1283                 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1284                                                 TM_RW_WAIT(1), TCMCONFIG);
1285
1286         he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1287
1288         he_writel(he_dev, 
1289                 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1290                 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1291                 RX_VALVP(he_dev->vpibits) |
1292                 RX_VALVC(he_dev->vcibits),                       RC_CONFIG);
1293
1294         he_writel(he_dev, DRF_THRESH(0x20) |
1295                 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1296                 TX_VCI_MASK(he_dev->vcibits) |
1297                 LBFREE_CNT(he_dev->tx_numbuffs),                TX_CONFIG);
1298
1299         he_writel(he_dev, 0x0, TXAAL5_PROTO);
1300
1301         he_writel(he_dev, PHY_INT_ENB |
1302                 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1303                                                                 RH_CONFIG);
1304
1305         /* 5.1.3 initialize connection memory */
1306
             /* zero both connection memories word by word */
1307         for (i = 0; i < TCM_MEM_SIZE; ++i)
1308                 he_writel_tcm(he_dev, 0, i);
1309
1310         for (i = 0; i < RCM_MEM_SIZE; ++i)
1311                 he_writel_rcm(he_dev, 0, i);
1312
1313         /*
1314          *      transmit connection memory map
1315          *
1316          *                  tx memory
1317          *          0x0 ___________________
1318          *             |                   |
1319          *             |                   |
1320          *             |       TSRa        |
1321          *             |                   |
1322          *             |                   |
1323          *       0x8000|___________________|
1324          *             |                   |
1325          *             |       TSRb        |
1326          *       0xc000|___________________|
1327          *             |                   |
1328          *             |       TSRc        |
1329          *       0xe000|___________________|
1330          *             |       TSRd        |
1331          *       0xf000|___________________|
1332          *             |       tmABR       |
1333          *      0x10000|___________________|
1334          *             |                   |
1335          *             |       tmTPD       |
1336          *             |___________________|
1337          *             |                   |
1338          *                      ....
1339          *      0x1ffff|___________________|
1340          *
1341          *
1342          */
1343
1344         he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1345         he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1346         he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1347         he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1348         he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1349
1350
1351         /*
1352          *      receive connection memory map
1353          *
1354          *          0x0 ___________________
1355          *             |                   |
1356          *             |                   |
1357          *             |       RSRa        |
1358          *             |                   |
1359          *             |                   |
1360          *       0x8000|___________________|
1361          *             |                   |
1362          *             |             rx0/1 |
1363          *             |       LBM         |   link lists of local
1364          *             |             tx    |   buffer memory 
1365          *             |                   |
1366          *       0xd000|___________________|
1367          *             |                   |
1368          *             |      rmABR        |
1369          *       0xe000|___________________|
1370          *             |                   |
1371          *             |       RSRb        |
1372          *             |___________________|
1373          *             |                   |
1374          *                      ....
1375          *       0xffff|___________________|
1376          */
1377
1378         he_writel(he_dev, 0x08000, RCMLBM_BA);
1379         he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1380         he_writel(he_dev, 0x0d800, RCMABR_BA);
1381
1382         /* 5.1.4 initialize local buffer free pools linked lists */
1383
1384         he_init_rx_lbfp0(he_dev);
1385         he_init_rx_lbfp1(he_dev);
1386
1387         he_writel(he_dev, 0x0, RLBC_H);
1388         he_writel(he_dev, 0x0, RLBC_T);
1389         he_writel(he_dev, 0x0, RLBC_H2);
1390
1391         he_writel(he_dev, 512, RXTHRSH);        /* 10% of r0+r1 buffers */
1392         he_writel(he_dev, 256, LITHRSH);        /* 5% of r0+r1 buffers */
1393
1394         he_init_tx_lbfp(he_dev);
1395
1396         he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1397
1398         /* 5.1.5 initialize intermediate receive queues */
1399
1400         if (he_is622(he_dev)) {
1401                 he_writel(he_dev, 0x000f, G0_INMQ_S);
1402                 he_writel(he_dev, 0x200f, G0_INMQ_L);
1403
1404                 he_writel(he_dev, 0x001f, G1_INMQ_S);
1405                 he_writel(he_dev, 0x201f, G1_INMQ_L);
1406
1407                 he_writel(he_dev, 0x002f, G2_INMQ_S);
1408                 he_writel(he_dev, 0x202f, G2_INMQ_L);
1409
1410                 he_writel(he_dev, 0x003f, G3_INMQ_S);
1411                 he_writel(he_dev, 0x203f, G3_INMQ_L);
1412
1413                 he_writel(he_dev, 0x004f, G4_INMQ_S);
1414                 he_writel(he_dev, 0x204f, G4_INMQ_L);
1415
1416                 he_writel(he_dev, 0x005f, G5_INMQ_S);
1417                 he_writel(he_dev, 0x205f, G5_INMQ_L);
1418
1419                 he_writel(he_dev, 0x006f, G6_INMQ_S);
1420                 he_writel(he_dev, 0x206f, G6_INMQ_L);
1421
1422                 he_writel(he_dev, 0x007f, G7_INMQ_S);
1423                 he_writel(he_dev, 0x207f, G7_INMQ_L);
1424         } else {
1425                 he_writel(he_dev, 0x0000, G0_INMQ_S);
1426                 he_writel(he_dev, 0x0008, G0_INMQ_L);
1427
1428                 he_writel(he_dev, 0x0001, G1_INMQ_S);
1429                 he_writel(he_dev, 0x0009, G1_INMQ_L);
1430
1431                 he_writel(he_dev, 0x0002, G2_INMQ_S);
1432                 he_writel(he_dev, 0x000a, G2_INMQ_L);
1433
1434                 he_writel(he_dev, 0x0003, G3_INMQ_S);
1435                 he_writel(he_dev, 0x000b, G3_INMQ_L);
1436
1437                 he_writel(he_dev, 0x0004, G4_INMQ_S);
1438                 he_writel(he_dev, 0x000c, G4_INMQ_L);
1439
1440                 he_writel(he_dev, 0x0005, G5_INMQ_S);
1441                 he_writel(he_dev, 0x000d, G5_INMQ_L);
1442
1443                 he_writel(he_dev, 0x0006, G6_INMQ_S);
1444                 he_writel(he_dev, 0x000e, G6_INMQ_L);
1445
1446                 he_writel(he_dev, 0x0007, G7_INMQ_S);
1447                 he_writel(he_dev, 0x000f, G7_INMQ_L);
1448         }
1449
1450         /* 5.1.6 application tunable parameters */
1451
1452         he_writel(he_dev, 0x0, MCC);
1453         he_writel(he_dev, 0x0, OEC);
1454         he_writel(he_dev, 0x0, DCC);
1455         he_writel(he_dev, 0x0, CEC);
1456         
1457         /* 5.1.7 cs block initialization */
1458
1459         he_init_cs_block(he_dev);
1460
1461         /* 5.1.8 cs block connection memory initialization */
1462         
1463         if (he_init_cs_block_rcm(he_dev) < 0)
1464                 return -ENOMEM;
1465
1466         /* 5.1.10 initialize host structures */
1467
1468         he_init_tpdrq(he_dev);
1469
1470 #ifdef USE_TPD_POOL
1471         he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1472                 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1473         if (he_dev->tpd_pool == NULL) {
1474                 hprintk("unable to create tpd pci_pool\n");
1475                 return -ENOMEM;         
1476         }
1477
1478         INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1479 #else
             /* static TPD array: one coherent allocation, entries indexed
                into the status word so completions can be matched back */
1480         he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
1481                         CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
1482         if (!he_dev->tpd_base)
1483                 return -ENOMEM;
1484
1485         for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1486                 he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
1487                 he_dev->tpd_base[i].inuse = 0;
1488         }
1489                 
1490         he_dev->tpd_head = he_dev->tpd_base;
1491         he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
1492 #endif
1493
             /* only group 0 gets real buffer pools and queues ... */
1494         if (he_init_group(he_dev, 0) != 0)
1495                 return -ENOMEM;
1496
             /* ... the remaining groups have their queue registers zeroed /
                written with minimal threshold values */
1497         for (group = 1; group < HE_NUM_GROUPS; ++group) {
1498                 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1499                 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1500                 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1501                 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1502                                                 G0_RBPS_BS + (group * 32));
1503
1504                 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1505                 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1506                 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1507                                                 G0_RBPL_QI + (group * 32));
1508                 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1509
1510                 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1511                 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1512                 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1513                                                 G0_RBRQ_Q + (group * 16));
1514                 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1515
1516                 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1517                 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1518                 he_writel(he_dev, TBRQ_THRESH(0x1),
1519                                                 G0_TBRQ_THRESH + (group * 16));
1520                 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1521         }
1522
1523         /* host status page */
1524
1525         he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1526                                 sizeof(struct he_hsp), &he_dev->hsp_phys);
1527         if (he_dev->hsp == NULL) {
1528                 hprintk("failed to allocate host status page\n");
1529                 return -ENOMEM;
1530         }
1531         memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1532         he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1533
1534         /* initialize framer */
1535
1536 #ifdef CONFIG_ATM_HE_USE_SUNI
1537         suni_init(he_dev->atm_dev);
1538         if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1539                 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1540 #endif /* CONFIG_ATM_HE_USE_SUNI */
1541
             /* "sdh" module parameter: switch the SUNI framer to SDH framing */
1542         if (sdh) {
1543                 /* this really should be in suni.c but for now... */
1544                 int val;
1545
1546                 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1547                 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1548                 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1549         }
1550
1551         /* 5.1.12 enable transmit and receive */
1552
1553         reg = he_readl_mbox(he_dev, CS_ERCTL0);
1554         reg |= TX_ENABLE|ER_ENABLE;
1555         he_writel_mbox(he_dev, reg, CS_ERCTL0);
1556
1557         reg = he_readl(he_dev, RC_CONFIG);
1558         reg |= RX_ENABLE;
1559         he_writel(he_dev, reg, RC_CONFIG);
1560
             /* mark every constant-rate timer entry free (pcr -1 == unset) */
1561         for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1562                 he_dev->cs_stper[i].inuse = 0;
1563                 he_dev->cs_stper[i].pcr = -1;
1564         }
1565         he_dev->total_bw = 0;
1566
1567
1568         /* atm linux initialization */
1569
1570         he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1571         he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1572
             /* reset the /proc peak-depth statistics */
1573         he_dev->irq_peak = 0;
1574         he_dev->rbrq_peak = 0;
1575         he_dev->rbpl_peak = 0;
1576         he_dev->tbrq_peak = 0;
1577
1578         HPRINTK("hell bent for leather!\n");
1579
1580         return 0;
1581 }
1582
1583 static void
1584 he_stop(struct he_dev *he_dev)
1585 {
1586         u16 command;
1587         u32 gen_cntl_0, reg;
1588         struct pci_dev *pci_dev;
1589
1590         pci_dev = he_dev->pci_dev;
1591
1592         /* disable interrupts */
1593
1594         if (he_dev->membase) {
1595                 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1596                 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1597                 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1598
1599 #ifdef USE_TASKLET
1600                 tasklet_disable(&he_dev->tasklet);
1601 #endif
1602
1603                 /* disable recv and transmit */
1604
1605                 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1606                 reg &= ~(TX_ENABLE|ER_ENABLE);
1607                 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1608
1609                 reg = he_readl(he_dev, RC_CONFIG);
1610                 reg &= ~(RX_ENABLE);
1611                 he_writel(he_dev, reg, RC_CONFIG);
1612         }
1613
1614 #ifdef CONFIG_ATM_HE_USE_SUNI
1615         if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1616                 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1617 #endif /* CONFIG_ATM_HE_USE_SUNI */
1618
1619         if (he_dev->irq)
1620                 free_irq(he_dev->irq, he_dev);
1621
1622         if (he_dev->irq_base)
1623                 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1624                         * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1625
1626         if (he_dev->hsp)
1627                 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1628                                                 he_dev->hsp, he_dev->hsp_phys);
1629
1630         if (he_dev->rbpl_base) {
1631 #ifdef USE_RBPL_POOL
1632                 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1633                         void *cpuaddr = he_dev->rbpl_virt[i].virt;
1634                         dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1635
1636                         pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1637                 }
1638 #else
1639                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1640                         * CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
1641 #endif
1642                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1643                         * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1644         }
1645
1646 #ifdef USE_RBPL_POOL
1647         if (he_dev->rbpl_pool)
1648                 pci_pool_destroy(he_dev->rbpl_pool);
1649 #endif
1650
1651 #ifdef USE_RBPS
1652         if (he_dev->rbps_base) {
1653 #ifdef USE_RBPS_POOL
1654                 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1655                         void *cpuaddr = he_dev->rbps_virt[i].virt;
1656                         dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1657
1658                         pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1659                 }
1660 #else
1661                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1662                         * CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
1663 #endif
1664                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1665                         * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1666         }
1667
1668 #ifdef USE_RBPS_POOL
1669         if (he_dev->rbps_pool)
1670                 pci_pool_destroy(he_dev->rbps_pool);
1671 #endif
1672
1673 #endif /* USE_RBPS */
1674
1675         if (he_dev->rbrq_base)
1676                 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1677                                                         he_dev->rbrq_base, he_dev->rbrq_phys);
1678
1679         if (he_dev->tbrq_base)
1680                 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1681                                                         he_dev->tbrq_base, he_dev->tbrq_phys);
1682
1683         if (he_dev->tpdrq_base)
1684                 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1685                                                         he_dev->tpdrq_base, he_dev->tpdrq_phys);
1686
1687 #ifdef USE_TPD_POOL
1688         if (he_dev->tpd_pool)
1689                 pci_pool_destroy(he_dev->tpd_pool);
1690 #else
1691         if (he_dev->tpd_base)
1692                 pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
1693                                                         he_dev->tpd_base, he_dev->tpd_base_phys);
1694 #endif
1695
1696         if (he_dev->pci_dev) {
1697                 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1698                 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1699                 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1700         }
1701         
1702         if (he_dev->membase)
1703                 iounmap((void *) he_dev->membase);
1704 }
1705
/*
 * __alloc_tpd() -- allocate a transmit packet descriptor.
 *
 * Pool build (USE_TPD_POOL): carve a descriptor from the tpd pci_pool
 * and record its bus address (masked via TPD_ADDR) in the status word,
 * so it can be matched against TBRQ entries in he_service_tbrq().
 *
 * Non-pool build: scan the static descriptor ring, starting just past
 * the last descriptor handed out, for a slot not marked in-use.
 *
 * Returns NULL when no descriptor is available.  The iovec entries
 * are cleared in both variants, but tpd->vcc and tpd->skb are left
 * untouched -- presumably the caller fills them in; verify against
 * callers.  NOTE(review): no locking is done here; confirm callers
 * serialize access to the tpd ring/pool.
 */
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle; 

	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;
			
	/* stash the dma address in the status word for later lookup */
	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0; 
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	/* walk the whole ring at most once looking for a free slot */
	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;	/* wrap */
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}
1746
/* extract the CPCS-PDU length field from the AAL5 trailer: the two
 * bytes at offsets len-6/len-5, high byte first (big-endian) */
#define AAL5_LEN(buf,len)                                               \
			((((unsigned char *)(buf))[(len)-6] << 8) |     \
				(((unsigned char *)(buf))[(len)-5]))
1750
1751 /* 2.10.1.2 receive
1752  *
1753  * aal5 packets can optionally return the tcp checksum in the lower
1754  * 16 bits of the crc (RSR0_TCP_CKSUM)
1755  */
1756
/* last two bytes of the buffer, high byte first; fix: parenthesize the
 * 'len' argument in the second index -- the old '(len-1)' mis-binds for
 * low-precedence argument expressions such as 'a & b' */
#define TCP_CKSUM(buf,len)                                              \
			((((unsigned char *)(buf))[(len)-2] << 8) |     \
				(((unsigned char *)(buf))[(len)-1]))
1760
/*
 * he_service_rbrq() -- drain the receive buffer return queue for
 * 'group' and reassemble completed PDUs.
 *
 * Each RBRQ entry returns one host buffer (small or large pool) for
 * some connection (cid).  Buffers belonging to one PDU accumulate in
 * the per-vcc iovec list; when an entry flagged END_PDU arrives with
 * no CRC/length error, the fragments are copied into a freshly
 * charged sk_buff and pushed up the ATM stack.  In every completion
 * or error case the host buffers are handed back to the adapter by
 * clearing their RBP_LOANED bit.
 *
 * Takes vcc_sklist_lock for reading so __find_vcc() is safe.
 * Returns the number of PDUs assembled.
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* the RBP_SMALLBUF flag in the returned buffer address
		 * selects which pool the buffer came from */
#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		
		/* * 4: the RBRQ length appears to be in 32-bit words --
		 * see MANU0361-01; verify */
		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		/* consecutive entries usually share a cid -- only do the
		 * vcc lookup when it changes */
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			/* no one to deliver to: return the buffer to the
			 * adapter unless the entry reports a host buffer
			 * error (no buffer was consumed in that case) */
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
					
			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
					rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
				atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* append this fragment to the per-vcc reassembly list */
		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			/* connection flush indication -- wake whoever is
			 * waiting for the close to complete */
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		/* keep collecting fragments until the pdu is complete */
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		/* charge the socket's receive budget and allocate the skb */
		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		do_gettimeofday(&skb->stamp);

		/* copy each collected fragment out of its host buffer
		 * into the skb */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb->tail = skb->data + skb->len;
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				/* trim to the length recorded in the aal5
				 * trailer (padding/trailer are discarded) */
				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb->tail = skb->data + skb->len;
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_HW;
					skb->csum = TCP_CKSUM(skb->data,
							he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		vcc->push(vcc, skb);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		/* hand every buffer of this pdu back to its pool and
		 * reset the per-vcc reassembly state */
		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		/* advance head; the ring wraps via RBRQ_MASK */
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* publish the new head pointer to the adapter */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1955
1956 static void
1957 he_service_tbrq(struct he_dev *he_dev, int group)
1958 {
1959         struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1960                                 ((unsigned long)he_dev->tbrq_base |
1961                                         he_dev->hsp->group[group].tbrq_tail);
1962         struct he_tpd *tpd;
1963         int slot, updated = 0;
1964 #ifdef USE_TPD_POOL
1965         struct list_head *p;
1966 #endif
1967
1968         /* 2.1.6 transmit buffer return queue */
1969
1970         while (he_dev->tbrq_head != tbrq_tail) {
1971                 ++updated;
1972
1973                 HPRINTK("tbrq%d 0x%x%s%s\n",
1974                         group,
1975                         TBRQ_TPD(he_dev->tbrq_head), 
1976                         TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1977                         TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1978 #ifdef USE_TPD_POOL
1979                 tpd = NULL;
1980                 list_for_each(p, &he_dev->outstanding_tpds) {
1981                         struct he_tpd *__tpd = list_entry(p, struct he_tpd, entry);
1982                         if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1983                                 tpd = __tpd;
1984                                 list_del(&__tpd->entry);
1985                                 break;
1986                         }
1987                 }
1988
1989                 if (tpd == NULL) {
1990                         hprintk("unable to locate tpd for dma buffer %x\n",
1991                                                 TBRQ_TPD(he_dev->tbrq_head));
1992                         goto next_tbrq_entry;
1993                 }
1994 #else
1995                 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
1996 #endif
1997
1998                 if (TBRQ_EOS(he_dev->tbrq_head)) {
1999                         HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2000                                 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2001                         if (tpd->vcc)
2002                                 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2003
2004                         goto next_tbrq_entry;
2005                 }
2006
2007                 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2008                         if (tpd->iovec[slot].addr)
2009                                 pci_unmap_single(he_dev->pci_dev,
2010                                         tpd->iovec[slot].addr,
2011                                         tpd->iovec[slot].len & TPD_LEN_MASK,
2012                                                         PCI_DMA_TODEVICE);
2013                         if (tpd->iovec[slot].len & TPD_LST)
2014                                 break;
2015                                 
2016                 }
2017
2018                 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2019                         if (tpd->vcc && tpd->vcc->pop)
2020                                 tpd->vcc->pop(tpd->vcc, tpd->skb);
2021                         else
2022                                 dev_kfree_skb_any(tpd->skb);
2023                 }
2024
2025 next_tbrq_entry:
2026 #ifdef USE_TPD_POOL
2027                 if (tpd)
2028                         pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2029 #else
2030                 tpd->inuse = 0;
2031 #endif
2032                 he_dev->tbrq_head = (struct he_tbrq *)
2033                                 ((unsigned long) he_dev->tbrq_base |
2034                                         TBRQ_MASK(++he_dev->tbrq_head));
2035         }
2036
2037         if (updated) {
2038                 if (updated > he_dev->tbrq_peak)
2039                         he_dev->tbrq_peak = updated;
2040
2041                 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2042                                                 G0_TBRQ_H + (group * 16));
2043         }
2044 }
2045
2046
2047 static void
2048 he_service_rbpl(struct he_dev *he_dev, int group)
2049 {
2050         struct he_rbp *newtail;
2051         struct he_rbp *rbpl_head;
2052         int moved = 0;
2053
2054         rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2055                                         RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2056
2057         for (;;) {
2058                 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2059                                                 RBPL_MASK(he_dev->rbpl_tail+1));
2060
2061                 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2062                 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2063                         break;
2064
2065                 newtail->status |= RBP_LOANED;
2066                 he_dev->rbpl_tail = newtail;
2067                 ++moved;
2068         } 
2069
2070         if (moved)
2071                 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2072 }
2073
#ifdef USE_RBPS
/*
 * he_service_rbps() -- replenish the small receive buffer pool.
 *
 * Mirror of he_service_rbpl(): advance the tail over reclaimed
 * buffers, re-loan them to the adapter, and publish the new tail.
 */
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
	struct he_rbp *head;
	struct he_rbp *next;
	int refilled = 0;

	/* snapshot of the adapter's current pool head pointer */
	head = (struct he_rbp *) ((unsigned long) he_dev->rbps_base |
				RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

	while (1) {
		next = (struct he_rbp *) ((unsigned long) he_dev->rbps_base |
				RBPS_MASK(he_dev->rbps_tail + 1));

		/* table 3.42 -- rbps_tail should never be set to rbps_head;
		 * also stop at the first buffer still loaned out */
		if (next == head || (next->status & RBP_LOANED))
			break;

		next->status |= RBP_LOANED;
		he_dev->rbps_tail = next;
		refilled++;
	}

	if (refilled)
		he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */
2102
/*
 * he_tasklet() -- interrupt bottom half: walk the in-memory interrupt
 * queue and dispatch each event to its service routine.
 *
 * With USE_TASKLET this runs as a tasklet and takes global_lock
 * itself; otherwise it is called synchronously from he_irq_handler()
 * with global_lock already held by the caller.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
	spin_lock_irqsave(&he_dev->global_lock, flags);
#endif

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		/* each queue entry encodes an event type and a group */
		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				/* only refill the buffer pools if some
				 * buffers were actually consumed */
				if (he_service_rbrq(he_dev, group)) {
					he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
					he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				}
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
				he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* drop the lock across the PHY callback.
				 * NOTE(review): when USE_TASKLET is not
				 * defined, 'flags' is uninitialized here --
				 * the lock was taken by he_irq_handler()
				 * with its own flags; verify */
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
				he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark the slot consumed so a stale entry is detectable */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		/* acknowledge consumption up to irq_tail */
		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
#ifdef USE_TASKLET
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
#endif
}
2202
/*
 * he_irq_handler() -- top-half interrupt handler.
 *
 * Reads the adapter-maintained tail offset of the in-memory interrupt
 * queue; if new entries are present, schedules the tasklet (or runs
 * he_tasklet() synchronously when USE_TASKLET is not configured,
 * while still holding global_lock) and acknowledges the interrupt.
 * Returns IRQ_HANDLED only when work was found, so a shared interrupt
 * line is reported correctly.
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* the adapter stores its tail as a word offset; convert it to
	 * a pointer within irq_base (<< 2 scales words to bytes) */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* fall back to reading the tail from the register */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
#ifdef USE_TASKLET
		tasklet_schedule(&he_dev->tasklet);
#else
		he_tasklet((unsigned long) he_dev);
#endif
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2244
/*
 * __enqueue_tpd() -- place a transmit packet descriptor on the
 * adapter's tpd ready queue (TPDRQ) for connection 'cid'.
 *
 * The TPDRQ head is cached in he_dev and only refreshed from the
 * adapter when the tail is about to catch up with it (reading the
 * queue head on every enqueue would be needlessly slow); if the
 * queue really is full the pdu is dropped (see FIXME below) and the
 * descriptor freed.  NOTE(review): no locking is done here; verify
 * that callers hold global_lock.
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			if (tpd->skb) {
				/* NOTE(review): tpd->vcc is dereferenced
				 * without a NULL check here -- presumably
				 * skb != NULL implies a vcc; verify */
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
#ifdef USE_TPD_POOL
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
			tpd->inuse = 0;
#endif
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
	/* track the tpd so he_service_tbrq() can find it by dma addr */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
	he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
				(TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
	he_dev->tpdrq_tail->cid = cid;
	/* ensure the queue entry is visible before the tail update */
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T);		/* flush posted writes */
}
2309
2310 static int
2311 he_open(struct atm_vcc *vcc)
2312 {
2313         unsigned long flags;
2314         struct he_dev *he_dev = HE_DEV(vcc->dev);
2315         struct he_vcc *he_vcc;
2316         int err = 0;
2317         unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2318         short vpi = vcc->vpi;
2319         int vci = vcc->vci;
2320
2321         if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2322                 return 0;
2323
2324         HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2325
2326         set_bit(ATM_VF_ADDR, &vcc->flags);
2327
2328         cid = he_mkcid(he_dev, vpi, vci);
2329
2330         he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2331         if (he_vcc == NULL) {
2332                 hprintk("unable to allocate he_vcc during open\n");
2333                 return -ENOMEM;
2334         }
2335
2336         he_vcc->iov_tail = he_vcc->iov_head;
2337         he_vcc->pdu_len = 0;
2338         he_vcc->rc_index = -1;
2339
2340         init_waitqueue_head(&he_vcc->rx_waitq);
2341         init_waitqueue_head(&he_vcc->tx_waitq);
2342
2343         vcc->dev_data = he_vcc;
2344
2345         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2346                 int pcr_goal;
2347
2348                 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2349                 if (pcr_goal == 0)
2350                         pcr_goal = he_dev->atm_dev->link_rate;
2351                 if (pcr_goal < 0)       /* means round down, technically */
2352                         pcr_goal = -pcr_goal;
2353
2354                 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2355
2356                 switch (vcc->qos.aal) {
2357                         case ATM_AAL5:
2358                                 tsr0_aal = TSR0_AAL5;
2359                                 tsr4 = TSR4_AAL5;
2360                                 break;
2361                         case ATM_AAL0:
2362                                 tsr0_aal = TSR0_AAL0_SDU;
2363                                 tsr4 = TSR4_AAL0_SDU;
2364                                 break;
2365                         default:
2366                                 err = -EINVAL;
2367                                 goto open_failed;
2368                 }
2369
2370                 spin_lock_irqsave(&he_dev->global_lock, flags);
2371                 tsr0 = he_readl_tsr0(he_dev, cid);
2372                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2373
2374                 if (TSR0_CONN_STATE(tsr0) != 0) {
2375                         hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2376                         err = -EBUSY;
2377                         goto open_failed;
2378                 }
2379
2380                 switch (vcc->qos.txtp.traffic_class) {
2381                         case ATM_UBR:
2382                                 /* 2.3.3.1 open connection ubr */
2383
2384                                 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2385                                         TSR0_USE_WMIN | TSR0_UPDATE_GER;
2386                                 break;
2387
2388                         case ATM_CBR:
2389                                 /* 2.3.3.2 open connection cbr */
2390
2391                                 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2392                                 if ((he_dev->total_bw + pcr_goal)
2393                                         > (he_dev->atm_dev->link_rate * 9 / 10))
2394                                 {
2395                                         err = -EBUSY;
2396                                         goto open_failed;
2397                                 }
2398
2399                                 spin_lock_irqsave(&he_dev->global_lock, flags);                 /* also protects he_dev->cs_stper[] */
2400
2401                                 /* find an unused cs_stper register */
2402                                 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2403                                         if (he_dev->cs_stper[reg].inuse == 0 || 
2404                                             he_dev->cs_stper[reg].pcr == pcr_goal)
2405                                                         break;
2406
2407                                 if (reg == HE_NUM_CS_STPER) {
2408                                         err = -EBUSY;
2409                                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2410                                         goto open_failed;
2411                                 }
2412
2413                                 he_dev->total_bw += pcr_goal;
2414
2415                                 he_vcc->rc_index = reg;
2416                                 ++he_dev->cs_stper[reg].inuse;
2417                                 he_dev->cs_stper[reg].pcr = pcr_goal;
2418
2419                                 clock = he_is622(he_dev) ? 66667000 : 50000000;
2420                                 period = clock / pcr_goal;
2421                                 
2422                                 HPRINTK("rc_index = %d period = %d\n",
2423                                                                 reg, period);
2424
2425                                 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2426                                                         CS_STPER0 + reg);
2427                                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2428
2429                                 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2430                                                         TSR0_RC_INDEX(reg);
2431
2432                                 break;
2433                         default:
2434                                 err = -EINVAL;
2435                                 goto open_failed;
2436                 }
2437
2438                 spin_lock_irqsave(&he_dev->global_lock, flags);
2439
2440                 he_writel_tsr0(he_dev, tsr0, cid);
2441                 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2442                 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2443                                         TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2444                 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2445                 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2446
2447                 he_writel_tsr3(he_dev, 0x0, cid);
2448                 he_writel_tsr5(he_dev, 0x0, cid);
2449                 he_writel_tsr6(he_dev, 0x0, cid);
2450                 he_writel_tsr7(he_dev, 0x0, cid);
2451                 he_writel_tsr8(he_dev, 0x0, cid);
2452                 he_writel_tsr10(he_dev, 0x0, cid);
2453                 he_writel_tsr11(he_dev, 0x0, cid);
2454                 he_writel_tsr12(he_dev, 0x0, cid);
2455                 he_writel_tsr13(he_dev, 0x0, cid);
2456                 he_writel_tsr14(he_dev, 0x0, cid);
2457                 (void) he_readl_tsr0(he_dev, cid);              /* flush posted writes */
2458                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2459         }
2460
2461         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2462                 unsigned aal;
2463
2464                 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2465                                                 &HE_VCC(vcc)->rx_waitq);
2466
2467                 switch (vcc->qos.aal) {
2468                         case ATM_AAL5:
2469                                 aal = RSR0_AAL5;
2470                                 break;
2471                         case ATM_AAL0:
2472                                 aal = RSR0_RAWCELL;
2473                                 break;
2474                         default:
2475                                 err = -EINVAL;
2476                                 goto open_failed;
2477                 }
2478
2479                 spin_lock_irqsave(&he_dev->global_lock, flags);
2480
2481                 rsr0 = he_readl_rsr0(he_dev, cid);
2482                 if (rsr0 & RSR0_OPEN_CONN) {
2483                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2484
2485                         hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2486                         err = -EBUSY;
2487                         goto open_failed;
2488                 }
2489
2490 #ifdef USE_RBPS
2491                 rsr1 = RSR1_GROUP(0);
2492                 rsr4 = RSR4_GROUP(0);
2493 #else /* !USE_RBPS */
2494                 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2495                 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2496 #endif /* USE_RBPS */
2497                 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ? 
2498                                 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2499
2500 #ifdef USE_CHECKSUM_HW
2501                 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2502                         rsr0 |= RSR0_TCP_CKSUM;
2503 #endif
2504
2505                 he_writel_rsr4(he_dev, rsr4, cid);
2506                 he_writel_rsr1(he_dev, rsr1, cid);
2507                 /* 5.1.11 last parameter initialized should be
2508                           the open/closed indication in rsr0 */
2509                 he_writel_rsr0(he_dev,
2510                         rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2511                 (void) he_readl_rsr0(he_dev, cid);              /* flush posted writes */
2512
2513                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2514         }
2515
2516 open_failed:
2517
2518         if (err) {
2519                 if (he_vcc)
2520                         kfree(he_vcc);
2521                 clear_bit(ATM_VF_ADDR, &vcc->flags);
2522         }
2523         else
2524                 set_bit(ATM_VF_READY, &vcc->flags);
2525
2526         return err;
2527 }
2528
/*
 * he_close() -- atmdev_ops .close callback: tear down one VC.
 *
 * Mirrors he_open(): the receive side is closed first (manual section
 * 2.7.2.2), then the transmit side is drained and closed (2.1.2/2.3.1.1).
 * Both halves block on a wait queue until the adapter signals completion,
 * with a 30 second timeout.  Frees the per-VC he_vcc on exit.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		/* NOTE(review): this busy-wait spins with the global lock
		 * held and irqs off until the receive-close engine is idle */
		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* queue ourselves BEFORE issuing the close so the irq
		 * handler's wake_up cannot race past us */
		add_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);		/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);
		
		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* drain: sleep with exponential backoff (capped at 1s) until
		 * the socket's tx buffer accounting drops to zero or we give
		 * up after MAX_RETRY rounds */
		while (((tx_inuse = atomic_read(&vcc->sk->sk_wmem_alloc)) > 0) &&
		       (retry < MAX_RETRY)) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			(void) schedule_timeout(sleep);
			if (sleep < HZ)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev, 
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);		/* flush posted writes */

		/* an empty end-of-stream TPD tells the adapter this was the
		 * last packet on the connection; its completion interrupt
		 * wakes us below */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		add_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		/* poll (lock held, irqs off) until the adapter reports the
		 * session ended and the connection state machine is idle */
		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/* release the CBR rate-controller slot taken in he_open() */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2687
2688 static int
2689 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2690 {
2691         unsigned long flags;
2692         struct he_dev *he_dev = HE_DEV(vcc->dev);
2693         unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2694         struct he_tpd *tpd;
2695 #ifdef USE_SCATTERGATHER
2696         int i, slot = 0;
2697 #endif
2698
2699 #define HE_TPD_BUFSIZE 0xffff
2700
2701         HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2702
2703         if ((skb->len > HE_TPD_BUFSIZE) ||
2704             ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2705                 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2706                 if (vcc->pop)
2707                         vcc->pop(vcc, skb);
2708                 else
2709                         dev_kfree_skb_any(skb);
2710                 atomic_inc(&vcc->stats->tx_err);
2711                 return -EINVAL;
2712         }
2713
2714 #ifndef USE_SCATTERGATHER
2715         if (skb_shinfo(skb)->nr_frags) {
2716                 hprintk("no scatter/gather support\n");
2717                 if (vcc->pop)
2718                         vcc->pop(vcc, skb);
2719                 else
2720                         dev_kfree_skb_any(skb);
2721                 atomic_inc(&vcc->stats->tx_err);
2722                 return -EINVAL;
2723         }
2724 #endif
2725         spin_lock_irqsave(&he_dev->global_lock, flags);
2726
2727         tpd = __alloc_tpd(he_dev);
2728         if (tpd == NULL) {
2729                 if (vcc->pop)
2730                         vcc->pop(vcc, skb);
2731                 else
2732                         dev_kfree_skb_any(skb);
2733                 atomic_inc(&vcc->stats->tx_err);
2734                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2735                 return -ENOMEM;
2736         }
2737
2738         if (vcc->qos.aal == ATM_AAL5)
2739                 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2740         else {
2741                 char *pti_clp = (void *) (skb->data + 3);
2742                 int clp, pti;
2743
2744                 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 
2745                 clp = (*pti_clp & ATM_HDR_CLP);
2746                 tpd->status |= TPD_CELLTYPE(pti);
2747                 if (clp)
2748                         tpd->status |= TPD_CLP;
2749
2750                 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2751         }
2752
2753 #ifdef USE_SCATTERGATHER
2754         tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2755                                 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2756         tpd->iovec[slot].len = skb->len - skb->data_len;
2757         ++slot;
2758
2759         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2760                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2761
2762                 if (slot == TPD_MAXIOV) {       /* queue tpd; start new tpd */
2763                         tpd->vcc = vcc;
2764                         tpd->skb = NULL;        /* not the last fragment
2765                                                    so dont ->push() yet */
2766                         wmb();
2767
2768                         __enqueue_tpd(he_dev, tpd, cid);
2769                         tpd = __alloc_tpd(he_dev);
2770                         if (tpd == NULL) {
2771                                 if (vcc->pop)
2772                                         vcc->pop(vcc, skb);
2773                                 else
2774                                         dev_kfree_skb_any(skb);
2775                                 atomic_inc(&vcc->stats->tx_err);
2776                                 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2777                                 return -ENOMEM;
2778                         }
2779                         tpd->status |= TPD_USERCELL;
2780                         slot = 0;
2781                 }
2782
2783                 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2784                         (void *) page_address(frag->page) + frag->page_offset,
2785                                 frag->size, PCI_DMA_TODEVICE);
2786                 tpd->iovec[slot].len = frag->size;
2787                 ++slot;
2788
2789         }
2790
2791         tpd->iovec[slot - 1].len |= TPD_LST;
2792 #else
2793         tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2794         tpd->length0 = skb->len | TPD_LST;
2795 #endif
2796         tpd->status |= TPD_INT;
2797
2798         tpd->vcc = vcc;
2799         tpd->skb = skb;
2800         wmb();
2801         ATM_SKB(skb)->vcc = vcc;
2802
2803         __enqueue_tpd(he_dev, tpd, cid);
2804         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2805
2806         atomic_inc(&vcc->stats->tx);
2807
2808         return 0;
2809 }
2810
2811 static int
2812 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2813 {
2814         unsigned long flags;
2815         struct he_dev *he_dev = HE_DEV(atm_dev);
2816         struct he_ioctl_reg reg;
2817         int err = 0;
2818
2819         switch (cmd) {
2820                 case HE_GET_REG:
2821                         if (!capable(CAP_NET_ADMIN))
2822                                 return -EPERM;
2823
2824                         if (copy_from_user(&reg, arg,
2825                                            sizeof(struct he_ioctl_reg)))
2826                                 return -EFAULT;
2827                         
2828                         spin_lock_irqsave(&he_dev->global_lock, flags);
2829                         switch (reg.type) {
2830                                 case HE_REGTYPE_PCI:
2831                                         reg.val = he_readl(he_dev, reg.addr);
2832                                         break;
2833                                 case HE_REGTYPE_RCM:
2834                                         reg.val =
2835                                                 he_readl_rcm(he_dev, reg.addr);
2836                                         break;
2837                                 case HE_REGTYPE_TCM:
2838                                         reg.val =
2839                                                 he_readl_tcm(he_dev, reg.addr);
2840                                         break;
2841                                 case HE_REGTYPE_MBOX:
2842                                         reg.val =
2843                                                 he_readl_mbox(he_dev, reg.addr);
2844                                         break;
2845                                 default:
2846                                         err = -EINVAL;
2847                                         break;
2848                         }
2849                         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2850                         if (err == 0)
2851                                 if (copy_to_user(arg, &reg,
2852                                                         sizeof(struct he_ioctl_reg)))
2853                                         return -EFAULT;
2854                         break;
2855                 default:
2856 #ifdef CONFIG_ATM_HE_USE_SUNI
2857                         if (atm_dev->phy && atm_dev->phy->ioctl)
2858                                 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2859 #else /* CONFIG_ATM_HE_USE_SUNI */
2860                         err = -EINVAL;
2861 #endif /* CONFIG_ATM_HE_USE_SUNI */
2862                         break;
2863         }
2864
2865         return err;
2866 }
2867
2868 static void
2869 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2870 {
2871         unsigned long flags;
2872         struct he_dev *he_dev = HE_DEV(atm_dev);
2873
2874         HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2875
2876         spin_lock_irqsave(&he_dev->global_lock, flags);
2877         he_writel(he_dev, val, FRAMER + (addr*4));
2878         (void) he_readl(he_dev, FRAMER + (addr*4));             /* flush posted writes */
2879         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2880 }
2881  
2882         
2883 static unsigned char
2884 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2885
2886         unsigned long flags;
2887         struct he_dev *he_dev = HE_DEV(atm_dev);
2888         unsigned reg;
2889
2890         spin_lock_irqsave(&he_dev->global_lock, flags);
2891         reg = he_readl(he_dev, FRAMER + (addr*4));
2892         spin_unlock_irqrestore(&he_dev->global_lock, flags);
2893
2894         HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2895         return reg;
2896 }
2897
/*
 * he_proc_read() -- atmdev_ops .proc_read callback.
 *
 * The ATM proc layer calls this repeatedly with an increasing *pos;
 * each "if (!left--)" arm emits exactly one line per call and the
 * function returns 0 when every line has been produced.
 */
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	/* NOTE(review): these accumulators are static, so they are shared
	 * across all HE adapters and grow on every proc read -- presumably
	 * intentional (the hardware counters clear on read), but verify */
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;


	left = *pos;
	if (!left--)
		return sprintf(page, "%s\n", version);

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

	/* fold the hardware error counters into the running totals */
	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n", 
							mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d  peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);


#ifdef notdef
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

	/* one line per CBR rate-controller slot */
	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	return 0;
}
2980
2981 /* eeprom routines  -- see 4.7 */
2982
/*
 * read_prom_byte() -- bit-bang one byte out of the adapter's serial EEPROM
 * (see manual section 4.7).
 *
 * The EEPROM is driven through bits of the HOST_CNTL register: the READ
 * opcode is shifted out from readtab[], the 8-bit address is clocked out
 * MSB first, then 8 data bits are clocked back in.  Every transition is
 * followed by an EEPROM_DELAY udelay; the statement order here is part of
 * the wire protocol and must not be rearranged.
 */
u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	/* preserve the rest of HOST_CNTL, clear the EEPROM control bits */
	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;
       
	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);
       
	/* Send READ instruction */
	for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	/* Next, we need to send the byte address to read from */
	/* (two writes per bit: data with clock low, then clock high) */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);
       
	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		byte_read |= (unsigned char)
			   ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}
       
	/* deselect the chip */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
3032
/* Module metadata and load-time tunables (2.6-era MODULE_PARM interface). */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
MODULE_PARM(disable64, "h");
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
MODULE_PARM(nvpibits, "i");
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
MODULE_PARM(nvcibits, "i");
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
MODULE_PARM(rx_skb_reserve, "i");
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
MODULE_PARM(irq_coalesce, "i");
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
MODULE_PARM(sdh, "i");
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3048
/* PCI IDs this driver binds to: any FORE HE adapter, any subsystem. */
static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
	  0, 0, 0 },
	{ 0, }			/* terminator */
};
3054
/* PCI driver glue: probe/remove defined earlier in this file. */
static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	__devexit_p(he_remove_one),
	.id_table =	he_pci_tbl,
};
3061
/* Module entry point: register the PCI driver (probes each adapter). */
static int __init he_init(void)
{
	return pci_module_init(&he_driver);
}
3066
/* Module exit point: unregister the driver, detaching all adapters. */
static void __exit he_cleanup(void)
{
	pci_unregister_driver(&he_driver);
}
3071
/* wire the entry/exit points into the module loader */
module_init(he_init);
module_exit(he_cleanup);